ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | 1a4fcf8b726f4b4b4f1eebeb6ccedbdfb0a60818 | """Generate surface water maps from Sentinel-1 RTC products
Create a surface water extent map from a dual-pol Sentinel-1 RTC product and
a HAND image. The HAND image must be pixel-aligned (same extent and size) to
the RTC images. The water extent maps are created using an adaptive Expectation
Maximization thresholding approach and refined using Fuzzy Logic.
"""
import argparse
import logging
import sys
from pathlib import Path
from typing import Optional, Tuple, Union
import numpy as np
import skfuzzy as fuzz
from osgeo import gdal
from skimage import measure
from asf_tools.composite import get_epsg_code, write_cog
from asf_tools.hand.prepare import prepare_hand_for_raster
from asf_tools.raster import read_as_masked_array
from asf_tools.threshold import expectation_maximization_threshold as em_threshold
from asf_tools.tile import tile_array, untile_array
log = logging.getLogger(__name__)
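# Split each tile into four half-size sub-tiles and return the mean backscatter of each sub-tile for every tile.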
def mean_of_subtiles(tiles: np.ndarray) -> np.ndarray:
sub_tile_shape = (tiles.shape[1] // 2, tiles.shape[2] // 2)
sub_tiles_mean = np.zeros((tiles.shape[0], 4))
for ii, tile in enumerate(tiles):
sub_tiles = tile_array(tile.filled(0), tile_shape=sub_tile_shape)
sub_tiles_mean[ii, :] = sub_tiles.mean(axis=(1, 2))
return sub_tiles_mean
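# Return the indexes of tiles where more than `hand_fraction` of the pixels have HAND values below `hand_threshold`.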
def select_hand_tiles(tiles: Union[np.ndarray, np.ma.MaskedArray],
hand_threshold: float, hand_fraction: float) -> np.ndarray:
if np.allclose(tiles, 0.0):
raise ValueError(f'All pixels in scene have a HAND value of {0.0} (all water); '
f'scene is not a good candidate for water mapping.')
tile_indexes = np.arange(tiles.shape[0])
tiles = np.ma.masked_greater_equal(tiles, hand_threshold)
percent_valid_pixels = np.sum(~tiles.mask, axis=(1, 2)) / (tiles.shape[1] * tiles.shape[2])
return tile_indexes[percent_valid_pixels > hand_fraction]
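# From the HAND candidate tiles, pick up to five low-median, high-variance backscatter tiles (those most likely to
# straddle a land/water boundary), relaxing the variance percentile from the 95th toward the 5th until enough are found.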
def select_backscatter_tiles(backscatter_tiles: np.ndarray, hand_candidates: np.ndarray) -> np.ndarray:
tile_indexes = np.arange(backscatter_tiles.shape[0])
sub_tile_means = mean_of_subtiles(backscatter_tiles)
sub_tile_means_std = sub_tile_means.std(axis=1)
tile_medians = np.ma.median(backscatter_tiles, axis=(1, 2))
tile_variance = sub_tile_means_std / tile_medians # OK
low_mean_threshold = np.ma.median(tile_medians[hand_candidates])
low_mean_candidates = tile_indexes[tile_medians < low_mean_threshold]
potential_candidates = np.intersect1d(hand_candidates, low_mean_candidates)
for variance_threshold in np.percentile(tile_variance, np.arange(5, 96)[::-1]):
variance_candidates = tile_indexes[tile_variance > variance_threshold]
selected = np.intersect1d(variance_candidates, potential_candidates)
sort_index = np.argsort(sub_tile_means_std[selected])[::-1]
if len(selected) >= 5:
return selected[sort_index][:5]
return np.array([])
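# Scale the selected tiles to integers, run Expectation Maximization thresholding on each, and return the median of
# the four lowest per-tile thresholds (converted back to the unscaled domain).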
def determine_em_threshold(tiles: np.ndarray, scaling: float) -> float:
thresholds = []
for ii in range(tiles.shape[0]):
test_tile = (np.around(tiles[ii, :, :] * scaling)).astype(int)
thresholds.append(em_threshold(test_tile) / scaling)
return np.median(np.sort(thresholds)[:4])
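# Approximate the surface slope in degrees from the gradient magnitude of the array, normalized by the pixel size.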
def calculate_slope_magnitude(array: np.ndarray, pixel_size) -> np.ndarray:
dx, dy = np.gradient(array)
magnitude = np.sqrt(dx**2 + dy**2) / pixel_size
slope = np.arctan(magnitude) / np.pi * 180.
return slope
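# Membership limits for the HAND indicator: the lower limit is the masked median and the upper limit adds
# `std_range` standard deviations plus a small fixed buffer.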
def determine_membership_limits(
array: np.ndarray, mask_percentile: float = 90., std_range: float = 3.0) -> Tuple[float, float]:
array = np.ma.masked_values(array, 0.)
array = np.ma.masked_greater(array, np.percentile(array, mask_percentile))
lower_limit = np.ma.median(array)
upper_limit = lower_limit + std_range * array.std() + 5.0
return lower_limit, upper_limit
def min_max_membership(array: np.ndarray, lower_limit: float, upper_limit: float, resolution: float) -> np.ndarray:
possible_values = np.arange(array.min(), array.max(), resolution)
activation = fuzz.zmf(possible_values, lower_limit, upper_limit)
membership = fuzz.interp_membership(possible_values, activation, array)
return membership
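# Fuzzy membership based on connected-segment size: segments larger than `max_area` receive full membership, and
# smaller segments are graded by their pixel area.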
def segment_area_membership(segments: np.ndarray, min_area: int = 3, max_area: int = 10) -> np.ndarray:
segment_areas = np.bincount(segments.ravel())
possible_areas = np.arange(min_area, max_area + 1)
activation = 1 - fuzz.zmf(possible_areas, min_area, max_area)
segment_membership = np.zeros_like(segments)
segments_above_threshold = np.squeeze((segment_areas > max_area).nonzero())
segments_above_threshold = np.delete(segments_above_threshold, (segments_above_threshold == 0).nonzero())
np.putmask(segment_membership, np.isin(segments, segments_above_threshold), 1)
for area in possible_areas:
mask = np.isin(segments, (segment_areas == area).nonzero())
np.putmask(segment_membership, mask, fuzz.interp_membership(possible_areas, activation, area))
return segment_membership
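# Drop labeled segments smaller than `min_area` pixels, returning a boolean mask of the surviving pixels.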
def remove_small_segments(segments: np.ndarray, min_area: int = 3) -> np.ndarray:
valid_segments = segments != 0
segment_areas = np.bincount(segments.ravel())
segments_below_threshold = (segment_areas < min_area).nonzero()
np.putmask(valid_segments, np.isin(segments, segments_below_threshold), False)
return valid_segments
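# Refine the initial water map: keep only pixels with non-zero membership in every indicator (segment size,
# backscatter, HAND, and slope) and an average membership at or above `membership_threshold`.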
def fuzzy_refinement(initial_map: np.ndarray, gaussian_array: np.ndarray, hand_array: np.ndarray, pixel_size: float,
gaussian_thresholds: Tuple[float, float], membership_threshold: float = 0.45) -> np.ndarray:
water_map = np.ones_like(initial_map)
water_segments = measure.label(initial_map, connectivity=2)
water_segment_membership = segment_area_membership(water_segments)
water_map &= ~np.isclose(water_segment_membership, 0.)
gaussian_membership = min_max_membership(gaussian_array, gaussian_thresholds[0], gaussian_thresholds[1], 0.005)
water_map &= ~np.isclose(gaussian_membership, 0.)
hand_lower_limit, hand_upper_limit = determine_membership_limits(hand_array)
hand_membership = min_max_membership(hand_array, hand_lower_limit, hand_upper_limit, 0.1)
water_map &= ~np.isclose(hand_membership, 0.)
hand_slopes = calculate_slope_magnitude(hand_array, pixel_size)
slope_membership = min_max_membership(hand_slopes, 0., 15., 0.1)
water_map &= ~np.isclose(slope_membership, 0.)
water_map_weights = (gaussian_membership + hand_membership + slope_membership + water_segment_membership) / 4.
water_map &= water_map_weights >= membership_threshold
return water_map
def make_water_map(out_raster: Union[str, Path], vv_raster: Union[str, Path], vh_raster: Union[str, Path],
hand_raster: Optional[Union[str, Path]] = None, tile_shape: Tuple[int, int] = (100, 100),
max_vv_threshold: float = -15.5, max_vh_threshold: float = -23.0,
hand_threshold: float = 15., hand_fraction: float = 0.8, membership_threshold: float = 0.45):
"""Creates a surface water extent map from a Sentinel-1 RTC product
Create a surface water extent map from a dual-pol Sentinel-1 RTC product and
a HAND image. The HAND image must be pixel-aligned (same extent and size) to
the RTC images. The water extent maps are created using an adaptive Expectation
Maximization thresholding approach and refined with Fuzzy Logic.
The input images are broken into a set of corresponding tiles with a shape of
`tile_shape`, and a set of tiles are selected from the VH RTC
image that contain water boundaries to determine an appropriate water threshold.
Candidate tiles must meet these criteria:
* `hand_fraction` of pixels within a tile must have HAND pixel values lower
than `hand_threshold`
* The median backscatter value for the tile must be lower than the median backscatter
value of the HAND candidate tiles
* The tile must have a high variance -- high variance is considered initially to
be a variance in the 95th percentile of the tile variances, but is progressively
relaxed to the 5th percentile if there are not at least 5 candidate tiles.
The 5 VH tiles with the highest variance are selected for thresholding and a
water threshold value is determined using an Expectation Maximization approach.
If there are not enough candidate tiles or the determined threshold is too high,
`max_vh_threshold` and/or `max_vv_threshold` will be used instead.
From the initial threshold-based water extent maps, Fuzzy Logic is used to remove
spurious false detections and improve the water extent map quality. The fuzzy logic
uses these indicators for the presence of water:
* radar cross section in a pixel relative to the determined detection threshold
* the height above nearest drainage (HAND)
* the surface slope, which is derived from the HAND data
* the size of the detected water feature
For each indicator, a Z-shaped activation function is used to determine pixel membership.
The membership maps are combined to form the final water extent map. Pixels classified
as water pixels will:
* have non-zero membership in all of the indicators, and
* have an average membership above the `membership_threshold` value.
Finally, the VV and VH water masks will be combined to include all water pixels
from both masks, and the combined water map will be written to `out_raster`.
Args:
out_raster: Water map GeoTIFF to create
vv_raster: Sentinel-1 RTC GeoTIFF, in power scale, with VV polarization
vh_raster: Sentinel-1 RTC GeoTIFF, in power scale, with VH polarization
hand_raster: Height Above Nearest Drainage (HAND) GeoTIFF aligned to the RTC rasters
tile_shape: shape (height, width) in pixels to tile the image to
max_vv_threshold: Maximum threshold value to use for `vv_raster` in decibels (db)
max_vh_threshold: Maximum threshold value to use for `vh_raster` in decibels (db)
hand_threshold: The maximum height above nearest drainage in meters to consider
a pixel valid
hand_fraction: The minimum fraction of valid HAND pixels required in a tile for
thresholding
membership_threshold: The average membership to the fuzzy indicators required for a water pixel
"""
if tile_shape[0] % 2 or tile_shape[1] % 2:
raise ValueError(f'tile_shape {tile_shape} requires even values.')
info = gdal.Info(str(vh_raster), format='json')
out_transform = info['geoTransform']
out_epsg = get_epsg_code(info)
if hand_raster is None:
hand_raster = str(out_raster).replace('.tif', '_HAND.tif')
log.info(f'Extracting HAND data to: {hand_raster}')
prepare_hand_for_raster(hand_raster, vh_raster)
log.info(f'Determining HAND memberships from {hand_raster}')
hand_array = read_as_masked_array(hand_raster)
hand_tiles = tile_array(hand_array, tile_shape=tile_shape, pad_value=np.nan)
hand_candidates = select_hand_tiles(hand_tiles, hand_threshold, hand_fraction)
log.debug(f'Selected HAND tile candidates {hand_candidates}')
selected_tiles = None
water_extent_maps = []
for max_db_threshold, raster, pol in ((max_vh_threshold, vh_raster, 'VH'), (max_vv_threshold, vv_raster, 'VV')):
log.info(f'Creating initial {pol} water extent map from {raster}')
array = read_as_masked_array(raster)
tiles = tile_array(array, tile_shape=tile_shape, pad_value=0.)
# Masking less than zero only necessary for old HyP3/GAMMA products which sometimes returned negative powers
tiles = np.ma.masked_less_equal(tiles, 0.)
if selected_tiles is None:
selected_tiles = select_backscatter_tiles(tiles, hand_candidates)
log.info(f'Selected tiles {selected_tiles} from {raster}')
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning) # invalid value and divide by zero encountered in log10
tiles = np.log10(tiles) + 30. # linear power scale -> Gaussian scale optimized for thresholding
max_gaussian_threshold = max_db_threshold / 10. + 30. # db -> Gaussian scale optimized for thresholding
if selected_tiles.size:
scaling = 256 / (np.mean(tiles) + 3 * np.std(tiles))
gaussian_threshold = determine_em_threshold(tiles[selected_tiles, :, :], scaling)
threshold_db = 10. * (gaussian_threshold - 30.)
log.info(f'Threshold determined to be {threshold_db} db')
if gaussian_threshold > max_gaussian_threshold:
log.warning(f'Threshold too high! Using maximum threshold {max_db_threshold} db')
gaussian_threshold = max_gaussian_threshold
else:
log.warning(f'Tile selection did not converge! using default threshold {max_db_threshold} db')
gaussian_threshold = max_gaussian_threshold
gaussian_array = untile_array(tiles, array.shape)
water_map = np.ma.masked_less_equal(gaussian_array, gaussian_threshold).mask
water_map &= ~array.mask
write_cog(str(out_raster).replace('.tif', f'_{pol}_initial.tif'), water_map, transform=out_transform,
epsg_code=out_epsg, dtype=gdal.GDT_Byte, nodata_value=False)
log.info(f'Refining initial {pol} water extent map using Fuzzy Logic')
array = np.ma.masked_where(~water_map, array)
gaussian_lower_limit = np.log10(np.ma.median(array)) + 30.
water_map = fuzzy_refinement(
water_map, gaussian_array, hand_array, pixel_size=out_transform[1],
gaussian_thresholds=(gaussian_lower_limit, gaussian_threshold), membership_threshold=membership_threshold
)
water_map &= ~array.mask
write_cog(str(out_raster).replace('.tif', f'_{pol}_fuzzy.tif'), water_map, transform=out_transform,
epsg_code=out_epsg, dtype=gdal.GDT_Byte, nodata_value=False)
water_extent_maps.append(water_map)
log.info('Combining Fuzzy VH and VV extent map')
combined_water_map = np.logical_or(*water_extent_maps)
combined_segments = measure.label(combined_water_map, connectivity=2)
combined_water_map = remove_small_segments(combined_segments)
write_cog(out_raster, combined_water_map, transform=out_transform,
epsg_code=out_epsg, dtype=gdal.GDT_Byte, nodata_value=False)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('out_raster', help='Water map GeoTIFF to create')
# FIXME: Decibel RTCs would be real nice.
parser.add_argument('vv_raster',
help='Sentinel-1 RTC GeoTIFF raster, in power scale, with VV polarization')
parser.add_argument('vh_raster',
help='Sentinel-1 RTC GeoTIFF raster, in power scale, with VH polarization')
parser.add_argument('--hand-raster',
help='Height Above Nearest Drainage (HAND) GeoTIFF aligned to the RTC rasters. '
'If not specified, HAND data will be extracted from a Copernicus GLO-30 DEM based HAND.')
parser.add_argument('--tile-shape', type=int, nargs=2, default=(100, 100),
help='shape (height, width) in pixels to tile the image to')
parser.add_argument('--max-vv-threshold', type=float, default=-15.5,
help='Maximum threshold value to use for `vv_raster` in decibels (db)')
parser.add_argument('--max-vh-threshold', type=float, default=-23.0,
help='Maximum threshold value to use for `vh_raster` in decibels (db)')
parser.add_argument('--hand-threshold', type=float, default=15.,
help='The maximum height above nearest drainage in meters to consider a pixel valid')
parser.add_argument('--hand-fraction', type=float, default=0.8,
help='The minimum fraction of valid HAND pixels required in a tile for thresholding')
parser.add_argument('--membership-threshold', type=float, default=0.45,
help='The average membership to the fuzzy indicators required for a water pixel')
parser.add_argument('-v', '--verbose', action='store_true', help='Turn on verbose logging')
args = parser.parse_args()
level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=level)
log.debug(' '.join(sys.argv))
make_water_map(args.out_raster, args.vv_raster, args.vh_raster, args.hand_raster, args.tile_shape,
args.max_vv_threshold, args.max_vh_threshold, args.hand_threshold, args.hand_fraction,
args.membership_threshold)
log.info(f'Water map created successfully: {args.out_raster}')
|
py | 1a4fcfb8f35ca165c09b8de361946a3241701390 | from __future__ import absolute_import, division, print_function, \
unicode_literals
__version__ = b'0.6.2'
|
py | 1a4fcfd95554ff13c6ada7b304faad0fcb29bb37 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions used for creating pairs from labeled and unlabeled data (currently used only for the siamese network)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from sklearn import metrics
from sklearn.neighbors import NearestNeighbors
def get_choices(arr, num_choices, valid_range, not_arr=None, replace=False):
"""Select n=num_choices choices from arr, with the following constraints.
Args:
arr: if arr is an integer, the pool of choices is interpreted as [0, arr]
num_choices: number of choices
valid_range: choice > valid_range[0] and choice < valid_range[1]
not_arr: choice not in not_arr
replace: if True, draw choices with replacement
Returns:
choices.
"""
if not_arr is None:
not_arr = []
if isinstance(valid_range, int):
valid_range = [0, valid_range]
# make sure we have enough valid points in arr
if isinstance(arr, tuple):
if min(arr[1], valid_range[1]) - max(arr[0], valid_range[0]) < num_choices:
raise ValueError('Not enough elements in arr are within valid_range!')
n_arr = arr[1]
arr0 = arr[0]
arr = collections.defaultdict(lambda: -1)
get_arr = lambda x: x
replace = True
else:
greater_than = np.array(arr) > valid_range[0]
less_than = np.array(arr) < valid_range[1]
if np.sum(np.logical_and(greater_than, less_than)) < num_choices:
raise ValueError('Not enough elements in arr are within valid_range!')
# make a copy of arr, since we'll be editing the array
n_arr = len(arr)
arr0 = 0
arr = np.array(arr, copy=True)
get_arr = lambda x: arr[x]
not_arr_set = set(not_arr)
def get_choice():
arr_idx = random.randint(arr0, n_arr - 1)
while get_arr(arr_idx) in not_arr_set:
arr_idx = random.randint(arr0, n_arr - 1)
return arr_idx
if isinstance(not_arr, int):
not_arr = [not_arr]  # a bare int cannot be passed to list(); wrap it in a list instead
choices = []
for _ in range(num_choices):
arr_idx = get_choice()
while get_arr(arr_idx) <= valid_range[0] or get_arr(
arr_idx) >= valid_range[1]:
arr_idx = get_choice()
choices.append(int(get_arr(arr_idx)))
if not replace:
arr[arr_idx], arr[n_arr - 1] = arr[n_arr - 1], arr[arr_idx]
n_arr -= 1
return choices
def create_pairs_from_labeled_data(x, digit_indices, use_classes=None):
"""Positive and negative pair creation from labeled data.
Alternates between positive and negative pairs.
Args:
x: labeled data
digit_indices: nested array of depth 2 (in other words a jagged matrix),
where row i contains the indices in x of all examples labeled with class i
use_classes: in cases where we only want pairs from a subset of the
classes, use_classes is a list of the classes to draw pairs from, else it
is None
Returns:
pairs: positive and negative pairs
labels: corresponding labels
"""
n_clusters = len(digit_indices)
if use_classes is None:
use_classes = list(range(n_clusters))
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(n_clusters)]) - 1
for d in use_classes:
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, n_clusters)
dn = (d + inc) % n_clusters
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
pairs = np.array(pairs).reshape((len(pairs), 2) + x.shape[1:])
labels = np.array(labels)
return pairs, labels
def create_pairs_from_unlabeled_data(x1,
x2=None,
y=None,
p=None,
k=5,
tot_pairs=None,
pre_shuffled=False,
verbose=None):
"""Generates positive and negative pairs for the siamese network from unlabeled data.
Draws from the k nearest neighbors (where k is the
provided parameter) of each point to form pairs. Number of neighbors
to draw is determined by tot_pairs, if provided, or k if not provided.
Args:
x1: input data array
x2: parallel data array (pairs will exactly shadow the indices of x1, but be
drawn from x2)
y: true labels (if available) purely for checking how good our pairs are
p: permutation vector - in cases where the array is shuffled and we use a
precomputed knn matrix (where knn is performed on unshuffled data), we
keep track of the permutations with p, and apply the same permutation to
the precomputed knn matrix
k: the number of neighbors to use (the 'k' in knn)
tot_pairs: total number of pairs to produce; if provided, it caps the number of
neighbor pairs drawn per point
pre_shuffled: whether x1 (and y) have already been shuffled according to p
verbose: flag for extra debugging printouts
Returns:
pairs for x1, (pairs for x2 if x2 is provided), labels
(inferred by knn), (labels_true, the absolute truth, if y
is provided
"""
if x2 is not None and x1.shape != x2.shape:
raise ValueError('x1 and x2 must be the same shape!')
n = len(p) if p is not None else len(x1)
pairs_per_pt = max(1, min(k, int(
tot_pairs / (n * 2)))) if tot_pairs is not None else max(1, k)
if p is not None and not pre_shuffled:
x1 = x1[p[:n]]
y = y[p[:n]]
pairs = []
pairs2 = []
labels = []
true = []
verbose = True
if verbose:
print('computing k={} nearest neighbors...'.format(k))
if len(x1.shape) > 2:
x1_flat = x1.reshape(x1.shape[0], np.prod(x1.shape[1:]))[:n]
else:
x1_flat = x1[:n]
print('I am hereee', x1_flat.shape)
nbrs = NearestNeighbors(n_neighbors=k + 1).fit(x1_flat)
print('NearestNeighbors')
_, idx = nbrs.kneighbors(x1_flat)
print('NearestNeighbors2')
# for each row, remove the element itself from its list of neighbors
# (we don't care that each point is its own closest neighbor)
new_idx = np.empty((idx.shape[0], idx.shape[1] - 1))
print('replace')
assert (idx >= 0).all()
print('I am hereee', idx.shape[0])
for i in range(idx.shape[0]):
try:
new_idx[i] = idx[i, idx[i] != i][:idx.shape[1] - 1]
except Exception as e:
print(idx[i, Ellipsis], new_idx.shape, idx.shape)
raise e
idx = new_idx.astype(int)  # np.int is removed in newer NumPy versions; use the builtin int
k_max = min(idx.shape[1], k + 1)
if verbose:
print('creating pairs...')
print('ks', n, k_max, k, pairs_per_pt)
# pair generation loop (alternates between true and false pairs)
consecutive_fails = 0
for i in range(n):
# get_choices sometimes fails with precomputed results. if this happens
# too often, we relax the constraint on k
if consecutive_fails > 5:
k_max = min(idx.shape[1], int(k_max * 2))
consecutive_fails = 0
if verbose and i % 10000 == 0:
print('Iter: {}/{}'.format(i, n))
# pick points from neighbors of i for positive pairs
try:
choices = get_choices(
idx[i, :k_max], pairs_per_pt, valid_range=[-1, np.inf], replace=False)
consecutive_fails = 0
except ValueError:
consecutive_fails += 1
continue
assert i not in choices
# form the pairs
new_pos = [[x1[i], x1[c]] for c in choices]
if x2 is not None:
new_pos2 = [[x2[i], x2[c]] for c in choices]
if y is not None:
pos_labels = [[y[i] == y[c]] for c in choices]
# pick points *not* in neighbors of i for negative pairs
try:
choices = get_choices((0, n),
pairs_per_pt,
valid_range=[-1, np.inf],
not_arr=idx[i, :k_max],
replace=False)
consecutive_fails = 0
except ValueError:
consecutive_fails += 1
continue
# form negative pairs
new_neg = [[x1[i], x1[c]] for c in choices]
if x2 is not None:
new_neg2 = [[x2[i], x2[c]] for c in choices]
if y is not None:
neg_labels = [[y[i] == y[c]] for c in choices]
# add pairs to our list
labels += [1] * len(new_pos) + [0] * len(new_neg)
pairs += new_pos + new_neg
if x2 is not None:
pairs2 += new_pos2 + new_neg2
if y is not None:
true += pos_labels + neg_labels
# package return parameters for output
ret = [np.array(pairs).reshape((len(pairs), 2) + x1.shape[1:])]
if x2 is not None:
ret.append(np.array(pairs2).reshape((len(pairs2), 2) + x2.shape[1:]))
ret.append(np.array(labels))
if y is not None:
true = np.array(true).astype(int).reshape(-1, 1)
if verbose:
# if true vectors are provided, we can take a peek to check
# the validity of our kNN approximation
print('confusion matrix for pairs and approximated labels:')
print(metrics.confusion_matrix(true, labels) / true.shape[0])
print(metrics.confusion_matrix(true, labels))
ret.append(true)
return ret
|
py | 1a4fd195b1b0144c65d6c670b477df147b027416 | #!/usr/bin/env python
r"""Compute SSP/PCA projections for ECG artifacts.
Examples
--------
.. code-block:: console
$ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" \
--l-freq 1 --h-freq 100 \
--rej-grad 3000 --rej-mag 4000 --rej-eeg 100
"""
# Authors : Alexandre Gramfort, Ph.D.
# Martin Luessi, Ph.D.
import os
import sys
import mne
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-i", "--in", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("--tmin", dest="tmin", type="float",
help="Time before event in seconds",
default=-0.2)
parser.add_option("--tmax", dest="tmax", type="float",
help="Time after event in seconds",
default=0.4)
parser.add_option("-g", "--n-grad", dest="n_grad", type="int",
help="Number of SSP vectors for gradiometers",
default=2)
parser.add_option("-m", "--n-mag", dest="n_mag", type="int",
help="Number of SSP vectors for magnetometers",
default=2)
parser.add_option("-e", "--n-eeg", dest="n_eeg", type="int",
help="Number of SSP vectors for EEG",
default=2)
parser.add_option("--l-freq", dest="l_freq", type="float",
help="Filter low cut-off frequency in Hz",
default=1)
parser.add_option("--h-freq", dest="h_freq", type="float",
help="Filter high cut-off frequency in Hz",
default=100)
parser.add_option("--ecg-l-freq", dest="ecg_l_freq", type="float",
help="Filter low cut-off frequency in Hz used "
"for ECG event detection",
default=5)
parser.add_option("--ecg-h-freq", dest="ecg_h_freq", type="float",
help="Filter high cut-off frequency in Hz used "
"for ECG event detection",
default=35)
parser.add_option("-p", "--preload", dest="preload",
help="Temporary file used during computation "
"(to save memory)",
default=True)
parser.add_option("-a", "--average", dest="average", action="store_true",
help="Compute SSP after averaging",
default=False) # XXX: change to default=True in 0.17
parser.add_option("--proj", dest="proj",
help="Use SSP projections from a fif file.",
default=None)
parser.add_option("--filtersize", dest="filter_length", type="int",
help="Number of taps to use for filtering",
default=2048)
parser.add_option("-j", "--n-jobs", dest="n_jobs", type="int",
help="Number of jobs to run in parallel",
default=1)
parser.add_option("-c", "--channel", dest="ch_name",
help="Channel to use for ECG detection "
"(Required if no ECG found)",
default=None)
parser.add_option("--rej-grad", dest="rej_grad", type="float",
help="Gradiometers rejection parameter "
"in fT/cm (peak to peak amplitude)",
default=2000)
parser.add_option("--rej-mag", dest="rej_mag", type="float",
help="Magnetometers rejection parameter "
"in fT (peak to peak amplitude)",
default=3000)
parser.add_option("--rej-eeg", dest="rej_eeg", type="float",
help="EEG rejection parameter in uV "
"(peak to peak amplitude)",
default=50)
parser.add_option("--rej-eog", dest="rej_eog", type="float",
help="EOG rejection parameter in uV "
"(peak to peak amplitude)",
default=250)
parser.add_option("--avg-ref", dest="avg_ref", action="store_true",
help="Add EEG average reference proj",
default=False)
parser.add_option("--no-proj", dest="no_proj", action="store_true",
help="Exclude the SSP projectors currently "
"in the fiff file",
default=False)
parser.add_option("--bad", dest="bad_fname",
help="Text file containing bad channels list "
"(one per line)",
default=None)
parser.add_option("--event-id", dest="event_id", type="int",
help="ID to use for events",
default=999)
parser.add_option("--event-raw", dest="raw_event_fname",
help="raw file to use for event detection",
default=None)
parser.add_option("--tstart", dest="tstart", type="float",
help="Start artifact detection after tstart seconds",
default=0.)
parser.add_option("--qrsthr", dest="qrs_threshold", type="string",
help="QRS detection threshold. Between 0 and 1. Can "
"also be 'auto' for automatic selection",
default='auto')
options, args = parser.parse_args()
raw_in = options.raw_in
if raw_in is None:
parser.print_help()
sys.exit(1)
tmin = options.tmin
tmax = options.tmax
n_grad = options.n_grad
n_mag = options.n_mag
n_eeg = options.n_eeg
l_freq = options.l_freq
h_freq = options.h_freq
ecg_l_freq = options.ecg_l_freq
ecg_h_freq = options.ecg_h_freq
average = options.average
preload = options.preload
filter_length = options.filter_length
n_jobs = options.n_jobs
ch_name = options.ch_name
reject = dict(grad=1e-13 * float(options.rej_grad),
mag=1e-15 * float(options.rej_mag),
eeg=1e-6 * float(options.rej_eeg),
eog=1e-6 * float(options.rej_eog))
avg_ref = options.avg_ref
no_proj = options.no_proj
bad_fname = options.bad_fname
event_id = options.event_id
proj_fname = options.proj
raw_event_fname = options.raw_event_fname
tstart = options.tstart
qrs_threshold = options.qrs_threshold
if qrs_threshold != 'auto':
try:
qrs_threshold = float(qrs_threshold)
except ValueError:
raise ValueError('qrsthr must be "auto" or a float')
if bad_fname is not None:
with open(bad_fname, 'r') as fid:
bads = [w.rstrip() for w in fid.readlines()]
print('Bad channels read : %s' % bads)
else:
bads = []
if raw_in.endswith('_raw.fif') or raw_in.endswith('-raw.fif'):
prefix = raw_in[:-8]
else:
prefix = raw_in[:-4]
ecg_event_fname = prefix + '_ecg-eve.fif'
if average:
ecg_proj_fname = prefix + '_ecg_avg-proj.fif'
else:
ecg_proj_fname = prefix + '_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_in, preload=preload)
if raw_event_fname is not None:
raw_event = mne.io.read_raw_fif(raw_event_fname)
else:
raw_event = raw
flat = None # XXX : not exposed to the user
projs, events = mne.preprocessing.compute_proj_ecg(
raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name, reject, flat, bads, avg_ref,
no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart, qrs_threshold,
copy=False)
raw.close()
if raw_event_fname is not None:
raw_event.close()
if proj_fname is not None:
print('Including SSP projections from : %s' % proj_fname)
# append the ecg projs, so they are last in the list
projs = mne.read_proj(proj_fname) + projs
if isinstance(preload, str) and os.path.exists(preload):
os.remove(preload)
print("Writing ECG projections in %s" % ecg_proj_fname)
mne.write_proj(ecg_proj_fname, projs)
print("Writing ECG events in %s" % ecg_event_fname)
mne.write_events(ecg_event_fname, events)
mne.utils.run_command_if_main()
|
py | 1a4fd1aae573147b3ec55f9f559569ad1a71b290 | """
Aggregation is a type of association in which there is at least one
object that, essentially, needs another one.
"""
class CarrinhoDeCompras():
def __init__(self):
self.__produtos = []
def inserir_produto(self, produto):
self.__produtos.append(produto)
def soma_total(self):
soma = 0
for item in self.__produtos:
soma += item.preco
return soma
def mostra_itens(self):
for item in self.__produtos:
print(item.descricao)
def __del__(self):
print('Objeto CarrinhoDeCompras foi apagado')
class Produto():
def __init__(self, descricao, preco):
self.__descricao = descricao
self.__preco = preco
@property
def descricao(self):
return self.__descricao.upper().strip()
@property
def preco(self):
x = str(self.__preco)
return float(x.replace(',', '.').strip())
def __del__(self):
print('Objeto produto foi apagado')
# ↑ Method to see when the object stops being used
# in the case of aggregation, deleting the object that uses another one does not delete that other object.
carrinho_1 = CarrinhoDeCompras()
p1 = Produto('moto de brinquedo', 10.35)
p2 = Produto('boneca', '15,50')
carrinho_1.inserir_produto(p2)
carrinho_1.inserir_produto(p1)
carrinho_1.inserir_produto(p2)
print(carrinho_1.soma_total())
carrinho_1.mostra_itens()
del carrinho_1
print(p1.descricao) # The p1 object still exists, even after the cart has been deleted.
print('#'*30)
"""
In this case the CarrinhoDeCompras class depends on Produto for any of its activities,
while the Produto class does not need the other one at all.
""" |
py | 1a4fd291c5883efc39293b292efa2eae6baf0743 | """This module is to declare global objects."""
from datetime import datetime
# Configuration Options
global moesif_options
moesif_options = {}
# Debug Flag
global DEBUG
DEBUG = True
# Patch Flag
global MOESIF_PATCH
MOESIF_PATCH = False
# MoesifAPI Client
global api_client
api_client = None
# App Config class
global app_config
app_config = None
# App Config
global config
config = None
# App Config sampling percentage
global sampling_percentage
sampling_percentage = 100
# App Config eTag
global config_etag
config_etag = None
# App Config last updated time
global last_updated_time
last_updated_time = datetime.utcnow()
|
py | 1a4fd2b0dc62ba873714c781851b1f8a6bb1430b | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Askalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import AskalcoinTestFramework
from test_framework.util import assert_raises_rpc_error
class UptimeTest(AskalcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_negative_time()
self._test_uptime()
def _test_negative_time(self):
assert_raises_rpc_error(-8, "Mocktime can not be negative: -1.", self.nodes[0].setmocktime, -1)
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert self.nodes[0].uptime() >= wait_time
if __name__ == '__main__':
UptimeTest().main()
|
py | 1a4fd325402a62e1ee673e02762c91745129fbbc | import glob
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import multiprocessing
import os.path
import csv
import copy
import joblib
from torchvision import datasets
import torchvision
import seaborn as sns; sns.set(color_codes=True)
sns.set_style("white")
from pdb import set_trace as bp
USE_CUDA = torch.cuda.is_available()
def w(v):
if USE_CUDA:
return v.cuda()
return v
cache = joblib.Memory(location='_cache', verbose=0)
from meta_module import *
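# Single-layer LSTM learned optimizer: maps per-parameter gradients (optionally preprocessed into log-magnitude and
# sign components, following Andrychowicz et al., 2016) to parameter updates.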
class OptimizerOneLayer(nn.Module):
def __init__(self, preproc=False, hidden_sz=10, preproc_factor=10.0):
super().__init__()
self.hidden_sz = hidden_sz
if preproc:
self.recurs = nn.LSTMCell(2, hidden_sz)
else:
self.recurs = nn.LSTMCell(1, hidden_sz)
self.output = nn.Linear(hidden_sz, 1)
self.preproc = preproc
self.preproc_factor = preproc_factor
self.preproc_threshold = np.exp(-preproc_factor)
def forward(self, inp, hidden, cell):
if self.preproc:
inp = inp.data
inp2 = w(torch.zeros(inp.size()[0], 2))
keep_grads = (torch.abs(inp) >= self.preproc_threshold).squeeze()
inp2[:, 0][keep_grads] = (torch.log(torch.abs(inp[keep_grads]) + 1e-8) / self.preproc_factor).squeeze()
inp2[:, 1][keep_grads] = torch.sign(inp[keep_grads]).squeeze()
inp2[:, 0][~keep_grads] = -1
inp2[:, 1][~keep_grads] = (float(np.exp(self.preproc_factor)) * inp[~keep_grads]).squeeze()
inp = w(Variable(inp2))
hidden0, cell0 = self.recurs(inp, (hidden[0], cell[0]))
#hidden1, cell1 = self.recurs2(hidden0, (hidden[1], cell[1]))
return self.output(hidden0), (hidden0, ), (cell0, )
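# Detach a tensor from the current graph while returning a new leaf that still tracks gradients.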
def detach_var(v):
var = w(Variable(v.data, requires_grad=True))
var.retain_grad()
return var
import functools
def rsetattr(obj, attr, val):
pre, _, post = attr.rpartition('.')
return setattr(rgetattr(obj, pre) if pre else obj, post, val)
# using wonder's beautiful simplification: https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects/31174427?noredirect=1#comment86638618_31174427
def rgetattr(obj, attr, *args):
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split('.'))
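# Run one optimization episode: the LSTM optimizer proposes per-parameter updates for the optimizee each iteration;
# when training, the accumulated optimizee loss is backpropagated into the optimizer every `unroll` iterations
# (note that `unroll` is forced to 1 below).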
def do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=True):
if should_train:
opt_net.train()
else:
opt_net.eval()
unroll = 1
target = target_cls(training=should_train)
optimizee = w(target_to_opt())
n_params = 0
for name, p in optimizee.all_named_parameters():
n_params += int(np.prod(p.size()))
hidden_states = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
cell_states = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
all_losses_ever = []
if should_train:
meta_opt.zero_grad()
all_losses = None
for iteration in range(1, optim_it + 1):
loss = optimizee(target)
if all_losses is None:
all_losses = loss
else:
all_losses += loss
all_losses_ever.append(loss.data.cpu().numpy())
loss.backward(retain_graph=should_train)
offset = 0
result_params = {}
hidden_states2 = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
cell_states2 = [w(Variable(torch.zeros(n_params, opt_net.hidden_sz))) for _ in range(2)]
for name, p in optimizee.all_named_parameters():
cur_sz = int(np.prod(p.size()))
# We do this so the gradients are disconnected from the graph but we still get
# gradients from the rest
if p.grad is not None:
gradients = detach_var(p.grad.view(cur_sz, 1))
updates, new_hidden, new_cell = opt_net(
gradients,
[h[offset:offset+cur_sz] for h in hidden_states],
[c[offset:offset+cur_sz] for c in cell_states]
)
for i in range(len(new_hidden)):
hidden_states2[i][offset:offset+cur_sz] = new_hidden[i]
cell_states2[i][offset:offset+cur_sz] = new_cell[i]
result_params[name] = p + updates.view(*p.size()) * out_mul
result_params[name].retain_grad()
else:
result_params[name] = p
result_params[name].retain_grad()
offset += cur_sz
if iteration % unroll == 0:
if should_train:
meta_opt.zero_grad()
all_losses.backward()
meta_opt.step()
all_losses = None
optimizee = w(target_to_opt())
optimizee.load_state_dict(result_params)
optimizee.zero_grad()
hidden_states = [detach_var(v) for v in hidden_states2]
cell_states = [detach_var(v) for v in cell_states2]
else:
for name, p in optimizee.all_named_parameters():
rsetattr(optimizee, name, result_params[name])
assert len(list(optimizee.all_named_parameters()))
hidden_states = hidden_states2
cell_states = cell_states2
return all_losses_ever
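# Meta-optimization driver (disk-cached via joblib): evaluates the learned optimizer each epoch and keeps the state
# dict with the lowest observed loss; the inner training loop is currently commented out.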
@cache.cache
def fit_optimizer(target_cls, target_to_opt, preproc=False, unroll=20, optim_it=100, n_epochs=20, n_tests=100, lr=0.001, out_mul=1.0, test_target=None):
opt_net = w(OptimizerOneLayer(preproc=preproc))
meta_opt = optim.Adam(opt_net.parameters(), lr=lr)
best_net = None
best_loss = 100000000000000000
for _ in tqdm(range(n_epochs)):
'''
print("train")
for _ in tqdm(range(20)):
do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=True)
'''
if test_target is not None:
loss = (np.mean([
np.sum(do_fit(opt_net, meta_opt, target_cls, test_target, unroll, optim_it, n_epochs, out_mul, should_train=False))
for _ in tqdm(range(1))
]))
else:
loss = (np.mean([
np.sum(do_fit(opt_net, meta_opt, target_cls, target_to_opt, unroll, optim_it, n_epochs, out_mul, should_train=False))
for _ in tqdm(range(1))
]))
print(loss)
if loss < best_loss:
print(best_loss, loss)
best_loss = loss
best_net = copy.deepcopy(opt_net.state_dict())
return best_loss, best_net
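# Batch provider for CIFAR-10: wraps a DataLoader over half of the training set (train or held-out half) and serves
# successive mini-batches through sample().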
class CIFAR10Loss:
def __init__(self, training=True):
dataset = datasets.CIFAR10(
'./data/CIFAR10', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
)
indices = list(range(len(dataset)))
np.random.RandomState(10).shuffle(indices)
if training:
indices = indices[:len(indices) // 2]
else:
indices = indices[len(indices) // 2:]
self.loader = torch.utils.data.DataLoader(
dataset, batch_size=128,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices))
self.batches = []
self.cur_batch = 0
def sample(self):
if self.cur_batch >= len(self.batches):
self.batches = []
self.cur_batch = 0
for b in self.loader:
self.batches.append(b)
batch = self.batches[self.cur_batch]
self.cur_batch += 1
return batch
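# Small fully-connected CIFAR-10 classifier built from MetaModule layers; forward() samples a batch from the loss
# object and returns the negative log-likelihood loss, so the learned optimizer can drive it directly.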
class CIFAR10Net(MetaModule):
def __init__(self, layer_size=20, n_layers=1, **kwargs):
super().__init__()
inp_size = 3 * 32 * 32
self.layers = {}
for i in range(n_layers):
self.layers[f'mat_{i}'] = MetaLinear(inp_size, layer_size)
inp_size = layer_size
self.layers['final_mat'] = MetaLinear(inp_size, 10)
self.layers = nn.ModuleDict(self.layers)
self.activation = nn.Sigmoid()
self.loss = nn.NLLLoss()
def all_named_parameters(self):
return [(k, v) for k, v in self.named_parameters()]
def forward(self, loss):
inp, out = loss.sample()
inp = w(Variable(inp.view(inp.size()[0], 3*32*32)))
out = w(Variable(out))
cur_layer = 0
while f'mat_{cur_layer}' in self.layers:
inp = self.activation(self.layers[f'mat_{cur_layer}'](inp))
cur_layer += 1
inp = F.log_softmax(self.layers['final_mat'](inp), dim=1)
l = self.loss(inp, out)
return l
from resnet_meta import resnet50
class CIFAR10ResNet(MetaModule):
def __init__(self):
super().__init__()
self.net = resnet50()
self.loss = nn.CrossEntropyLoss()
def all_named_parameters(self):
return [(k, v) for k, v in self.named_parameters()]
def forward(self, loss):
inp, out = loss.sample()
inp = w(Variable(inp.view(inp.size()[0], 3, 32, 32)))
out = w(Variable(out))
inp = self.net(inp)
l = self.loss(inp, out)
return l
loss, CIFAR10_optimizer = fit_optimizer(CIFAR10Loss, CIFAR10Net, lr=0.01, n_epochs=50, n_tests=20, out_mul=0.1, preproc=True, test_target=CIFAR10ResNet)
print(loss) |
py | 1a4fd38d19926e15d2aba3ca5fdda00de967f1b3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
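# Request wrapper for the alipay.acquire.query API: request fields are exposed as properties and get_params()
# assembles the flat parameter dictionary sent to the gateway.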
class AlipayAcquireQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._out_trade_no = None
self._trade_no = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.acquire.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.out_trade_no:
if hasattr(self.out_trade_no, 'to_alipay_dict'):
params['out_trade_no'] = json.dumps(obj=self.out_trade_no.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['out_trade_no'] = self.out_trade_no
if self.trade_no:
if hasattr(self.trade_no, 'to_alipay_dict'):
params['trade_no'] = json.dumps(obj=self.trade_no.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['trade_no'] = self.trade_no
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
py | 1a4fd45c8bb05edba29f34189e73ed37c6588a62 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager execution workflow with RevNet train on CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
tfe = tf.contrib.eager
def main(_):
"""Eager execution workflow with RevNet trained on CIFAR-10."""
tf.enable_eager_execution()
config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
data_dir=FLAGS.data_dir, config=config)
model = revnet.RevNet(config=config)
global_step = tf.train.get_or_create_global_step() # Ensure correct summary
global_step.assign(1)
learning_rate = tf.train.piecewise_constant(
global_step, config.lr_decay_steps, config.lr_list)
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum=config.momentum)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" "
"with global_step: {}".format(latest_path, global_step.numpy()))
sys.stdout.flush()
for x, y in ds_train:
train_one_iter(model, x, y, optimizer, global_step=global_step)
if global_step.numpy() % config.log_every == 0:
it_test = ds_test.make_one_shot_iterator()
acc_test, loss_test = evaluate(model, it_test)
if FLAGS.validate:
it_train = ds_train_one_shot.make_one_shot_iterator()
it_validation = ds_validation.make_one_shot_iterator()
acc_train, loss_train = evaluate(model, it_train)
acc_validation, loss_validation = evaluate(model, it_validation)
print("Iter {}, "
"training set accuracy {:.4f}, loss {:.4f}; "
"validation set accuracy {:.4f}, loss {:4.f}"
"test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_train, loss_train, acc_validation,
loss_validation, acc_test, loss_test))
else:
print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_test, loss_test))
sys.stdout.flush()
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Training accuracy", acc_train)
tf.contrib.summary.scalar("Test accuracy", acc_test)
tf.contrib.summary.scalar("Training loss", loss_train)
tf.contrib.summary.scalar("Test loss", loss_test)
if FLAGS.validate:
tf.contrib.summary.scalar("Validation accuracy", acc_validation)
tf.contrib.summary.scalar("Validation loss", loss_validation)
if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" "
"with global_step: {}".format(saved_path, global_step.numpy()))
sys.stdout.flush()
def get_config(config_name="revnet-38", dataset="cifar-10"):
"""Return configuration."""
print("Config: {}".format(config_name))
sys.stdout.flush()
config = {
"revnet-38": config_.get_hparams_cifar_38(),
"revnet-110": config_.get_hparams_cifar_110(),
"revnet-164": config_.get_hparams_cifar_164(),
}[config_name]
if dataset == "cifar-10":
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
else:
config.add_hparam("n_classes", 100)
config.add_hparam("dataset", "cifar-100")
return config
def get_datasets(data_dir, config):
"""Return dataset."""
if data_dir is None:
raise ValueError("No supplied data directory")
if not os.path.exists(data_dir):
raise ValueError("Data directory {} does not exist".format(data_dir))
if config.dataset not in ["cifar-10", "cifar-100"]:
raise ValueError("Unknown dataset {}".format(config.dataset))
print("Training on {} dataset.".format(config.dataset))
sys.stdout.flush()
data_dir = os.path.join(data_dir, config.dataset)
if FLAGS.validate:
# 40k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
# 10k Training set
ds_validation = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="validation",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
else:
# 50k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=True,
batch_size=config.batch_size,
epochs=config.epochs,
shuffle=config.shuffle,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.batch_size)
ds_validation = None
# Always compute loss and accuracy on whole test set
ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
ds_test = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="test",
data_aug=False,
batch_size=config.eval_batch_size,
epochs=1,
shuffle=False,
data_format=config.data_format,
dtype=config.dtype,
prefetch=config.eval_batch_size)
return ds_train, ds_train_one_shot, ds_validation, ds_test
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
grads, vars_, logits, loss = model.compute_gradients(
inputs, labels, training=True)
optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
return logits, loss
def evaluate(model, iterator):
"""Compute accuracy with the given dataset iterator."""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for x, y in iterator:
logits, _ = model(x, training=False)
loss = model.compute_loss(logits=logits, labels=y)
accuracy(
labels=tf.cast(y, tf.int64),
predictions=tf.argmax(logits, axis=1, output_type=tf.int64))
mean_loss(loss)
return accuracy.result().numpy(), mean_loss.result().numpy()
if __name__ == "__main__":
flags.DEFINE_string(
"data_dir", default=None, help="Directory to load tfrecords")
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"validate",
default=False,
help="[Optional] Use the validation set or not for hyperparameter search")
flags.DEFINE_string(
"dataset",
default="cifar-10",
help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
flags.DEFINE_string(
"config",
default="revnet-38",
help="[Optional] Architecture of network. "
"Other options include `revnet-110` and `revnet-164`")
FLAGS = flags.FLAGS
tf.app.run(main)
|
py | 1a4fd53b6608605ee28ad620e2452d49c94a0ef1 | # write tests for transcribes
from seqparser import (
transcribe,
reverse_transcribe)
def test_freebie_transcribe_1():
"""
This one is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert True
def test_freebie_transcribe_2():
"""
This too is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert 1 != 2
def test_transcribe():
"""
Write your unit test for the
transcribe function here.
"""
assert transcribe("ACGT") == "UGCA"
def test_reverse_transcribe():
"""
Write your unit test for the
reverse transcribe function here.
"""
assert reverse_transcribe("ACGT") == "ACGU"
|
py | 1a4fd55776dc59abc72e90311afc08fac2a24579 | from __future__ import annotations
import asyncio
import copy
import functools
import logging
import re
import typing
from typing import Annotated, Awaitable, Callable, Coroutine, Optional, Tuple, Any, TYPE_CHECKING
from naff.client.const import MISSING, logger_name
from naff.client.errors import CommandOnCooldown, CommandCheckFailure, MaxConcurrencyReached
from naff.client.mixins.serialization import DictSerializationMixin
from naff.client.utils.attr_utils import define, field, docs
from naff.client.utils.misc_utils import get_parameters, get_object_name, maybe_coroutine
from naff.client.utils.serializer import no_export_meta
from naff.models.naff.cooldowns import Cooldown, Buckets, MaxConcurrency
from naff.models.naff.protocols import Converter
if TYPE_CHECKING:
from naff.models.naff.context import Context
__all__ = ("BaseCommand", "check", "cooldown", "max_concurrency")
log = logging.getLogger(logger_name)
kwargs_reg = re.compile(r"^\*\*\w")
args_reg = re.compile(r"^\*\w")
@define()
class BaseCommand(DictSerializationMixin):
"""
An object all commands inherit from. Outlines the basic structure of a command, and handles checks.
Attributes:
extension: The extension this command belongs to.
enabled: Whether this command is enabled
checks: Any checks that must be run before this command can be run
callback: The coroutine to be called for this command
error_callback: The coroutine to be called when an error occurs
pre_run_callback: A coroutine to be called before this command is run **but** after the checks
post_run_callback: A coroutine to be called after this command has run
"""
extension: Any = field(default=None, metadata=docs("The extension this command belongs to") | no_export_meta)
enabled: bool = field(default=True, metadata=docs("Whether this can be run at all") | no_export_meta)
checks: list = field(
factory=list, metadata=docs("Any checks that must be *checked* before the command can run") | no_export_meta
)
cooldown: Cooldown = field(
default=MISSING, metadata=docs("An optional cooldown to apply to the command") | no_export_meta
)
max_concurrency: MaxConcurrency = field(
default=MISSING,
metadata=docs("An optional maximum number of concurrent instances to apply to the command") | no_export_meta,
)
callback: Callable[..., Coroutine] = field(
default=None, metadata=docs("The coroutine to be called for this command") | no_export_meta
)
error_callback: Callable[..., Coroutine] = field(
default=None, metadata=no_export_meta | docs("The coroutine to be called when an error occurs")
)
pre_run_callback: Callable[..., Coroutine] = field(
default=None,
metadata=no_export_meta
| docs("The coroutine to be called before the command is executed, **but** after the checks"),
)
post_run_callback: Callable[..., Coroutine] = field(
default=None, metadata=no_export_meta | docs("The coroutine to be called after the command has executed")
)
def __attrs_post_init__(self) -> None:
if self.callback is not None:
if hasattr(self.callback, "checks"):
self.checks += self.callback.checks
if hasattr(self.callback, "cooldown"):
self.cooldown = self.callback.cooldown
if hasattr(self.callback, "max_concurrency"):
self.max_concurrency = self.callback.max_concurrency
def __hash__(self) -> int:
return id(self)
async def __call__(self, context: "Context", *args, **kwargs) -> None:
"""
Calls this command.
Args:
context: The context of this command
args: Any
kwargs: Any
"""
# signals if a semaphore has been acquired, for exception handling
# if present assume one will be acquired
max_conc_acquired = self.max_concurrency is not MISSING
try:
if await self._can_run(context):
if self.pre_run_callback is not None:
await self.pre_run_callback(context, *args, **kwargs)
if self.extension is not None and self.extension.extension_prerun:
for prerun in self.extension.extension_prerun:
await prerun(context, *args, **kwargs)
await self.call_callback(self.callback, context)
if self.post_run_callback is not None:
await self.post_run_callback(context, *args, **kwargs)
if self.extension is not None and self.extension.extension_postrun:
for postrun in self.extension.extension_postrun:
await postrun(context, *args, **kwargs)
except Exception as e:
            # if a MaxConcurrencyReached exception is raised, a concurrency slot was never acquired
max_conc_acquired = not isinstance(e, MaxConcurrencyReached)
if self.error_callback:
await self.error_callback(e, context, *args, **kwargs)
elif self.extension and self.extension.extension_error:
await self.extension.extension_error(context, *args, **kwargs)
else:
raise
finally:
if self.max_concurrency is not MISSING and max_conc_acquired:
await self.max_concurrency.release(context)
@staticmethod
def _get_converter_function(anno: type[Converter] | Converter, name: str) -> Callable[[Context, str], Any]:
num_params = len(get_parameters(anno.convert))
# if we have three parameters for the function, it's likely it has a self parameter
# so we need to get rid of it by initing - typehinting hates this, btw!
# the below line will error out if we aren't supposed to init it, so that works out
try:
actual_anno: Converter = anno() if num_params == 3 else anno # type: ignore
except TypeError:
raise ValueError(
f"{get_object_name(anno)} for {name} is invalid: converters must have exactly 2 arguments."
) from None
# we can only get to this point while having three params if we successfully inited
if num_params == 3:
num_params -= 1
if num_params != 2:
raise ValueError(
f"{get_object_name(anno)} for {name} is invalid: converters must have exactly 2 arguments."
)
return actual_anno.convert
async def try_convert(self, converter: Optional[Callable], context: "Context", value: Any) -> Any:
if converter is None:
return value
return await maybe_coroutine(converter, context, value)
def param_config(self, annotation: Any, name: str) -> Tuple[Callable, Optional[dict]]:
        # This is a little involved. NAFF annotations can be applied to a parameter directly,
        # or wrapped in Annotated[str, CMD_*].
        # This helper handles both cases and returns a tuple of the annotation and its config (if any)
        if annotation is None:
            # return a tuple so the caller can always unpack the result
            return None, None
if typing.get_origin(annotation) is Annotated and (args := typing.get_args(annotation)):
for ann in args:
v = getattr(ann, name, None)
if v is not None:
return (ann, v)
return (annotation, getattr(annotation, name, None))
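    # Editor's illustrative note (not part of the original source): the two annotation
    # shapes handled above look roughly like
    #     async def cmd(ctx, arg: SomeNaffAnnotation): ...                    # direct
    #     async def cmd(ctx, arg: Annotated[str, SomeNaffAnnotation]): ...    # wrapped
    # where SomeNaffAnnotation is a hypothetical object exposing the requested
    # attribute (here "_annotation_dat") that describes how to build the value.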
async def call_callback(self, callback: Callable, context: "Context") -> None:
callback = functools.partial(callback, context) # first param must be ctx
parameters = get_parameters(callback)
args = []
kwargs = {}
if len(parameters) == 0:
# if no params, user only wants context
return await callback()
c_args = copy.copy(context.args)
for param in parameters.values():
if isinstance(param.annotation, Converter):
# for any future dev looking at this:
# this checks if the class here has a convert function
# it does NOT check if the annotation is actually a subclass of Converter
# this is an intended behavior for Protocols with the runtime_checkable decorator
convert = functools.partial(
self.try_convert, self._get_converter_function(param.annotation, param.name), context
)
else:
convert = functools.partial(self.try_convert, None, context)
func, config = self.param_config(param.annotation, "_annotation_dat")
if config:
                # if the user has used a naff-annotation, run the annotation and pass the result to the user's callback
local = {"context": context, "extension": self.extension, "param": param.name}
ano_args = [local[c] for c in config["args"]]
if param.kind != param.POSITIONAL_ONLY:
kwargs[param.name] = func(*ano_args)
else:
args.append(func(*ano_args))
continue
elif param.name in context.kwargs:
# if parameter is in kwargs, user obviously wants it, pass it
if param.kind != param.POSITIONAL_ONLY:
kwargs[param.name] = await convert(context.kwargs[param.name])
else:
args.append(await convert(context.kwargs[param.name]))
if context.kwargs[param.name] in c_args:
c_args.remove(context.kwargs[param.name])
elif param.default is not param.empty:
kwargs[param.name] = param.default
else:
if not str(param).startswith("*"):
if param.kind != param.KEYWORD_ONLY:
try:
args.append(await convert(c_args.pop(0)))
except IndexError:
raise ValueError(
f"{context.invoke_target} expects {len([p for p in parameters.values() if p.default is p.empty]) + len(callback.args)}"
f" arguments but received {len(context.args)} instead"
) from None
else:
raise ValueError(f"Unable to resolve argument: {param.name}")
if any(kwargs_reg.match(str(param)) for param in parameters.values()):
# if user has `**kwargs` pass all remaining kwargs
kwargs = kwargs | {k: v for k, v in context.kwargs.items() if k not in kwargs}
if any(args_reg.match(str(param)) for param in parameters.values()):
            # if user has `*args`, pass all remaining args
args = args + [await convert(c) for c in c_args]
return await callback(*args, **kwargs)
async def _can_run(self, context: Context) -> bool:
"""
Determines if this command can be run.
Args:
context: The context of the command
"""
max_conc_acquired = False # signals if a semaphore has been acquired, for exception handling
try:
if not self.enabled:
return False
for _c in self.checks:
if not await _c(context):
raise CommandCheckFailure(self, _c, context)
if self.extension and self.extension.extension_checks:
for _c in self.extension.extension_checks:
if not await _c(context):
raise CommandCheckFailure(self, _c, context)
if self.max_concurrency is not MISSING:
if not await self.max_concurrency.acquire(context):
raise MaxConcurrencyReached(self, self.max_concurrency)
if self.cooldown is not MISSING:
if not await self.cooldown.acquire_token(context):
raise CommandOnCooldown(self, await self.cooldown.get_cooldown(context))
return True
except Exception:
if max_conc_acquired:
await self.max_concurrency.release(context)
raise
def error(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run upon an error."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("Error handler must be coroutine")
self.error_callback = call
return call
def pre_run(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run before the command."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("pre_run must be coroutine")
self.pre_run_callback = call
return call
def post_run(self, call: Callable[..., Coroutine]) -> Callable[..., Coroutine]:
"""A decorator to declare a coroutine as one that will be run after the command has."""
if not asyncio.iscoroutinefunction(call):
raise TypeError("post_run must be coroutine")
self.post_run_callback = call
return call
def check(check: Callable[["Context"], Awaitable[bool]]) -> Callable[[Coroutine], Coroutine]:
"""
Add a check to a command.
Args:
check: A coroutine as a check for this command
"""
def wrapper(coro: Coroutine) -> Coroutine:
if isinstance(coro, BaseCommand):
coro.checks.append(check)
return coro
if not hasattr(coro, "checks"):
coro.checks = []
coro.checks.append(check)
return coro
return wrapper
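# Editor's illustrative sketch (not part of the original module): a minimal check
# coroutine with the Callable[["Context"], Awaitable[bool]] shape that check()
# expects. The attribute access and owner id below are assumptions for illustration.
async def _example_owner_only(ctx: "Context") -> bool:
    """Allow a command only for a single, hard-coded (hypothetical) owner id."""
    return getattr(getattr(ctx, "author", None), "id", None) == 123456789012345678
# Usage would look like stacking `@check(_example_owner_only)` above a command
# decorator; the check is appended to the command's `checks` list and evaluated
# in `BaseCommand._can_run` before the callback executes.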
def cooldown(bucket: Buckets, rate: int, interval: float) -> Callable[[Coroutine], Coroutine]:
"""
Add a cooldown to a command.
Args:
bucket: The bucket used to track cooldowns
        rate: How many times the command may be run per interval
        interval: The length of the cooldown window, in seconds
"""
def wrapper(coro: Coroutine) -> Coroutine:
cooldown_obj = Cooldown(bucket, rate, interval)
coro.cooldown = cooldown_obj
return coro
return wrapper
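# Editor's illustrative sketch (not part of the original module): applying the
# cooldown decorator to a hypothetical command callback. The bucket name is an
# assumption about the Buckets enum, so the example is shown as a comment only.
#     @cooldown(bucket=Buckets.USER, rate=1, interval=10.0)
#     async def my_command(ctx: "Context") -> None:
#         ...
# With these values each user could invoke the command once every 10 seconds;
# further invocations inside the window raise CommandOnCooldown in _can_run.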
def max_concurrency(bucket: Buckets, concurrent: int) -> Callable[[Coroutine], Coroutine]:
"""
Add a maximum number of concurrent instances to the command.
Args:
bucket: The bucket to enforce the maximum within
concurrent: The maximum number of concurrent instances to allow
"""
def wrapper(coro: Coroutine) -> Coroutine:
max_conc = MaxConcurrency(concurrent, bucket)
coro.max_concurrency = max_conc
return coro
return wrapper
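# Editor's illustrative sketch (not part of the original module): a minimal
# converter with the two-argument convert() signature that
# BaseCommand._get_converter_function() expects. The class name is hypothetical.
class _ExampleUpperConverter(Converter):
    async def convert(self, ctx: "Context", argument: str) -> str:
        # convert() has three parameters including self, so BaseCommand will
        # instantiate the class before calling the bound convert() method
        return argument.upper()
# A command parameter annotated with _ExampleUpperConverter (directly, or via
# Annotated[str, _ExampleUpperConverter]) would receive the upper-cased argument.
# Combined with the decorators above, usage might look like (assumed bucket names):
#     @max_concurrency(bucket=Buckets.GUILD, concurrent=1)
#     @cooldown(bucket=Buckets.USER, rate=1, interval=10.0)
#     async def shout(ctx: "Context", text: _ExampleUpperConverter) -> None:
#         ...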
|
py | 1a4fd5611fd146fdbb0935802b2044559f711b6d | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo.config import cfg
from nova import compute
from nova.compute import flavors
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova.openstack.common import timeutils
from nova import quota
from nova import test
import nova.tests.image.fake
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
# Apparently needed by the RPC tests...
self.network = self.start_service('network')
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(QuotaIntegrationTestCase, self).tearDown()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance."""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
def test_too_many_instances(self):
instance_uuids = []
for i in range(CONF.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
for instance_uuid in instance_uuids:
db.instance_destroy(self.context, instance_uuid)
def test_too_many_cores(self):
instance = self._create_instance(cores=4)
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
db.instance_destroy(self.context, instance['uuid'])
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_auto_assigned(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
        # auto-allocated addresses should not be counted
self.assertRaises(exception.NoMoreFloatingIps,
self.network.allocate_floating_ip,
self.context,
self.project_id,
True)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
metadata = {}
for i in range(CONF.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid,
metadata=metadata)
def _create_with_injected_files(self, files):
api = compute.API()
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type, image_href=image_uuid,
injected_files=files)
def test_no_injected_files(self):
api = compute.API()
inst_type = flavors.get_flavor_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
instance_type=inst_type,
image_href=image_uuid)
def test_max_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(CONF.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_reservation_expire(self):
self.useFixture(test.TimeOverride())
def assertInstancesReserved(reserved):
result = quota.QUOTAS.get_project_quotas(self.context,
self.context.project_id)
self.assertEqual(result['instances']['reserved'], reserved)
quota.QUOTAS.reserve(self.context,
expire=60,
instances=2)
assertInstancesReserved(2)
timeutils.advance_time_seconds(80)
quota.QUOTAS.expire(self.context)
assertInstancesReserved(0)
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
self.read_deleted = 'no'
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_user=None, by_class=None,
reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_user = by_user or {}
self.by_class = by_class or {}
self.reservations = reservations or []
def get_by_project_and_user(self, context, project_id, user_id, resource):
self.called.append(('get_by_project_and_user',
context, project_id, user_id, resource))
try:
return self.by_user[user_id][resource]
except KeyError:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_defaults(self, context, resources):
self.called.append(('get_defaults', context, resources))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_user_quotas(self, context, resources, project_id, user_id,
quota_class=None, defaults=True, usages=True):
self.called.append(('get_user_quotas', context, resources,
project_id, user_id, quota_class, defaults,
usages))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True,
remains=False):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages,
remains))
return resources
def limit_check(self, context, resources, values, project_id=None,
user_id=None):
self.called.append(('limit_check', context, resources,
values, project_id, user_id))
def reserve(self, context, resources, deltas, expire=None,
project_id=None, user_id=None):
self.called.append(('reserve', context, resources, deltas,
expire, project_id, user_id))
return self.reservations
def commit(self, context, reservations, project_id=None, user_id=None):
self.called.append(('commit', context, reservations, project_id,
user_id))
def rollback(self, context, reservations, project_id=None, user_id=None):
self.called.append(('rollback', context, reservations, project_id,
user_id))
def usage_reset(self, context, resources):
self.called.append(('usage_reset', context, resources))
def destroy_all_by_project_and_user(self, context, project_id, user_id):
self.called.append(('destroy_all_by_project_and_user', context,
project_id, user_id))
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, None)
self.assertEqual(resource.default, -1)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, 10)
def test_with_flag_no_quota(self):
self.flags(quota_instances=-1)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, -1)
def test_quota_no_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 10)
def test_quota_with_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_no_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 20)
def test_quota_with_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
),
by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_override_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(quota_value, 20)
def test_quota_with_project_override_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(quota_value, 20)
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual(quota_obj._resources, {})
self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='nova.tests.test_quota.FakeDriver')
self.assertEqual(quota_obj._resources, {})
self.assertIsInstance(quota_obj._driver, FakeDriver)
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual(quota_obj._resources, {})
self.assertEqual(quota_obj._driver, FakeDriver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(quota_obj._resources, dict(test_resource=resource))
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'),
]
quota_obj.register_resources(resources)
self.assertEqual(quota_obj._resources, dict(
test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2],
))
def test_get_by_project_and_user(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_user=dict(
fake_user=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project_and_user(context, 'test_project',
'fake_user', 'test_resource')
self.assertEqual(driver.called, [
('get_by_project_and_user', context, 'test_project',
'fake_user', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual(driver.called, [
('get_by_project', context, 'test_project', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual(driver.called, [
('get_by_class', context, 'test_class', 'test_resource'),
])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'),
]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual(driver.called, [
('get_defaults', context, quota_obj._resources),
])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
('get_class_quotas', context, quota_obj._resources,
'test_class', True),
('get_class_quotas', context, quota_obj._resources,
'test_class', False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_user_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_user_quotas(context, 'test_project',
'fake_user')
result2 = quota_obj.get_user_quotas(context, 'test_project',
'fake_user',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_user_quotas', context, quota_obj._resources,
'test_project', 'fake_user', None, True, True),
('get_user_quotas', context, quota_obj._resources,
'test_project', 'fake_user', 'test_class', False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_project_quotas', context, quota_obj._resources,
'test_project', None, True, True, False),
('get_project_quotas', context, quota_obj._resources,
'test_project', 'test_class', False, False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual(args, (True,))
self.assertEqual(kwargs, dict(foo='bar'))
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(result, 5)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
('limit_check', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None, None),
])
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=[
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
result3 = quota_obj.reserve(context, project_id='fake_project',
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None, None, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), 3600, None, None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), None, 'fake_project', None),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result3, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
None),
])
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
None),
])
def test_usage_reset(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
self.assertEqual(driver.called, [
('usage_reset', context, ['res1', 'res2', 'res3']),
])
def test_destroy_all_by_project_and_user(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project_and_user(context,
'test_project', 'fake_user')
self.assertEqual(driver.called, [
('destroy_all_by_project_and_user', context, 'test_project',
'fake_user'),
])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
self.assertEqual(driver.called, [
('destroy_all_by_project', context, 'test_project'),
])
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual(driver.called, [
('expire', context),
])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(quota_obj.resources,
['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_floating_ips=10,
quota_fixed_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_bytes=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
self.useFixture(test.TimeOverride())
def test_get_defaults(self):
# Use our pre-defined resources
self._stub_quota_class_get_default()
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
def fake_qcgd(context):
self.calls.append('quota_class_get_default')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project_and_user(self):
def fake_qgabpau(context, project_id, user_id):
self.calls.append('quota_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return {
'cores': 10,
'injected_files': 2,
'injected_file_path_bytes': 127,
}
def fake_qugabpau(context, project_id, user_id):
self.calls.append('quota_usage_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
fake_qugabpau)
self._stub_quota_class_get_all_by_name()
def test_get_user_quotas(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
self._stub_quota_class_get_default()
def test_get_project_quotas(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', None),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_defaults(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_usages(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def _stub_get_settable_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, remains=False):
self.calls.append('get_project_quotas')
result = {}
for k, v in resources.items():
if k == 'instances':
remains = v.default - 5
in_use = 1
else:
remains = v.default
in_use = 0
result[k] = {'limit': v.default, 'in_use': in_use,
'reserved': 0, 'remains': remains}
return result
def fake_get_user_quotas(context, resources, project_id, user_id,
quota_class=None, defaults=True,
usages=True):
self.calls.append('get_user_quotas')
result = {}
for k, v in resources.items():
if k == 'instances':
in_use = 1
else:
in_use = 0
result[k] = {'limit': v.default,
'in_use': in_use, 'reserved': 0}
return result
def fake_qgabpau(context, project_id, user_id):
self.calls.append('quota_get_all_by_project_and_user')
return {'instances': 2}
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
self.stubs.Set(self.driver, 'get_user_quotas',
fake_get_user_quotas)
self.stubs.Set(db, 'quota_get_all_by_project_and_user',
fake_qgabpau)
def test_get_settable_quotas_with_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', user_id='test_user')
self.assertEqual(self.calls, [
'get_project_quotas',
'get_user_quotas',
'quota_get_all_by_project_and_user',
])
self.assertEqual(result, {
'instances': {
'minimum': 1,
'maximum': 7,
},
'cores': {
'minimum': 0,
'maximum': 20,
},
'ram': {
'minimum': 0,
'maximum': 50 * 1024,
},
'floating_ips': {
'minimum': 0,
'maximum': 10,
},
'fixed_ips': {
'minimum': 0,
'maximum': 10,
},
'metadata_items': {
'minimum': 0,
'maximum': 128,
},
'injected_files': {
'minimum': 0,
'maximum': 5,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': 10 * 1024,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': 255,
},
'security_groups': {
'minimum': 0,
'maximum': 10,
},
'security_group_rules': {
'minimum': 0,
'maximum': 20,
},
'key_pairs': {
'minimum': 0,
'maximum': 100,
},
})
def test_get_settable_quotas_without_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'get_project_quotas',
])
self.assertEqual(result, {
'instances': {
'minimum': 5,
'maximum': -1,
},
'cores': {
'minimum': 0,
'maximum': -1,
},
'ram': {
'minimum': 0,
'maximum': -1,
},
'floating_ips': {
'minimum': 0,
'maximum': -1,
},
'fixed_ips': {
'minimum': 0,
'maximum': -1,
},
'metadata_items': {
'minimum': 0,
'maximum': -1,
},
'injected_files': {
'minimum': 0,
'maximum': -1,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': -1,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': -1,
},
'security_groups': {
'minimum': 0,
'maximum': -1,
},
'security_group_rules': {
'minimum': 0,
'maximum': -1,
},
'key_pairs': {
'minimum': 0,
'maximum': -1,
},
})
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, remains=False):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'floating_ips', 'security_groups'],
True)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
floating_ips=10,
security_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules'], False)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_usage_reset(self):
calls = []
def fake_quota_usage_update(context, project_id, user_id, resource,
**kwargs):
calls.append(('quota_usage_update', context, project_id, user_id,
resource, kwargs))
if resource == 'nonexist':
raise exception.QuotaUsageNotFound(project_id=project_id)
self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
ctx = FakeContext('test_project', 'test_class')
resources = ['res1', 'res2', 'nonexist', 'res4']
self.driver.usage_reset(ctx, resources)
        # Make sure one update call was made per resource
self.assertEqual(len(calls), len(resources))
# Extract the elevated context that was used and do some
# sanity checks
elevated = calls[0][1]
self.assertEqual(elevated.project_id, ctx.project_id)
self.assertEqual(elevated.quota_class, ctx.quota_class)
self.assertEqual(elevated.is_admin, True)
# Now check that all the expected calls were made
exemplar = [('quota_usage_update', elevated, 'test_project',
'fake_user', res, dict(in_use=-1)) for res in resources]
self.assertEqual(calls, exemplar)
class FakeSession(object):
def begin(self):
return self
def add(self, instance):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
self.quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
fixed_ips=5,
)
self.deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
fixed_ips=2,
)
def make_sync(res_name):
def sync(context, project_id, user_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return sync
self.resources = {}
for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
method_name = '_sync_%s' % res_name
sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
self.resources[res_name] = res
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
self.usages_list = [
dict(resource='instances',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
user_id='fake_user',
in_use=2,
reserved=2 * 1024,
until_refresh=None),
dict(resource='fixed_ips',
project_id='test_project',
user_id=None,
in_use=2,
reserved=2,
until_refresh=None),
]
def fake_get_session():
return FakeSession()
def fake_get_project_quota_usages(context, session, project_id):
return self.usages.copy()
def fake_get_user_quota_usages(context, session, project_id, user_id):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, user_id, resource,
in_use, reserved, until_refresh,
session=None, save=True):
quota_usage_ref = self._make_quota_usage(
project_id, user_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
user_id, resource, delta, expire,
session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, user_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_project_quota_usages',
fake_get_project_quota_usages)
self.stubs.Set(sqa_api, '_get_user_quota_usages',
fake_get_user_quota_usages)
self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
self.useFixture(test.TimeOverride())
def _make_quota_usage(self, project_id, user_id, resource, in_use,
reserved, until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
if resource == 'fixed_ips':
user_id = None
quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
in_use, reserved,
until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
def _update_reservations_list(self, usage_id_change=False,
delta_change=False):
reservations_list = [
dict(resource='instances',
project_id='test_project',
delta=2),
dict(resource='cores',
project_id='test_project',
delta=4),
dict(resource='ram',
delta=2 * 1024),
dict(resource='fixed_ips',
project_id='test_project',
delta=2),
]
if usage_id_change:
reservations_list[0]["usage_id"] = self.usages_created['instances']
reservations_list[1]["usage_id"] = self.usages_created['cores']
reservations_list[2]["usage_id"] = self.usages_created['ram']
reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
else:
reservations_list[0]["usage_id"] = self.usages['instances']
reservations_list[1]["usage_id"] = self.usages['cores']
reservations_list[2]["usage_id"] = self.usages['ram']
reservations_list[3]["usage_id"] = self.usages['fixed_ips']
if delta_change:
reservations_list[0]["delta"] = -2
reservations_list[1]["delta"] = -4
reservations_list[2]["delta"] = -2 * 1024
reservations_list[3]["delta"] = -2
return reservations_list
def _init_usages(self, *in_use, **kwargs):
for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')):
self.init_usage('test_project', 'fake_user',
option, in_use[i], **kwargs)
return FakeContext('test_project', 'test_class')
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["in_use"] = 0
self.usages_list[1]["in_use"] = 0
self.usages_list[2]["in_use"] = 0
self.usages_list[3]["in_use"] = 0
self.compare_usage(self.usages_created, self.usages_list)
reservations_list = self._update_reservations_list(True)
self.compare_reservation(result, reservations_list)
def test_quota_reserve_negative_in_use(self):
context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["until_refresh"] = 5
self.usages_list[1]["until_refresh"] = 5
self.usages_list[2]["until_refresh"] = 5
self.usages_list[3]["until_refresh"] = 5
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_until_refresh(self):
context = self._init_usages(3, 3, 3, 3, until_refresh=1)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.usages_list[0]["until_refresh"] = 5
self.usages_list[1]["until_refresh"] = 5
self.usages_list[2]["until_refresh"] = 5
self.usages_list[3]["until_refresh"] = 5
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
context = self._init_usages(3, 3, 3, 3, created_at=record_created,
updated_at=record_created)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, max_age)
self.assertEqual(self.sync_called, set(['instances', 'cores',
'ram', 'fixed_ips']))
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_no_refresh(self):
context = self._init_usages(3, 3, 3, 3)
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 3
self.usages_list[1]["in_use"] = 3
self.usages_list[2]["in_use"] = 3
self.usages_list[3]["in_use"] = 3
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, self._update_reservations_list())
def test_quota_reserve_unders(self):
context = self._init_usages(1, 3, 1 * 1024, 1)
self.deltas["instances"] = -2
self.deltas["cores"] = -4
self.deltas["ram"] = -2 * 1024
self.deltas["fixed_ips"] = -2
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 1
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 3
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 1 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 1
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
reservations_list = self._update_reservations_list(False, True)
self.compare_reservation(result, reservations_list)
def test_quota_reserve_overs(self):
context = self._init_usages(4, 8, 10 * 1024, 4)
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 4
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 8
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 10 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 4
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
context = self._init_usages(10, 20, 20 * 1024, 10)
self.deltas["instances"] = -2
self.deltas["cores"] = -4
self.deltas["ram"] = -2 * 1024
self.deltas["fixed_ips"] = -2
result = sqa_api.quota_reserve(context, self.resources, self.quotas,
self.quotas, self.deltas, self.expire,
0, 0)
self.assertEqual(self.sync_called, set([]))
self.usages_list[0]["in_use"] = 10
self.usages_list[0]["reserved"] = 0
self.usages_list[1]["in_use"] = 20
self.usages_list[1]["reserved"] = 0
self.usages_list[2]["in_use"] = 20 * 1024
self.usages_list[2]["reserved"] = 0
self.usages_list[3]["in_use"] = 10
self.usages_list[3]["reserved"] = 0
self.compare_usage(self.usages, self.usages_list)
self.assertEqual(self.usages_created, {})
reservations_list = self._update_reservations_list(False, True)
self.compare_reservation(result, reservations_list)
class NoopQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(NoopQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_bytes=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.expected_quotas = dict([(r, -1)
for r in quota.QUOTAS._resources])
self.driver = quota.NoopQuotaDriver()
def test_get_defaults(self):
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(self.expected_quotas, result)
def test_get_class_quotas(self):
result = self.driver.get_class_quotas(None,
quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.expected_quotas, result)
def test_get_class_quotas_no_defaults(self):
result = self.driver.get_class_quotas(None,
quota.QUOTAS._resources,
'test_class',
False)
self.assertEqual(self.expected_quotas, result)
def test_get_project_quotas(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project')
self.assertEqual(self.expected_quotas, result)
def test_get_user_quotas(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user')
self.assertEqual(self.expected_quotas, result)
def test_get_project_quotas_no_defaults(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project',
defaults=False)
self.assertEqual(self.expected_quotas, result)
def test_get_user_quotas_no_defaults(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user',
defaults=False)
self.assertEqual(self.expected_quotas, result)
def test_get_project_quotas_no_usages(self):
result = self.driver.get_project_quotas(None,
quota.QUOTAS._resources,
'test_project',
usages=False)
self.assertEqual(self.expected_quotas, result)
def test_get_user_quotas_no_usages(self):
result = self.driver.get_user_quotas(None,
quota.QUOTAS._resources,
'test_project',
'fake_user',
usages=False)
self.assertEqual(self.expected_quotas, result)
|
py | 1a4fd582b7e339a2e1a377417875d5fab0183f00 | #!/usr/bin/env python3
import compilation
import config
import execution
import getopt
import models
import processing
import sqlalchemy
import sys
import templates
from utils import status, status_message
from termcolor import cprint
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from orm_classes import ThroughputSample, \
BatchSizeModelAccuracySample, \
ThroughputHeatmapSample, \
DOPModelAccuracySample, \
ThroughputWithHandImplementations, \
Base
import os
clear = lambda: os.system('clear')
def update_progress(progress):
    # 'progress' is expected as a percentage in the range 0-100
    print('\r[{0}] {1}%'.format('#' * int(progress / 10), progress))
def main(argv):
# Setup code for SQLAlchemy
engine = create_engine('sqlite:///rt.db')
    Base.metadata.bind = engine
db_session_factory = sessionmaker(bind=engine)
session = db_session_factory()
opts, args = getopt.getopt(argv, 'ht:', ['type='])
    if not opts or opts[0][0] == '-h':
print('main.py -t <experiment_type>')
print('Experiment types: throughput, batch_size_model_accuracy, worker_model_accuracy, heatmap, hand_implementation')
elif opts[0][0] in ('-t', '--type'):
if opts[0][1] == 'throughput':
throughput_experiments(session)
elif opts[0][1] == 'batch_size_model_accuracy':
batch_size_model_accuracy_experiments(session)
elif opts[0][1] == 'worker_model_accuracy':
worker_model_accuracy_experiments(session)
elif opts[0][1] == 'heatmap':
print('Starting heatmap experiments...')
heatmap_experiments(session)
elif opts[0][1] == 'hand_implementation':
print('Starting experiments with hand implementations...')
throughput_with_hand_implementations(session)
else:
print('Could not recognise the experiment type.')
print('Done.')
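# Illustrative invocations (editorial addition; the script name and experiment
# types are taken from the help text printed by main() above):
#   python3 main.py -t throughput
#   python3 main.py -t heatmap
#   python3 main.py -h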
def heatmap_experiments(session):
exp_config = config.read_config()['max_throughput_heatmap']
app_name = exp_config['application_name']
deadline = exp_config['relative_deadline']
max_workers = exp_config['max_workers']
samples = exp_config['samples']
data_types = exp_config['data_types']
for data_type in data_types:
for input_i in range(len(exp_config['input_array_size'])):
input_array_size = exp_config['input_array_size'][input_i]
worker_wcet = exp_config['worker_wcet'][input_i]
            # Iterate over the worker counts (degrees of parallelism)
            for dop in range(1, max_workers + 1, 1):
                # Skip the batch-size search if these parameters are already known to be non-viable
non_viable_parameters = session.query(ThroughputHeatmapSample) \
.filter(
sqlalchemy.or_(
ThroughputHeatmapSample.missed_deadline == 1,
ThroughputHeatmapSample.compiled == 0,
ThroughputHeatmapSample.run_time_error == 1
)) \
.filter(ThroughputHeatmapSample.sample_application_name == app_name) \
.filter(ThroughputHeatmapSample.input_size == input_array_size) \
.filter(ThroughputHeatmapSample.relative_deadline == deadline) \
.filter(ThroughputHeatmapSample.worker_wcet == worker_wcet) \
.filter(ThroughputHeatmapSample.dop == dop) \
.filter(ThroughputHeatmapSample.data_type == data_type) \
.count()
found_non_viable_batch_size = non_viable_parameters >= samples
batch_size = 0
while not found_non_viable_batch_size:
batch_size += 1
# Check if the current data point already exists
'''query_result = session.query(ThroughputHeatmapSample.sample).get(
(app_name,
input_array_size,
deadline,
worker_wcet,
batch_size,
dop))'''
sample_count = session.query(ThroughputHeatmapSample) \
.filter(ThroughputHeatmapSample.sample_application_name == app_name) \
.filter(ThroughputHeatmapSample.input_size == input_array_size) \
.filter(ThroughputHeatmapSample.relative_deadline == deadline) \
.filter(ThroughputHeatmapSample.worker_wcet == worker_wcet) \
.filter(ThroughputHeatmapSample.dop == dop) \
.filter(ThroughputHeatmapSample.batch_size == batch_size) \
.filter(ThroughputHeatmapSample.data_type == data_type) \
.count()
print('Sample count: ' + str(sample_count))
print('Max. samples: ' + str(samples))
print('Collect more samples: ' + str(sample_count < samples))
print('Dop: ' + str(dop))
print('Batch size: ' + str(batch_size))
print('Data type: ' + str(data_type))
# input('Press...')
while sample_count < samples:
succeeded = True
compiled = True
run_time_error = False
missed_deadline = False
measured_min_period = -1
# Measure the max. throughput
succeeded &= status('Creating source file from template...',
templates.create_app_for_throughput_experiments(
app_name,
300, # Period
input_array_size,
deadline,
worker_wcet,
dop,
(False if batch_size == 1 else True), # True == batching is on
batch_size,
dop == 1,
data_type
))
succeeded &= status('Compiling...', compilation.compile_farm())
if not succeeded:
cprint('Measure max. throughput | Could not compile the application. DOP: {} Batch size {}'\
.format(dop, batch_size), 'red')
compiled = False
else:
execution_status, out = execution.execute_farm()
succeeded &= status('Executing...', execution_status)
if not succeeded:
cprint('Measure max. throughput | Could not run the application. DOP: {} Batch size {}' \
.format(dop, batch_size), 'red')
run_time_error = True
else:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
measured_min_period = processing.compute_interarrival_time(output, batch_size, dop)
# check if batch size and measured period are viable
succeeded &= status('Creating source file from template...',
templates.create_app_for_throughput_heatmap_experiments(
app_name,
measured_min_period, # period
input_array_size,
deadline,
dop,
worker_wcet,
batch_size,
dop == 1,
data_type
))
# Check if the current batch size is viable
succeeded &= status('Compiling...', compilation.compile_farm())
if not succeeded:
cprint('Check if the current batch size is viable | Could not compile the application. DOP: {} Batch size {}'\
.format(dop, batch_size), 'red')
compiled = False
else:
execution_status, out = execution.execute_farm()
succeeded &= status('Executing...', execution_status)
if not succeeded:
cprint('Check if the current batch size is viable | Could not run the application. DOP: {} Batch size {}' \
.format(dop, batch_size), 'red')
run_time_error = True
else:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
missed_deadline = processing.check_if_deadline_has_been_missed(output, deadline)
if missed_deadline:
cprint('Check if the current batch size is viable | Jobs miss their deadline. DOP: {} Batch size {}' \
.format(dop, batch_size), 'red')
succeeded = False
# save result
sample = ThroughputHeatmapSample(
sample_application_name=app_name,
input_size =input_array_size,
relative_deadline =deadline,
worker_wcet =worker_wcet,
batch_size =batch_size,
dop =dop,
min_period =measured_min_period,
sample =sample_count + 1,
data_type =data_type,
compiled =compiled,
missed_deadline =missed_deadline,
run_time_error =run_time_error
)
session.add(sample)
session.commit()
sample_count += 1
found_non_viable_batch_size |= not succeeded
def run_worker_model_accuracy_experiment(sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
subtract_from_dop):
succeeded = True
# Compute batch size and worker count
computed_batch_size, computed_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period, relative_deadline)
status_message('DEBUG | batch_size: {}, dop: {}'.format(computed_batch_size, computed_dop))
# Generate source code from template
succeeded &= status('Creating source files from templates...',
templates.create_app_for_worker_model_accuracy_experiments(
sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
computed_batch_size,
computed_dop,
subtract_from_dop
))
# Compile
if succeeded:
succeeded &= status('Compiling...', compilation.compile_farm())
# Run the experiment
if succeeded:
execution_status, out = execution.execute_farm()
succeeded &= status('Executing...', execution_status)
# Process the output
matched_throughput = False
if succeeded:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
        # Add 10 ns to the period to account for the accuracy of the board's timers
matched_throughput = (processing.compute_interarrival_time(
output,
internal_param['batch_size'],
internal_param['dop']) <= period + 10)
print('Measured min. period: {}'.format(processing.compute_interarrival_time(
output,
internal_param['batch_size'],
internal_param['dop'])))
return succeeded, matched_throughput, internal_param['batch_size'], internal_param['dop']
def worker_model_accuracy_experiments(session):
benchmarks = config.read_config()['dop_model_accuracy']
for app in benchmarks.keys():
bench_config = benchmarks[app]
relative_deadline_list = bench_config['relative_deadline']
input_array_size_list = bench_config['input_array_size']
worker_wcet_list = bench_config['worker_wcet']
period_start_list = bench_config['period_start']
period_end_list = bench_config['period_end']
period_steps_list = bench_config['period_steps']
samples = bench_config['samples']
for i in range(len(input_array_size_list)):
relative_deadline = relative_deadline_list[i]
input_array_size = input_array_size_list[i]
worker_wcet = worker_wcet_list[i]
period_start = period_start_list[i]
period_end = period_end_list[i]
period_steps = period_steps_list[i]
# Iterate over all periods
for period in range(period_start, period_end + period_steps, period_steps):
# Find the optimum and test predictions
for is_oracle in [False, True]:
sample_count = session.query(DOPModelAccuracySample) \
.filter(DOPModelAccuracySample.sample_application_name == app) \
.filter(DOPModelAccuracySample.input_size == input_array_size) \
.filter(DOPModelAccuracySample.relative_deadline == relative_deadline) \
.filter(DOPModelAccuracySample.worker_wcet == worker_wcet) \
.filter(DOPModelAccuracySample.period == period) \
.filter(DOPModelAccuracySample.is_oracle == is_oracle) \
.count()
print('Is oracle: {}'.format(is_oracle))
print('Sample count: {}'.format(sample_count))
while sample_count < samples:
if is_oracle:
print('Finding the minimum DOP...')
matched_throughput = True
subtract_from_dop = 0
while matched_throughput:
print('Subtract from DOP: ' + str(subtract_from_dop))
succeeded, matched_throughput, batch_size, dop = run_worker_model_accuracy_experiment(
app,
period,
input_array_size,
relative_deadline,
worker_wcet,
subtract_from_dop)
print('Matched throughput: ' + str(matched_throughput))
if not succeeded:
status_message('Oracle experiments failed!')
exit(0)
if matched_throughput and not dop == 1:
subtract_from_dop += 1
elif matched_throughput and dop == 1:
break
elif not matched_throughput:
if subtract_from_dop == 0:
status_message('ERROR | The DOP predicted by our model is too low')
exit(0)
dop += 1
matched_throughput = True
else:
succeeded, matched_throughput, batch_size, dop = run_worker_model_accuracy_experiment(
app,
period,
input_array_size,
relative_deadline,
worker_wcet,
0 # Subtract from DOP
)
if succeeded:
sample = DOPModelAccuracySample(
sample_application_name=app,
input_size =input_array_size,
relative_deadline =relative_deadline,
worker_wcet =worker_wcet,
period =period,
is_oracle =is_oracle,
sample =sample_count + 1,
batch_size =batch_size,
dop =dop,
success =succeeded,
matched_throughput =matched_throughput
)
session.add(sample)
session.commit()
sample_count += 1
else:
status_message('Compilation or execution did not succeed. Exiting...')
exit(0)
def run_batch_size_accuracy_experiment(sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
add_to_batch_size=0):
succeeded = True
# Compute batch size and worker count
computed_batch_size, computed_dop = models.compute_optimal_dop_and_batch_size(worker_wcet, period,
relative_deadline)
status_message('DEBUG | batch_size: {}, dop: {}'.format(computed_batch_size, computed_dop))
# Generate source code from template
succeeded &= status('Creating source files from templates...',
templates.create_app_for_batch_size_accuracy_experiments(
sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
computed_batch_size,
computed_dop,
add_to_batch_size=add_to_batch_size
))
# Compile
if succeeded:
status_message(('DEBUG | period: {}, input_array_size: {}, relative_deadline: {},' +
' worker_wcet: {}, add_to_batch_size: {}')
.format(period,
input_array_size,
relative_deadline,
worker_wcet,
add_to_batch_size))
succeeded &= status('Compiling...', compilation.compile_farm())
else:
status_message("Could not create the sample application.")
exit(0)
# Run the experiment
if succeeded:
execution_status, out = execution.execute_farm()
succeeded &= status('Executing...', execution_status)
else:
status_message("Could not compile the sample application.")
exit(0)
# Process the output
missed_deadline = False
if succeeded:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
missed_deadline = processing.check_if_deadline_has_been_missed(output, relative_deadline)
else:
status_message("Could not run the sample application.")
exit(0)
return succeeded, missed_deadline, internal_param['batch_size'], internal_param['dop']
def batch_size_model_accuracy_experiments(session):
benchmarks = config.read_config()['batch_size_model_accuracy']
for sample_application in benchmarks.keys():
bench_config = benchmarks[sample_application]
relative_deadline_list = bench_config['relative_deadline']
input_array_size_list = bench_config['input_array_size']
worker_wcet_list = bench_config['worker_wcet']
period_start_list = bench_config['period_start']
period_end_list = bench_config['period_end']
period_steps_list = bench_config['period_steps']
samples = bench_config['samples']
for i in range(len(input_array_size_list)):
relative_deadline = relative_deadline_list[i]
input_array_size = input_array_size_list[i]
worker_wcet = worker_wcet_list[i]
period_start = period_start_list[i]
period_end = period_end_list[i]
period_steps = period_steps_list[i]
# Iterate over all periods
for period in range(period_start, period_end + period_steps, period_steps):
# Find the optimum and test predictions
for is_oracle in [False, True]:
# Check if database entry for the current problem instance exists already
sample_count = session.query(BatchSizeModelAccuracySample) \
.filter(BatchSizeModelAccuracySample.sample_application_name == sample_application) \
.filter(BatchSizeModelAccuracySample.input_size == input_array_size) \
.filter(BatchSizeModelAccuracySample.relative_deadline == relative_deadline) \
.filter(BatchSizeModelAccuracySample.worker_wcet == worker_wcet) \
.filter(BatchSizeModelAccuracySample.period == period) \
.filter(BatchSizeModelAccuracySample.is_oracle == is_oracle) \
.count()
while sample_count < samples:
add_to_batch_size = 0
if is_oracle:
# Find the optimum
missed_deadline = False
# TODO: Refactor duplication
add_to_batch_size = 0
while not missed_deadline:
succeeded, missed_deadline, batch_size, _ = run_batch_size_accuracy_experiment(
sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
add_to_batch_size=add_to_batch_size)
if not succeeded:
status_message('ERROR | Oracle experiments failed!')
exit(0)
if not missed_deadline:
add_to_batch_size += 1
else:
if add_to_batch_size == 0:
status_message('ERROR | The batch size chosen by our model is too large.')
exit(0)
# This value will be stored in the DB
                                    # Subtract 1 since the application fails to meet deadlines with the
                                    # current batch size
status_message('DEBUG | Missed deadlines')
batch_size -= 1
missed_deadline = False
else:
succeeded, missed_deadline, batch_size, _ = run_batch_size_accuracy_experiment(
sample_application,
period,
input_array_size,
relative_deadline,
worker_wcet,
add_to_batch_size)
if missed_deadline:
status_message('ERROR | The batch size chosen by our model is too large.')
exit(0)
# Save results
if succeeded:
sample = BatchSizeModelAccuracySample(
sample_application_name=sample_application,
input_size =input_array_size,
relative_deadline =relative_deadline,
worker_wcet =worker_wcet,
period =period,
is_oracle =is_oracle,
sample =sample_count + 1,
batch_size =batch_size,
success =succeeded,
deadline_missed =missed_deadline
)
session.add(sample)
session.commit()
sample_count += 1
else:
status_message('Compilation or execution did not succeed. Exiting...')
def throughput_experiments(session):
benchmarks = config.read_config()['throughput']
for benchmark in benchmarks.keys():
# Read config file
bench_config = benchmarks[benchmark]
wcets = bench_config['wcet']
input_sizes = bench_config['input_array_size']
rel_dead_start = bench_config['relative_deadline_start']
rel_dead_steps = bench_config['relative_deadline_steps']
rel_dead_stop = bench_config['relative_deadline_stop']
workers_start = bench_config['workers_start']
workers_steps = bench_config['workers_steps']
workers_stop = bench_config['workers_stop']
samples = bench_config['samples']
total_number_of_experiments = 2 * len(wcets) \
* len(range(workers_start, workers_stop + 1, workers_steps)) \
* len(range(rel_dead_start, rel_dead_stop + rel_dead_steps, rel_dead_steps))
experiment_count = 0
# The baseline does not use batching
for with_batching in [True, False]:
# Sweep over the parameter space
# Parameter: input sizes + corresp. WCETs
for wcet_index in range(len(wcets)):
wcet = wcets[wcet_index]
input_size = input_sizes[wcet_index]
batch_sizes = bench_config['batch_sizes'][wcet_index]
for batch_size in batch_sizes:
if not with_batching:
batch_size = 1
# Parameter: worker count
for dop in range(workers_start, workers_stop + 1, workers_steps):
# Parameter: relative deadline
for rel_dead in range(rel_dead_start, rel_dead_stop + rel_dead_steps, rel_dead_steps):
clear()
                            update_progress((experiment_count / total_number_of_experiments) * 100)
print('Experiment: {}, with batching: {}, WCET: {}, DOP: {}, D: {}, Batch size: {}'.format(
benchmark,
with_batching,
wcet,
dop,
rel_dead,
batch_size))
# Check if data for this current parameter set exists
# and execute experiments if they do not exist
sample_count = session.query(ThroughputSample) \
.filter(ThroughputSample.experiment_name == benchmark) \
.filter(ThroughputSample.input_size == input_size) \
.filter(ThroughputSample.relative_deadline == rel_dead) \
.filter(ThroughputSample.worker_wcet == wcet) \
.filter(ThroughputSample.with_batching == int(with_batching)) \
.filter(ThroughputSample.batch_size == batch_size) \
.count()
while sample_count < samples:
# Prepare experiments
status_code = True
status_code &= status('Creating source files from templates... ', templates.create_app_for_throughput_experiments(
benchmark,
300, # period. This does not set the period with which new input data arrives in this case.
                                    # This is just a dummy value that is needed to compute the size of the task
# farm internal buffer.
input_size,
rel_dead,
wcet,
dop,
with_batching,
batch_size))
compilation_succeeded = False
if status_code:
status_code &= status('Compiling... ', compilation.compile_farm())
compilation_succeeded = status_code
# Run experiment
if status_code:
execution_status, out = execution.execute_farm()
status_code &= status('Executing... ', execution_status)
print(out)
# Prepare results
if status_code:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
period = processing.compute_interarrival_time(output, batch_size, dop)
                                    # Check if the application could be successfully compiled and run
status_message('Compilation and execution was successful')
else:
status_message('Compilation or execution did not succeed. Exiting...')
break
# Print the result to the console
status_message('Found min. interarrival time: ' + str(period))
# Store the result in the database
sample = ThroughputSample(
experiment_name =benchmark,
input_size =input_size,
relative_deadline=rel_dead,
worker_wcet =wcet,
dop =dop,
with_batching =int(with_batching),
sample =sample_count + 1,
success =int(compilation_succeeded))
sample.batch_size = batch_size
sample.min_interarrival_time = period
# Save result
session.add(sample)
session.commit()
sample_count += 1
experiment_count += 1
def throughput_with_hand_implementations(session):
benchmarks = config.read_config()['throughput_with_hand_implementations']
for benchmark_in_config in benchmarks.keys():
# Read config file
bench_config = benchmarks[benchmark_in_config]
wcets = bench_config['wcets']
input_sizes = bench_config['input_array_sizes']
rel_deadlines = bench_config['relative_deadlines']
samples = bench_config['samples']
total_number_of_experiments = 2 * \
len(input_sizes) * \
len(rel_deadlines)
experiment_count = 1
for is_hand_implementation in [False, True]:
if is_hand_implementation:
benchmark = 'hand_implemented_' + benchmark_in_config
else:
benchmark = benchmark_in_config
for input_size_index in range(len(input_sizes)):
dop = 6
wcet = wcets[input_size_index]
input_size = input_sizes[input_size_index]
rel_dead = rel_deadlines[input_size_index]
clear()
update_progress((experiment_count / total_number_of_experiments) * 100)
print('Experiment: {}, WCET: {}, DOP: {}, D: {}'.format(
benchmark,
wcet,
dop,
rel_dead
))
sample_count = session.query(ThroughputWithHandImplementations) \
.filter(ThroughputWithHandImplementations.sample_application_name == benchmark_in_config) \
.filter(ThroughputWithHandImplementations.input_size == input_size) \
.filter(ThroughputWithHandImplementations.relative_deadline == rel_dead) \
.filter(ThroughputWithHandImplementations.worker_wcet == wcet) \
.filter(ThroughputWithHandImplementations.dop == dop) \
.filter(ThroughputWithHandImplementations.is_hand_implementation == is_hand_implementation) \
.count()
while sample_count < samples:
# Find max. batch size
batch_size = 1
if not is_hand_implementation:
while True:
succeeded = True
if is_hand_implementation:
succeeded &= status('Creating source files from template...',
templates.create_app_for_comparison_with_hand_implementations(
benchmark,
250, # period
input_size,
batch_size,
dop,
'batch_size_accuracy'
))
else:
succeeded &= status('Creating source files from template...',
templates.create_app_for_batch_size_accuracy_experiments(
benchmark,
250, # period
input_size,
rel_dead,
wcet,
batch_size,
dop,
0 # subtract_from_dop
))
if not succeeded:
cprint('ERROR: Could not generate source file', 'red')
exit(0)
succeeded &= status('Compiling...', compilation.compile_farm())
if not succeeded:
cprint('Check if the current batch size is viable | Could not compile the application', 'blue')
break
execution_status, out = execution.execute_farm()
succeeded &= status('Executing...', execution_status)
if not succeeded:
cprint('Check if the current batch size is viable | Could not run the application', 'blue')
break
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
missed_deadline = processing.check_if_deadline_has_been_missed(output, rel_dead)
if missed_deadline:
cprint('Check if the current batch size is viable | ' +
'Jobs miss their deadline. DOP: {} Batch size {}'.format(dop, batch_size), 'blue')
break
batch_size += 1
clear()
update_progress((experiment_count / total_number_of_experiments) * 100)
batch_size -= 1
else:
instance = session.query(ThroughputWithHandImplementations) \
.filter(ThroughputWithHandImplementations.sample_application_name == benchmark_in_config) \
.filter(ThroughputWithHandImplementations.input_size == input_size) \
.filter(ThroughputWithHandImplementations.relative_deadline == rel_dead) \
.filter(ThroughputWithHandImplementations.worker_wcet == wcet) \
.filter(ThroughputWithHandImplementations.dop == dop) \
.filter(ThroughputWithHandImplementations.is_hand_implementation == 0) \
.first()
batch_size = instance.batch_size
print('Batch size in DB: {}'.format(batch_size))
if batch_size == 0:
cprint('ERROR: Could not compile or run an application with batch size 1', 'red')
exit(0)
clear()
update_progress((experiment_count / total_number_of_experiments) * 100)
cprint('Finding maximum throughput with the found maximum batch size...', 'blue')
# Measure max. throughput with the found batch size
# Prepare experiments
status_code = True
if is_hand_implementation:
status_code &= status('Creating source files from templates... ',
templates.create_app_for_comparison_with_hand_implementations(
benchmark,
250, # period
input_size,
batch_size,
dop,
'throughput'
))
else:
status_code &= status('Create source file from templates...',
templates.create_app_for_throughput_experiments(
benchmark,
250, # period,
input_size,
rel_dead,
wcet,
dop,
True, # with_batching
batch_size
))
if status_code:
status_code &= status('Compiling... ', compilation.compile_farm())
else:
cprint('ERROR: Could not generate source code for a sample application', 'red')
exit(0)
# Run experiment
if status_code:
execution_status, out = execution.execute_farm()
status_code &= status('Executing... ', execution_status)
print(out)
else:
cprint('ERROR: Could not compile a sample application', 'red')
exit(0)
# Prepare results
if status_code:
internal_param, output = processing.parse_output_to_dict(out.decode('utf-8'))
period = processing.compute_interarrival_time(output, batch_size, dop)
                            # Check if the application could be successfully compiled and run
status_message('Compilation and execution was successful')
else:
cprint('ERROR: Could not execute a sample application', 'red')
exit(0)
# Print the result to the console
status_message('Found min. period: ' + str(period))
# Store the result in the database
sample = ThroughputWithHandImplementations(
sample_application_name=benchmark_in_config,
input_size =input_size,
relative_deadline =rel_dead,
worker_wcet =wcet,
dop =dop,
is_hand_implementation =is_hand_implementation,
sample_count =sample_count+1,
batch_size =batch_size,
min_period =period)
# Save result
session.add(sample)
session.commit()
sample_count += 1
experiment_count += 1
'''The below experiment likely does not make sense'''
'''def throughput_loss_due_to_non_optimal_batch_size_experiments(session):
experiments = session.query(BatchSizeModelAccuracySample).all()
for experiment in experiments:
app_name = experiment.sample_application_name
input_size = experiment.input_size
relative_deadline = experiment.relative_deadline
worker_wcet = experiment.worker_wcet
period = experiment.period
is_oracle = experiment.is_oracle
batch_size = experiment.batch_size
print(experiments[0].sample_application_name)'''
if __name__ == '__main__':
main(sys.argv[1:]) |
py | 1a4fd5af7255565ca4ef372034ffcdcc891bee8e | # Describing possession
# Describing things by color
# Describing kinship
# Describing movement to/from
# Describing locations
# Greetings and farewells
# Face-changing speech (Thanking, apologizing)
# Asking questions about where, what, how, when, who, etc
# Describing tastes
# A set of words/skills/structures that are known
# A set of words/skills/structures that are new
# A set of words/skills/structures that are next in the 'skill tree'
# Generate sentences/scenarios that use the known words, include some 'to study' words, and ignore as much as possible other new words.
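# ---------------------------------------------------------------------------
# A minimal sketch of the idea above (editorial addition, not original notes).
# It assumes a purely set-based vocabulary model; the example words and the
# helper name pick_study_sentences are illustrative placeholders.
# ---------------------------------------------------------------------------

KNOWN_WORDS = {"i", "have", "a", "red", "book"}    # words/structures already mastered
STUDY_WORDS = {"mother", "market"}                 # new words currently being studied
FUTURE_WORDS = {"yesterday", "bought"}             # next in the skill tree, not used yet


def pick_study_sentences(candidate_sentences):
    """Keep sentences built only from known words plus at least one study word."""
    selected = []
    for sentence in candidate_sentences:
        words = set(sentence.lower().split())
        if words - KNOWN_WORDS - STUDY_WORDS:
            continue  # needs material further down the skill tree; skip it
        if words & STUDY_WORDS:
            selected.append(sentence)  # reinforces at least one study word
    return selected


if __name__ == "__main__":
    # With the sets above, only the second sentence qualifies for study.
    print(pick_study_sentences(["I have a red book", "I have a market"]))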
|
py | 1a4fd65f2c1062808ae6d66f4696978cda15c618 | import logging
from argparse import ArgumentParser
from .server import Server
logger = logging.getLogger(__name__)
def parse_args():
parser = ArgumentParser(prog="contiflowpump_service", description="Start this SiLA 2 server")
parser.add_argument("-a", "--ip-address", default="127.0.0.1", help="The IP address (default: '127.0.0.1')")
parser.add_argument("-p", "--port", type=int, default=50052, help="The port (default: 50052)")
parser.add_argument("--disable-discovery", action="store_true", help="Disable SiLA Server Discovery")
log_level_group = parser.add_mutually_exclusive_group()
log_level_group.add_argument("-q", "--quiet", action="store_true", help="Only log errors")
log_level_group.add_argument("-v", "--verbose", action="store_true", help="Enable verbose logging")
log_level_group.add_argument("-d", "--debug", action="store_true", help="Enable debug logging")
return parser.parse_args()
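# Illustrative invocation (editorial addition; assumes the package is runnable
# under the prog name declared above -- adjust to how the service is installed):
#   python -m contiflowpump_service --ip-address 0.0.0.0 --port 50052 --verbose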
def start_server(args):
server = Server()
try:
server.start_insecure(args.ip_address, args.port, enable_discovery=not args.disable_discovery)
print(f"Server startup complete, running on {args.ip_address}:{args.port}. Press Enter to stop it")
try:
input()
except KeyboardInterrupt:
pass
finally:
server.stop()
print("Stopped server")
def setup_basic_logging(args):
level = logging.WARNING
if args.verbose:
level = logging.INFO
if args.debug:
level = logging.DEBUG
if args.quiet:
level = logging.ERROR
logging.basicConfig(level=level, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
if __name__ == "__main__":
args = parse_args()
setup_basic_logging(args)
start_server(args)
|
py | 1a4fd66e3fd1d711049c1bb520763b150576cd8e | """
def factorial(n):
result=1
for num in range(1,n+1):
result*=num
return result
print(factorial(7)//factorial(3)//factorial(4))
"""
"""
from random import randint
def roll(n=2):
total=0
for i in range(n):
total+=randint(1,6)
return total
def add(a=0,b=0,c=0):
return a+b+c
print(roll())
print(roll(3))
"""
"""
def add(*num):
result=1
for i in num:
result+=i
return result
print(add())
print(add(1))
print(add(1,2,3,4))
"""
import module1 as m1
import module2 as m2
import module3 as m3
m1.care()
m2.hello()
m3.bar()
#////////def practice/////////
"""
def gcd(x,y):
if x>y:
x,y=y,x
for factor in range(x,1,-1):
if x%factor==0 and y%factor==0:
return factor
def lcm(x,y):
return x*y//gcd(x,y)
print(gcd(15,27))
print(lcm(15,27))
"""
|
py | 1a4fd75b4da7dab08304c2f2df0c500966ada81e | """
Django settings for apple_head_34247 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'apple_head_34247.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'apple_head_34247.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
py | 1a4fd79f4ec8e66de8aa0dbbfd6034ddde880b55 | """Tests for SDEC Plots."""
from tardis.base import run_tardis
import pytest
import pandas as pd
import numpy as np
import os
from copy import deepcopy
from tardis.visualization.tools.sdec_plot import SDECData, SDECPlotter
import astropy.units as u
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
import tables
import re
def make_valid_name(testid):
"""
Sanitize pytest IDs to make them valid HDF group names.
Parameters
----------
testid : str
ID to sanitize.
Returns
-------
testid : str
Sanitized ID.
"""
testid = testid.replace("-", "_")
testid = "_" + testid
return testid
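# Behaviour sketch (illustrative input only): make_valid_name("virtual-10 Mpc-3")
# returns "_virtual_10 Mpc_3" -- dashes become underscores and a leading
# underscore is prepended; other characters are left untouched.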
@pytest.fixture(scope="module")
def simulation_simple(config_verysimple, atomic_dataset):
"""
Instantiate SDEC plotter using a simple simulation model.
Parameters
----------
config_verysimple : tardis.io.config_reader.Configuration
Configuration object for a very simple simulation.
atomic_dataset : str or tardis.atomic.AtomData
Atomic data.
Returns
-------
sim: tardis.simulation.base.Simulation
Simulation object.
"""
# Setup simulation configuration using config_verysimple and
# override properties in such a way to make the simulation run faster
config_verysimple.montecarlo.iterations = 3
config_verysimple.montecarlo.no_of_packets = 4000
config_verysimple.montecarlo.last_no_of_packets = -1
config_verysimple.spectrum.virtual.virtual_packet_logging = True
config_verysimple.montecarlo.no_of_virtual_packets = 1
config_verysimple.spectrum.num = 2000
atomic_data = deepcopy(atomic_dataset)
sim = run_tardis(
config_verysimple,
atom_data=atomic_data,
show_convergence_plots=False,
)
return sim
@pytest.fixture(scope="module")
def sdec_ref_data_path(tardis_ref_path):
"""
Return the path to the reference data for the SDEC plots.
Parameters
----------
tardis_ref_path : str
Path to the reference data directory.
Returns
-------
str
Path to SDEC reference data.
"""
return os.path.abspath(os.path.join(tardis_ref_path, "sdec_ref.h5"))
class TestSDECPlotter:
"""Test the SDECPlotter class."""
@pytest.fixture(scope="class", autouse=True)
def create_hdf_file(self, request, sdec_ref_data_path):
"""
Create an HDF5 file object.
Parameters
----------
request : _pytest.fixtures.SubRequest
sdec_ref_data_path : str
Path to the reference data for the SDEC plots.
Yields
-------
h5py._hl.files.File
HDF5 file object.
"""
cls = type(self)
if request.config.getoption("--generate-reference"):
cls.hdf_file = tables.open_file(sdec_ref_data_path, "w")
else:
cls.hdf_file = tables.open_file(sdec_ref_data_path, "r")
yield cls.hdf_file
cls.hdf_file.close()
@pytest.fixture(scope="class")
def plotter(self, simulation_simple):
"""
Create a SDECPlotter object.
Parameters
----------
simulation_simple : tardis.simulation.base.Simulation
Simulation object.
Returns
-------
tardis.visualization.tools.sdec_plot.SDECPlotter
"""
return SDECPlotter.from_simulation(simulation_simple)
@pytest.fixture(scope="class")
def observed_spectrum(self):
"""
Return the observed spectrum.
Returns
-------
Tuple of two astropy.units.quantity.Quantity values.
"""
test_data = np.loadtxt(
"tardis/visualization/tools/tests/data/observed_spectrum_test_data.dat"
)
observed_spectrum_wavelength, observed_spectrum_flux = test_data.T
observed_spectrum_wavelength = observed_spectrum_wavelength * u.AA
observed_spectrum_flux = (
observed_spectrum_flux * u.erg / (u.s * u.cm ** 2 * u.AA)
)
return observed_spectrum_wavelength, observed_spectrum_flux
@pytest.mark.parametrize("species", [["Si II", "Ca II", "C", "Fe I-V"]])
def test_parse_species_list(self, request, plotter, species):
"""
Test _parse_species_list method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
species : list
"""
plotter._parse_species_list(species)
subgroup_name = make_valid_name(request.node.callspec.id)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_full_species_list", obj=plotter._full_species_list
)
self.hdf_file.create_carray(
group, name="_species_list", obj=plotter._species_list
)
self.hdf_file.create_carray(
group, name="_keep_colour", obj=plotter._keep_colour
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/" + subgroup_name)
# because plotter._full_species_list is an array of strings
np.testing.assert_equal(
np.asarray(plotter._full_species_list),
self.hdf_file.get_node(group, "_full_species_list")
.read()
.astype(str),
)
np.testing.assert_allclose(
np.asarray(plotter._species_list),
self.hdf_file.get_node(group, "_species_list"),
)
np.testing.assert_allclose(
np.asarray(plotter._keep_colour),
self.hdf_file.get_node(group, "_keep_colour"),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA])
@pytest.mark.parametrize("distance", [10 * u.Mpc, 50 * u.Mpc])
@pytest.mark.parametrize("nelements", [1, 3])
def test_calculate_plotting_data(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
nelements,
):
"""
Test _calculate_plotting_data method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
nelements : int
"""
plotter._calculate_plotting_data(
packets_mode, packet_wvl_range, distance, nelements
)
# each group is a different combination of arguments
subgroup_name = make_valid_name(request.node.callspec.id)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group,
name="plot_frequency_bins",
obj=plotter.plot_frequency_bins.cgs.value,
)
self.hdf_file.create_carray(
group,
name="plot_wavelength",
obj=plotter.plot_wavelength.cgs.value,
)
self.hdf_file.create_carray(
group,
name="plot_frequency",
obj=plotter.plot_frequency.cgs.value,
)
self.hdf_file.create_carray(
group,
name="packet_wvl_range_mask",
obj=plotter.packet_wvl_range_mask,
)
self.hdf_file.create_carray(
group, name="emission_species", obj=plotter.emission_species
)
self.hdf_file.create_carray(
group, name="absorption_species", obj=plotter.absorption_species
)
self.hdf_file.create_carray(
group,
name="modeled_spectrum_luminosity",
obj=plotter.modeled_spectrum_luminosity.cgs.value,
)
if isinstance(plotter.lum_to_flux, u.quantity.Quantity):
self.hdf_file.create_array(
group, name="lum_to_flux", obj=plotter.lum_to_flux.cgs.value
)
else:
self.hdf_file.create_array(
group, name="lum_to_flux", obj=plotter.lum_to_flux
)
self.hdf_file.create_carray(
group, name="species", obj=plotter.species.astype(np.float64)
)
plotter.absorption_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/absorption_luminosities_df",
)
plotter.emission_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/emission_luminosities_df",
)
plotter.total_luminosities_df.to_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/total_luminosities_df",
)
pytest.skip("Reference data was generated during this run.")
else:
# use the subgroup id to iterate over the hdf file
group = self.hdf_file.get_node("/" + subgroup_name)
np.testing.assert_allclose(
plotter.plot_frequency_bins.cgs.value,
self.hdf_file.get_node(group, "plot_frequency_bins"),
)
np.testing.assert_allclose(
plotter.plot_wavelength.cgs.value,
self.hdf_file.get_node(group, "plot_wavelength"),
)
np.testing.assert_allclose(
plotter.plot_frequency.cgs.value,
self.hdf_file.get_node(group, "plot_frequency"),
)
np.testing.assert_allclose(
plotter.modeled_spectrum_luminosity.cgs.value,
self.hdf_file.get_node(group, "modeled_spectrum_luminosity"),
)
np.testing.assert_allclose(
plotter.packet_wvl_range_mask,
self.hdf_file.get_node(group, "packet_wvl_range_mask"),
)
np.testing.assert_allclose(
plotter.absorption_species,
self.hdf_file.get_node(group, "absorption_species"),
)
np.testing.assert_allclose(
plotter.emission_species,
self.hdf_file.get_node(group, "emission_species"),
)
            if isinstance(plotter.lum_to_flux, u.quantity.Quantity):
                assert plotter.lum_to_flux.cgs.value == self.hdf_file.get_node(
                    group, "lum_to_flux"
                ).read()
            else:
                assert plotter.lum_to_flux == self.hdf_file.get_node(
                    group, "lum_to_flux"
                ).read()
np.testing.assert_allclose(
plotter.species.astype(np.float64),
self.hdf_file.get_node(group, "species"),
)
pd.testing.assert_frame_equal(
plotter.absorption_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/absorption_luminosities_df",
),
)
pd.testing.assert_frame_equal(
plotter.emission_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/emission_luminosities_df",
),
)
pd.testing.assert_frame_equal(
plotter.total_luminosities_df,
pd.read_hdf(
self.hdf_file.filename,
key=f"{subgroup_name}/total_luminosities_df",
),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA, None])
@pytest.mark.parametrize("distance", [10 * u.Mpc, None])
@pytest.mark.parametrize("show_modeled_spectrum", [True, False])
@pytest.mark.parametrize("nelements", [1, None])
@pytest.mark.parametrize(
"species_list", [["Si II", "Ca II", "C", "Fe I-V"], None]
)
def test_generate_plot_mpl(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
show_modeled_spectrum,
observed_spectrum,
nelements,
species_list,
):
"""
Test generate_plot_mpl method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
show_modeled_spectrum : bool
observed_spectrum : tuple of two astropy.units.quantity.Quantity values
nelements : int
species_list : list of str
"""
subgroup_name = make_valid_name("mpl" + request.node.callspec.id)
fig = plotter.generate_plot_mpl(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
show_modeled_spectrum=show_modeled_spectrum,
observed_spectrum=observed_spectrum if distance else None,
nelements=nelements,
species_list=species_list,
)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_species_name", obj=plotter._species_name
)
self.hdf_file.create_carray(
group, name="_color_list", obj=plotter._color_list
)
fig_subgroup = self.hdf_file.create_group(
group,
name="fig_data",
)
for index, data in enumerate(fig.get_children()):
trace_group = self.hdf_file.create_group(
fig_subgroup,
name="_" + str(index),
)
if isinstance(data.get_label(), str):
self.hdf_file.create_array(
trace_group, name="label", obj=data.get_label().encode()
)
# save artists which correspond to element contributions
if isinstance(data, PolyCollection):
for index, path in enumerate(data.get_paths()):
self.hdf_file.create_carray(
trace_group,
name="path" + str(index),
obj=path.vertices,
)
# save line plots
if isinstance(data, Line2D):
self.hdf_file.create_carray(
trace_group,
name="data",
obj=data.get_xydata(),
)
self.hdf_file.create_carray(
trace_group, name="path", obj=data.get_path().vertices
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/" + subgroup_name)
# test output of the _make_colorbar_labels function
            np.testing.assert_equal(
                np.asarray(plotter._species_name),
                self.hdf_file.get_node(group, "_species_name").read().astype(str),
            )
# test output of the _make_colorbar_colors function
np.testing.assert_allclose(
                np.asarray(plotter._color_list),
self.hdf_file.get_node(group, "_color_list"),
)
fig_subgroup = self.hdf_file.get_node(group, "fig_data")
for index, data in enumerate(fig.get_children()):
trace_group = self.hdf_file.get_node(
fig_subgroup, "_" + str(index)
)
if isinstance(data.get_label(), str):
assert (
data.get_label()
== self.hdf_file.get_node(trace_group, "label")
.read()
.decode()
)
# test element contributions
if isinstance(data, PolyCollection):
for index, path in enumerate(data.get_paths()):
np.testing.assert_allclose(
path.vertices,
self.hdf_file.get_node(
trace_group, "path" + str(index)
),
)
# compare line plot data
if isinstance(data, Line2D):
np.testing.assert_allclose(
data.get_xydata(),
self.hdf_file.get_node(trace_group, "data"),
)
np.testing.assert_allclose(
data.get_path().vertices,
self.hdf_file.get_node(trace_group, "path"),
)
@pytest.mark.parametrize("packets_mode", ["virtual", "real"])
@pytest.mark.parametrize("packet_wvl_range", [[500, 9000] * u.AA, None])
@pytest.mark.parametrize("distance", [10 * u.Mpc, None])
@pytest.mark.parametrize("show_modeled_spectrum", [True, False])
@pytest.mark.parametrize("nelements", [1, None])
@pytest.mark.parametrize(
"species_list", [["Si II", "Ca II", "C", "Fe I-V"], None]
)
def test_generate_plot_ply(
self,
request,
plotter,
packets_mode,
packet_wvl_range,
distance,
show_modeled_spectrum,
observed_spectrum,
nelements,
species_list,
):
"""
        Test generate_plot_ply method.
Parameters
----------
request : _pytest.fixtures.SubRequest
plotter : tardis.visualization.tools.sdec_plot.SDECPlotter
packets_mode : str
packet_wvl_range : astropy.units.quantity.Quantity
distance : astropy.units.quantity.Quantity
show_modeled_spectrum : bool
observed_spectrum : tuple of two astropy.units.quantity.Quantity values
nelements : int
species_list : list of str
"""
subgroup_name = make_valid_name("ply" + request.node.callspec.id)
fig = plotter.generate_plot_ply(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
show_modeled_spectrum=show_modeled_spectrum,
observed_spectrum=observed_spectrum if distance else None,
nelements=nelements,
species_list=species_list,
)
if request.config.getoption("--generate-reference"):
group = self.hdf_file.create_group(
self.hdf_file.root,
name=subgroup_name,
)
self.hdf_file.create_carray(
group, name="_species_name", obj=plotter._species_name
)
self.hdf_file.create_carray(
group, name="_color_list", obj=plotter._color_list
)
fig_subgroup = self.hdf_file.create_group(
group,
name="fig_data",
)
for index, data in enumerate(fig.data):
trace_group = self.hdf_file.create_group(
fig_subgroup,
name="_" + str(index),
)
if data.stackgroup:
self.hdf_file.create_array(
trace_group,
name="stackgroup",
obj=data.stackgroup.encode(),
)
if data.name:
self.hdf_file.create_array(
trace_group,
name="name",
obj=data.name.encode(),
)
self.hdf_file.create_carray(
trace_group,
name="x",
obj=data.x,
)
self.hdf_file.create_carray(
trace_group,
name="y",
obj=data.y,
)
pytest.skip("Reference data was generated during this run.")
else:
group = self.hdf_file.get_node("/", subgroup_name)
# test output of the _make_colorbar_labels function
            np.testing.assert_equal(
                np.asarray(plotter._species_name),
                self.hdf_file.get_node(group, "_species_name").read().astype(str),
            )
# test output of the _make_colorbar_colors function
np.testing.assert_allclose(
                np.asarray(plotter._color_list),
self.hdf_file.get_node(group, "_color_list"),
)
fig_subgroup = self.hdf_file.get_node(group, "fig_data")
for index, data in enumerate(fig.data):
trace_group = self.hdf_file.get_node(
fig_subgroup, "_" + str(index)
)
if data.stackgroup:
assert (
data.stackgroup
== self.hdf_file.get_node(trace_group, "stackgroup")
.read()
.decode()
)
if data.name:
assert (
data.name
== self.hdf_file.get_node(trace_group, "name")
.read()
.decode()
)
np.testing.assert_allclose(
self.hdf_file.get_node(trace_group, "x"), data.x
)
np.testing.assert_allclose(
self.hdf_file.get_node(trace_group, "y"), data.y
)
|
py | 1a4fd860cf13dc8e282102ea1b0d9f8a1aac2c9a | import os
import time
import unittest
from time import sleep
import pytest
import requests
from jina import JINA_GLOBAL
from jina.enums import FlowOptimizeLevel, SocketType
from jina.flow import Flow
from jina.main.checker import NetworkChecker
from jina.main.parser import set_pea_parser, set_ping_parser
from jina.main.parser import set_pod_parser
from jina.peapods.pea import BasePea
from jina.peapods.pod import BasePod
from tests import JinaTestCase, random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
class FlowTestCase(JinaTestCase):
def test_ping(self):
a1 = set_pea_parser().parse_args([])
a2 = set_ping_parser().parse_args(['0.0.0.0', str(a1.port_ctrl), '--print-response'])
a3 = set_ping_parser().parse_args(['0.0.0.1', str(a1.port_ctrl), '--timeout', '1000'])
with self.assertRaises(SystemExit) as cm:
with BasePea(a1):
NetworkChecker(a2)
self.assertEqual(cm.exception.code, 0)
        # test with bad address
with self.assertRaises(SystemExit) as cm:
with BasePea(a1):
NetworkChecker(a3)
self.assertEqual(cm.exception.code, 1)
def test_flow_with_jump(self):
f = (Flow().add(name='r1', uses='_forward')
.add(name='r2', uses='_forward')
.add(name='r3', uses='_forward', needs='r1')
.add(name='r4', uses='_forward', needs='r2')
.add(name='r5', uses='_forward', needs='r3')
.add(name='r6', uses='_forward', needs='r4')
.add(name='r8', uses='_forward', needs='r6')
.add(name='r9', uses='_forward', needs='r5')
.add(name='r10', uses='_merge', needs=['r9', 'r8']))
with f:
f.dry_run()
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r1']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r3']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r4']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r5']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r6']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r8']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r9']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r10']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_BIND)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
f.save_config('tmp.yml')
Flow.load_config('tmp.yml')
with Flow.load_config('tmp.yml') as fl:
fl.dry_run()
self.add_tmpfile('tmp.yml')
def test_simple_flow(self):
bytes_gen = (b'aaa' for _ in range(10))
def bytes_fn():
for _ in range(100):
yield b'aaa'
f = (Flow()
.add(uses='_forward'))
with f:
f.index(input_fn=bytes_gen)
with f:
f.index(input_fn=bytes_fn)
with f:
f.index(input_fn=bytes_fn)
f.index(input_fn=bytes_fn)
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['pod0']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_BIND)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
def test_load_flow_from_yaml(self):
with open(os.path.join(cur_dir, '../yaml/test-flow.yml')) as fp:
a = Flow.load_config(fp)
with open(os.path.join(cur_dir, '../yaml/swarm-out.yml'), 'w') as fp, a:
a.to_swarm_yaml(fp)
self.add_tmpfile(os.path.join(cur_dir, '../yaml/swarm-out.yml'))
def test_flow_identical(self):
with open(os.path.join(cur_dir, '../yaml/test-flow.yml')) as fp:
a = Flow.load_config(fp)
b = (Flow()
.add(name='chunk_seg', parallel=3)
.add(name='wqncode1', parallel=2)
.add(name='encode2', parallel=2, needs='chunk_seg')
.join(['wqncode1', 'encode2']))
a.save_config('test2.yml')
c = Flow.load_config('test2.yml')
self.assertEqual(a, b)
self.assertEqual(a, c)
self.add_tmpfile('test2.yml')
with a as f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['chunk_seg']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.head_args.socket_out, SocketType.ROUTER_BIND)
for arg in node.peas_args['peas']:
self.assertEqual(arg.socket_in, SocketType.DEALER_CONNECT)
self.assertEqual(arg.socket_out, SocketType.PUSH_CONNECT)
self.assertEqual(node.tail_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['wqncode1']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.head_args.socket_out, SocketType.ROUTER_BIND)
for arg in node.peas_args['peas']:
self.assertEqual(arg.socket_in, SocketType.DEALER_CONNECT)
self.assertEqual(arg.socket_out, SocketType.PUSH_CONNECT)
self.assertEqual(node.tail_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['encode2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.head_args.socket_out, SocketType.ROUTER_BIND)
for arg in node.peas_args['peas']:
self.assertEqual(arg.socket_in, SocketType.DEALER_CONNECT)
self.assertEqual(arg.socket_out, SocketType.PUSH_CONNECT)
self.assertEqual(node.tail_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
def test_dryrun(self):
f = (Flow()
.add(name='dummyEncoder', uses=os.path.join(cur_dir, '../mwu-encoder/mwu_encoder.yml')))
with f:
f.dry_run()
def test_pod_status(self):
args = set_pod_parser().parse_args(['--parallel', '3'])
with BasePod(args) as p:
self.assertEqual(len(p.status), p.num_peas)
for v in p.status:
self.assertIsNotNone(v)
def test_flow_no_container(self):
f = (Flow()
.add(name='dummyEncoder', uses=os.path.join(cur_dir, '../mwu-encoder/mwu_encoder.yml')))
with f:
f.index(input_fn=random_docs(10))
def test_flow_yaml_dump(self):
f = Flow(logserver_config=os.path.join(cur_dir, '../yaml/test-server-config.yml'),
optimize_level=FlowOptimizeLevel.IGNORE_GATEWAY,
no_gateway=True)
f.save_config('test1.yml')
fl = Flow.load_config('test1.yml')
self.assertEqual(f.args.logserver_config, fl.args.logserver_config)
self.assertEqual(f.args.optimize_level, fl.args.optimize_level)
self.add_tmpfile('test1.yml')
def test_flow_log_server(self):
f = Flow.load_config(os.path.join(cur_dir, '../yaml/test_log_server.yml'))
with f:
self.assertTrue(hasattr(JINA_GLOBAL.logserver, 'ready'))
# Ready endpoint
a = requests.get(
JINA_GLOBAL.logserver.address +
'/status/ready',
timeout=5)
self.assertEqual(a.status_code, 200)
# YAML endpoint
a = requests.get(
JINA_GLOBAL.logserver.address +
'/data/yaml',
timeout=5)
self.assertTrue(a.text.startswith('!Flow'))
self.assertEqual(a.status_code, 200)
# Pod endpoint
a = requests.get(
JINA_GLOBAL.logserver.address +
'/data/api/pod',
timeout=5)
self.assertTrue('pod' in a.json())
self.assertEqual(a.status_code, 200)
# Shutdown endpoint
a = requests.get(
JINA_GLOBAL.logserver.address +
'/action/shutdown',
timeout=5)
self.assertEqual(a.status_code, 200)
# Check ready endpoint after shutdown, check if server stopped
with self.assertRaises(requests.exceptions.ConnectionError):
a = requests.get(
JINA_GLOBAL.logserver.address +
'/status/ready',
timeout=5)
def test_shards(self):
f = Flow().add(name='doc_pb', uses=os.path.join(cur_dir, '../yaml/test-docpb.yml'), parallel=3,
separated_workspace=True)
with f:
f.index(input_fn=random_docs(1000), random_doc_id=False)
with f:
pass
self.add_tmpfile('test-docshard-tmp')
time.sleep(2)
def test_py_client(self):
f = (Flow().add(name='r1', uses='_forward')
.add(name='r2', uses='_forward')
.add(name='r3', uses='_forward', needs='r1')
.add(name='r4', uses='_forward', needs='r2')
.add(name='r5', uses='_forward', needs='r3')
.add(name='r6', uses='_forward', needs='r4')
.add(name='r8', uses='_forward', needs='r6')
.add(name='r9', uses='_forward', needs='r5')
.add(name='r10', uses='_merge', needs=['r9', 'r8']))
with f:
f.dry_run()
from jina.clients import py_client
py_client(port_expose=f.port_expose, host=f.host).dry_run(as_request='index')
with f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r1']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r3']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r4']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r5']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r6']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r8']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r9']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r10']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_BIND)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
def test_dry_run_with_two_pathways_diverging_at_gateway(self):
f = (Flow().add(name='r2', uses='_forward')
.add(name='r3', uses='_forward', needs='gateway')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r3']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
f.dry_run()
def test_dry_run_with_two_pathways_diverging_at_non_gateway(self):
f = (Flow().add(name='r1', uses='_forward')
.add(name='r2', uses='_forward')
.add(name='r3', uses='_forward', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r1']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r3']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
f.dry_run()
@pytest.mark.skip('this leads to zmq address conflicts on github')
def test_refactor_num_part(self):
sleep(3)
f = (Flow().add(name='r1', uses='_logforward', needs='gateway')
.add(name='r2', uses='_logforward', needs='gateway')
.join(['r1', 'r2']))
with f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r1']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
f.index_lines(lines=['abbcs', 'efgh'])
def test_refactor_num_part_proxy(self):
f = (Flow().add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1')
.add(name='r3', uses='_logforward', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r1']
self.assertEqual(node.head_args.socket_in, SocketType.PULL_BIND)
self.assertEqual(node.tail_args.socket_out, SocketType.PUB_BIND)
node = f._pod_nodes['r2']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
node = f._pod_nodes['r3']
self.assertEqual(node.head_args.socket_in, SocketType.SUB_CONNECT)
self.assertEqual(node.tail_args.socket_out, SocketType.PUSH_CONNECT)
for name, node in f._pod_nodes.items():
self.assertEqual(node.peas_args['peas'][0], node.head_args)
self.assertEqual(node.peas_args['peas'][0], node.tail_args)
f.index_lines(lines=['abbcs', 'efgh'])
def test_refactor_num_part_proxy_2(self):
f = (Flow().add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1', parallel=2)
.add(name='r3', uses='_logforward', needs='r1', parallel=3, polling='ALL')
.join(['r2', 'r3']))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
def test_refactor_num_part_2(self):
f = (Flow()
.add(name='r1', uses='_logforward', needs='gateway', parallel=3, polling='ALL'))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
f = (Flow()
.add(name='r1', uses='_logforward', needs='gateway', parallel=3))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
def test_index_text_files(self):
def validate(req):
for d in req.docs:
self.assertNotEqual(d.text, '')
f = (Flow(read_only=True).add(uses=os.path.join(cur_dir, '../yaml/datauriindex.yml'), timeout_ready=-1))
with f:
f.index_files('*.py', output_fn=validate, callback_on_body=True)
self.add_tmpfile('doc.gzip')
def test_flow_with_publish_driver(self):
f = (Flow()
.add(name='r2', uses='!OneHotTextEncoder')
.add(name='r3', uses='!OneHotTextEncoder', needs='gateway')
.join(needs=['r2', 'r3']))
def validate(req):
for d in req.docs:
self.assertIsNotNone(d.embedding)
with f:
f.index_lines(lines=['text_1', 'text_2'], output_fn=validate, callback_on_body=True)
if __name__ == '__main__':
unittest.main()
|
py | 1a4fd96f8312e0d7d686c4960ae8a027a759717a | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys
import os
from random import SystemRandom
import base64
import hmac
def generate_salt():
# This uses os.urandom() underneath
cryptogen = SystemRandom()
# Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for _ in range(16)]
return ''.join([format(r, 'x') for r in salt_sequence])
def generate_password():
"""Create 32 byte b64 password"""
return base64.urlsafe_b64encode(os.urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
if len(sys.argv) < 2:
sys.stderr.write('Please include username (and an optional password, will generate one if not provided) as an argument.\n')
sys.exit(0)
username = sys.argv[1]
salt = generate_salt()
if len(sys.argv) > 2:
password = sys.argv[2]
else:
password = generate_password()
password_hmac = password_to_hmac(salt, password)
print('String to be appended to bitcoinrush.conf:')
print('rpcauth={0}:{1}${2}'.format(username, salt, password_hmac))
print('Your password:\n{0}'.format(password))
if __name__ == '__main__':
main()
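# Hedged usage sketch (not part of the original script): invoking it with a
# username prints a line to paste into the config file plus the generated
# password; the concrete salt and password values are produced at runtime.
#   $ python rpcauth.py alice
#   String to be appended to bitcoinrush.conf:
#   rpcauth=alice:<hex salt>$<hmac-sha256 hex>
#   Your password:
#   <url-safe base64 password>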
|
py | 1a4fdac7a2f6d7698621f61573f115dbf6b18e4d | """A training script of PPO on OpenAI Gym Mujoco environments.
This script follows the settings of https://arxiv.org/abs/1709.06560 as much
as possible.
"""
import argparse
import functools
import chainer
from chainer import functions as F
from chainer import links as L
import gym
import gym.spaces
import numpy as np
import chainerrl
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import misc
def main():
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--env', type=str, default='Hopper-v2',
help='OpenAI Gym MuJoCo env to perform algorithm on.')
parser.add_argument('--num-envs', type=int, default=1,
help='Number of envs run in parallel.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--steps', type=int, default=2 * 10 ** 6,
help='Total number of timesteps to train the agent.')
parser.add_argument('--eval-interval', type=int, default=100000,
help='Interval in timesteps between evaluations.')
parser.add_argument('--eval-n-runs', type=int, default=100,
help='Number of episodes run for each evaluation.')
parser.add_argument('--render', action='store_true',
help='Render env states in a GUI window.')
parser.add_argument('--demo', action='store_true',
help='Just run evaluation, not training.')
parser.add_argument('--load-pretrained', action='store_true',
default=False)
parser.add_argument('--load', type=str, default='',
help='Directory to load agent from.')
parser.add_argument('--logger-level', type=int, default=logging.INFO,
help='Level of the root logger.')
parser.add_argument('--monitor', action='store_true',
help='Wrap env with gym.wrappers.Monitor.')
parser.add_argument('--log-interval', type=int, default=1000,
help='Interval in timesteps between outputting log'
' messages during training')
parser.add_argument('--update-interval', type=int, default=2048,
help='Interval in timesteps between model updates.')
parser.add_argument('--epochs', type=int, default=10,
help='Number of epochs to update model for per PPO'
' iteration.')
parser.add_argument('--batch-size', type=int, default=64,
help='Minibatch size')
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
# Set a random seed used in ChainerRL
misc.set_random_seed(args.seed, gpus=(args.gpu,))
# Set different random seeds for different subprocesses.
# If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
# If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
process_seeds = np.arange(args.num_envs) + args.seed * args.num_envs
assert process_seeds.max() < 2 ** 32
args.outdir = experiments.prepare_output_dir(args, args.outdir)
def make_env(process_idx, test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
process_seed = int(process_seeds[process_idx])
env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor:
env = chainerrl.wrappers.Monitor(env, args.outdir)
if args.render:
env = chainerrl.wrappers.Render(env)
return env
def make_batch_env(test):
return chainerrl.envs.MultiprocessVectorEnv(
[functools.partial(make_env, idx, test)
for idx, env in enumerate(range(args.num_envs))])
# Only for getting timesteps, and obs-action spaces
sample_env = gym.make(args.env)
timestep_limit = sample_env.spec.max_episode_steps
obs_space = sample_env.observation_space
action_space = sample_env.action_space
print('Observation space:', obs_space)
print('Action space:', action_space)
assert isinstance(action_space, gym.spaces.Box)
# Normalize observations based on their empirical mean and variance
obs_normalizer = chainerrl.links.EmpiricalNormalization(
obs_space.low.size, clip_threshold=5)
# While the original paper initialized weights by normal distribution,
# we use orthogonal initialization as the latest openai/baselines does.
winit = chainerrl.initializers.Orthogonal(1.)
winit_last = chainerrl.initializers.Orthogonal(1e-2)
action_size = action_space.low.size
policy = chainer.Sequential(
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, action_size, initialW=winit_last),
chainerrl.policies.GaussianHeadWithStateIndependentCovariance(
action_size=action_size,
var_type='diagonal',
var_func=lambda x: F.exp(2 * x), # Parameterize log std
var_param_init=0, # log std = 0 => std = 1
),
)
vf = chainer.Sequential(
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 64, initialW=winit),
F.tanh,
L.Linear(None, 1, initialW=winit),
)
# Combine a policy and a value function into a single model
model = chainerrl.links.Branched(policy, vf)
opt = chainer.optimizers.Adam(3e-4, eps=1e-5)
opt.setup(model)
agent = PPO(
model,
opt,
obs_normalizer=obs_normalizer,
gpu=args.gpu,
update_interval=args.update_interval,
minibatch_size=args.batch_size,
epochs=args.epochs,
clip_eps_vf=None,
entropy_coef=0,
standardize_advantages=True,
gamma=0.995,
lambd=0.97,
)
if args.load or args.load_pretrained:
# either load or load_pretrained must be false
assert not args.load or not args.load_pretrained
if args.load:
agent.load(args.load)
else:
agent.load(misc.download_model(
"PPO", args.env,
model_type="final")[0])
if args.demo:
env = make_batch_env(True)
eval_stats = experiments.eval_performance(
env=env,
agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
experiments.train_agent_batch_with_evaluation(
agent=agent,
env=make_batch_env(False),
eval_env=make_batch_env(True),
outdir=args.outdir,
steps=args.steps,
eval_n_steps=None,
eval_n_episodes=args.eval_n_runs,
eval_interval=args.eval_interval,
log_interval=args.log_interval,
max_episode_len=timestep_limit,
save_best_so_far_agent=False,
)
if __name__ == '__main__':
main()
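# Hedged usage note (not in the original script; the file name is an assumption):
# all flags below are defined by the argparse setup above.
#   # train on CPU for the default 2e6 steps
#   python train_ppo_gym.py --env Hopper-v2 --gpu -1 --outdir results
#   # evaluate an already-trained agent without further training
#   python train_ppo_gym.py --env Hopper-v2 --gpu -1 --demo --load results/<agent dir>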
|
py | 1a4fdb1ee70a5773dab5d28901df6525414453d7 | from string import punctuation, digits
import numpy as np
import random
# Part I
#pragma: coderesponse template
def get_order(n_samples):
try:
with open(str(n_samples) + '.txt') as fp:
line = fp.readline()
return list(map(int, line.split(',')))
except FileNotFoundError:
random.seed(1)
indices = list(range(n_samples))
random.shuffle(indices)
return indices
#pragma: coderesponse end
#pragma: coderesponse template
def hinge_loss_single(feature_vector, label, theta, theta_0):
"""
Finds the hinge loss on a single data point given specific classification
parameters.
Args:
feature_vector - A numpy array describing the given data point.
label - A real valued number, the correct classification of the data
point.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given data point and parameters.
"""
# Your code here
# raise NotImplementedError
agreement = label * (np.dot(theta, feature_vector) + theta_0)
if agreement >= 1:
h_loss = 0
else:
h_loss = 1 - agreement
return h_loss
#pragma: coderesponse end
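# Illustrative check (not part of the graded template): with
# feature_vector=[1, 2], label=1, theta=[-1, 1], theta_0=-0.2 the agreement is
# 1 * ((-1)*1 + 1*2 - 0.2) = 0.8 < 1, so the hinge loss is 1 - 0.8 ~= 0.2, e.g.
#   hinge_loss_single(np.array([1, 2]), 1, np.array([-1, 1]), -0.2)  # ~0.2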
#pragma: coderesponse template
def hinge_loss_full(feature_matrix, labels, theta, theta_0):
"""
Finds the total hinge loss on a set of data given specific classification
parameters.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A real number representing the hinge loss associated with the
given dataset and parameters. This number should be the average hinge
loss across all of the points in the feature matrix.
"""
# Your code here
# raise NotImplementedError
n = feature_matrix.shape[0]
theta_vec = np.repeat(theta_0, n)
# gives the nx1 agreement vector
agreement = labels.T * (np.matmul(feature_matrix, theta) + theta_vec.T)
h_loss = (1.0/n) * np.sum(1 - agreement[agreement < 1])
return h_loss
#pragma: coderesponse end
#pragma: coderesponse template
def perceptron_single_step_update(
feature_vector,
label,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the perceptron algorithm.
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
current_theta - The current theta being used by the perceptron
algorithm before this update.
current_theta_0 - The current theta_0 being used by the perceptron
algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
test = label * (np.dot(current_theta, feature_vector) + current_theta_0)
if test <= 10**-9:
current_theta += label * feature_vector
current_theta_0 += label
return current_theta, current_theta_0
# raise NotImplementedError
#pragma: coderesponse end
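# Worked single update (illustration only): starting from theta=[0, 0] and
# theta_0=0, the point x=[1, 1] with label y=1 gives y*(theta.x + theta_0) = 0,
# which is within the 1e-9 tolerance above, so the update returns
# theta=[1, 1] and theta_0=1.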
#pragma: coderesponse template
def perceptron(feature_matrix, labels, T):
"""
Runs the full perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
theta, the linear classification parameter, after T iterations through the
feature matrix and the second element is a real number with the value of
theta_0, the offset classification parameter, after T iterations through
the feature matrix.
"""
# Your code here
n_cols = feature_matrix.shape[1]
# initialize
theta = np.zeros(n_cols)
theta_0 = 0.0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i, :], labels[i], theta, theta_0)
return theta, theta_0
# raise NotImplementedError
#pragma: coderesponse end
#pragma: coderesponse template
def average_perceptron(feature_matrix, labels, T):
"""
Runs the average perceptron algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
NOTE: Iterate the data matrix by the orders returned by get_order(feature_matrix.shape[0])
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the perceptron algorithm
should iterate through the feature matrix.
Returns: A tuple where the first element is a numpy array with the value of
the average theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the average theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
Hint: It is difficult to keep a running average; however, it is simple to
find a sum and divide.
"""
# Your code here
# raise NotImplementedError
iter_times = feature_matrix.shape[0] * T
n_cols = feature_matrix.shape[1]
# initialize
theta = np.zeros(n_cols)
theta_0 = 0.0
# track theta
theta_sum = np.zeros(n_cols)
theta_0_sum = 0.0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = perceptron_single_step_update(feature_matrix[i, :], labels[i], theta, theta_0)
theta_sum += theta
theta_0_sum += theta_0
theta_final = theta_sum / iter_times
theta_0_final = theta_0_sum / iter_times
return theta_final, theta_0_final
#pragma: coderesponse end
#pragma: coderesponse template
def pegasos_single_step_update(
feature_vector,
label,
L,
eta,
current_theta,
current_theta_0):
"""
Properly updates the classification parameter, theta and theta_0, on a
single step of the Pegasos algorithm
Args:
feature_vector - A numpy array describing a single data point.
label - The correct classification of the feature vector.
L - The lamba value being used to update the parameters.
eta - Learning rate to update parameters.
current_theta - The current theta being used by the Pegasos
algorithm before this update.
current_theta_0 - The current theta_0 being used by the
Pegasos algorithm before this update.
Returns: A tuple where the first element is a numpy array with the value of
theta after the current update has completed and the second element is a
real valued number with the value of theta_0 after the current updated has
completed.
"""
# Your code here
# raise NotImplementedError
if label * (np.dot(current_theta, feature_vector) + current_theta_0) <= 1:
current_theta = (1 - eta * L) * current_theta + eta * label * feature_vector
current_theta_0 = current_theta_0 + eta * label
else:
current_theta = (1 - eta * L) * current_theta
current_theta_0 = current_theta_0
return current_theta, current_theta_0
#pragma: coderesponse end
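# Worked single Pegasos step (illustration; numbers chosen for readability):
# with L=0.2, eta=0.5, theta=[1, 0], theta_0=0 and the point x=[0, 1], y=1,
# the margin y*(theta.x + theta_0) = 0 <= 1, so the update gives
# theta = (1 - 0.5*0.2)*[1, 0] + 0.5*1*[0, 1] = [0.9, 0.5] and theta_0 = 0.5.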
#pragma: coderesponse template
def pegasos(feature_matrix, labels, T, L):
"""
Runs the Pegasos algorithm on a given set of data. Runs T
iterations through the data set, there is no need to worry about
stopping early.
For each update, set learning rate = 1/sqrt(t),
where t is a counter for the number of updates performed so far (between 1
and nT inclusive).
NOTE: Please use the previously implemented functions when applicable.
Do not copy paste code from previous parts.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
labels - A numpy array where the kth element of the array is the
correct classification of the kth row of the feature matrix.
T - An integer indicating how many times the algorithm
should iterate through the feature matrix.
L - The lamba value being used to update the Pegasos
algorithm parameters.
Returns: A tuple where the first element is a numpy array with the value of
the theta, the linear classification parameter, found after T
iterations through the feature matrix and the second element is a real
number with the value of the theta_0, the offset classification
parameter, found after T iterations through the feature matrix.
"""
# Your code here
# raise NotImplementedError
n_cols = feature_matrix.shape[1]
# n_iters = n_rows * T
theta = np.zeros(n_cols)
theta_0 = 0.0
eta = 1.0
n_update = 0
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
theta, theta_0 = pegasos_single_step_update(feature_matrix[i, :], labels[i], L, eta, theta, theta_0)
n_update += 1
eta = 1.0 / np.sqrt(n_update + 1)
return theta, theta_0
#pragma: coderesponse end
# Part II
#pragma: coderesponse template
def classify(feature_matrix, theta, theta_0):
"""
A classification function that uses theta and theta_0 to classify a set of
data points.
Args:
feature_matrix - A numpy matrix describing the given data. Each row
represents a single data point.
        theta - A numpy array describing the linear classifier.
theta_0 - A real valued number representing the offset parameter.
Returns: A numpy array of 1s and -1s where the kth element of the array is
the predicted classification of the kth row of the feature matrix using the
given theta and theta_0. If a prediction is GREATER THAN zero, it should
be considered a positive classification.
"""
# Your code here
# raise NotImplementedError
y_hat = np.sign(np.matmul(feature_matrix, theta) + np.repeat(theta_0, feature_matrix.shape[0]))
y_hat[y_hat == 0] = -1
return y_hat
#pragma: coderesponse end
#pragma: coderesponse template
def classifier_accuracy(
classifier,
train_feature_matrix,
val_feature_matrix,
train_labels,
val_labels,
**kwargs):
"""
Trains a linear classifier using the perceptron algorithm with a given T
value. The classifier is trained on the train data. The classifier's
accuracy on the train and validation data is then returned.
Args:
classifier - A classifier function that takes arguments
(feature matrix, labels, **kwargs)
train_feature_matrix - A numpy matrix describing the training
data. Each row represents a single data point.
val_feature_matrix - A numpy matrix describing the training
data. Each row represents a single data point.
train_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the training
feature matrix.
val_labels - A numpy array where the kth element of the array
is the correct classification of the kth row of the validation
feature matrix.
**kwargs - Additional named arguments to pass to the classifier
(e.g. T or L)
Returns: A tuple in which the first element is the (scalar) accuracy of the
trained classifier on the training data and the second element is the
accuracy of the trained classifier on the validation data.
"""
# Your code here
# raise NotImplementedError
theta, theta_0 = classifier(train_feature_matrix, train_labels, **kwargs)
y_hat_train = classify(train_feature_matrix, theta, theta_0)
y_hat_val = classify(val_feature_matrix, theta, theta_0)
accuracy_train = accuracy(y_hat_train, train_labels)
accuracy_val = accuracy(y_hat_val, val_labels)
return accuracy_train, accuracy_val
#pragma: coderesponse end
#pragma: coderesponse template
def extract_words(input_string):
"""
Helper function for bag_of_words()
Inputs a text string
Returns a list of lowercase words in the string.
Punctuation and digits are separated out into their own words.
"""
for c in punctuation + digits:
input_string = input_string.replace(c, ' ' + c + ' ')
return input_string.lower().split()
#pragma: coderesponse end
#pragma: coderesponse template
def bag_of_words(texts):
"""
Inputs a list of string reviews
Returns a dictionary of unique unigrams occurring over the input while removing stopwords
Feel free to change this code as guided by Problem 9
"""
# Your code here
stopwords = np.loadtxt('stopwords.txt', dtype='str')
dictionary = {} # maps word to unique
for text in texts:
word_list = extract_words(text)
for word in word_list:
if word not in dictionary and word not in stopwords:
dictionary[word] = len(dictionary)
return dictionary
#pragma: coderesponse end
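# Small illustration (not part of the template; assumes "the" appears in
# stopwords.txt): bag_of_words(["the best movie", "best plot"]) would map the
# remaining unique words to indices, e.g. {"best": 0, "movie": 1, "plot": 2}.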
#pragma: coderesponse template
def extract_bow_feature_vectors(reviews, dictionary):
"""
Inputs a list of string reviews
Inputs the dictionary of words as given by bag_of_words
Returns the bag-of-words feature matrix representation of the data.
The returned matrix is of shape (n, m), where n is the number of reviews
and m the total number of entries in the dictionary.
Feel free to change this code as guided by Problem 9
"""
# Your code here
num_reviews = len(reviews)
feature_matrix = np.zeros([num_reviews, len(dictionary)])
for i, text in enumerate(reviews):
word_list = extract_words(text)
for word in word_list:
if word in dictionary:
feature_matrix[i, dictionary[word]] += 1
return feature_matrix
#pragma: coderesponse end
#pragma: coderesponse template
def accuracy(preds, targets):
"""
Given length-N vectors containing predicted and target labels,
    returns the fraction of correct predictions.
"""
return (preds == targets).mean()
#pragma: coderesponse end
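# Minimal end-to-end sketch (illustrative only; the toy data is made up):
# train the perceptron on a tiny separable set and score it with the helpers
# defined above.
#   toy_features = np.array([[1., 1.], [2., 2.], [-1., -1.], [-2., -2.]])
#   toy_labels = np.array([1, 1, -1, -1])
#   theta, theta_0 = perceptron(toy_features, toy_labels, T=5)
#   preds = classify(toy_features, theta, theta_0)
#   print(accuracy(preds, toy_labels))  # expected to reach 1.0 on this toy set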
|
py | 1a4fdb65a27f1ec6fedf8d725afc8e9312117c39 | #
# The Python Imaging Library.
# $Id$
#
# IFUNC IM file handling for PIL
#
# history:
# 1995-09-01 fl Created.
# 1997-01-03 fl Save palette images
# 1997-01-08 fl Added sequence support
# 1997-01-23 fl Added P and RGB save support
# 1997-05-31 fl Read floating point images
# 1997-06-22 fl Save floating point images
# 1997-08-27 fl Read and save 1-bit images
# 1998-06-25 fl Added support for RGB+LUT images
# 1998-07-02 fl Added support for YCC images
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 1998-12-29 fl Added I;16 support
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
# 2003-09-26 fl Added LA/PA support
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import re
from . import Image, ImageFile, ImagePalette
from ._binary import i8
__version__ = "0.7"
# --------------------------------------------------------------------
# Standard tags
COMMENT = "Comment"
DATE = "Date"
EQUIPMENT = "Digitalization equipment"
FRAMES = "File size (no of images)"
LUT = "Lut"
NAME = "Name"
SCALE = "Scale (x,y)"
SIZE = "Image size (x*y)"
MODE = "Image type"
TAGS = {COMMENT: 0, DATE: 0, EQUIPMENT: 0, FRAMES: 0, LUT: 0, NAME: 0,
SCALE: 0, SIZE: 0, MODE: 0}
OPEN = {
# ifunc93/p3cfunc formats
"0 1 image": ("1", "1"),
"L 1 image": ("1", "1"),
"Greyscale image": ("L", "L"),
"Grayscale image": ("L", "L"),
"RGB image": ("RGB", "RGB;L"),
"RLB image": ("RGB", "RLB"),
"RYB image": ("RGB", "RLB"),
"B1 image": ("1", "1"),
"B2 image": ("P", "P;2"),
"B4 image": ("P", "P;4"),
"X 24 image": ("RGB", "RGB"),
"L 32 S image": ("I", "I;32"),
"L 32 F image": ("F", "F;32"),
# old p3cfunc formats
"RGB3 image": ("RGB", "RGB;T"),
"RYB3 image": ("RGB", "RYB;T"),
# extensions
"LA image": ("LA", "LA;L"),
"RGBA image": ("RGBA", "RGBA;L"),
"RGBX image": ("RGBX", "RGBX;L"),
"CMYK image": ("CMYK", "CMYK;L"),
"YCC image": ("YCbCr", "YCbCr;L"),
}
# ifunc95 extensions
for i in ["8", "8S", "16", "16S", "32", "32F"]:
OPEN["L %s image" % i] = ("F", "F;%s" % i)
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
for i in ["16", "16L", "16B"]:
OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i)
OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i)
for i in ["32S"]:
OPEN["L %s image" % i] = ("I", "I;%s" % i)
OPEN["L*%s image" % i] = ("I", "I;%s" % i)
for i in range(2, 33):
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
# --------------------------------------------------------------------
# Read IM directory
split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
def number(s):
try:
return int(s)
except ValueError:
return float(s)
##
# Image plugin for the IFUNC IM file format.
class ImImageFile(ImageFile.ImageFile):
format = "IM"
format_description = "IFUNC Image Memory"
_close_exclusive_fp_after_loading = False
def _open(self):
# Quick rejection: if there's not an LF among the first
# 100 bytes, this is (probably) not a text header.
if b"\n" not in self.fp.read(100):
raise SyntaxError("not an IM file")
self.fp.seek(0)
n = 0
# Default values
self.info[MODE] = "L"
self.info[SIZE] = (512, 512)
self.info[FRAMES] = 1
self.rawmode = "L"
while True:
s = self.fp.read(1)
# Some versions of IFUNC uses \n\r instead of \r\n...
if s == b"\r":
continue
if not s or s == b'\0' or s == b'\x1A':
break
# FIXME: this may read whole file if not a text file
s = s + self.fp.readline()
if len(s) > 100:
raise SyntaxError("not an IM file")
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] == b'\n':
s = s[:-1]
try:
m = split.match(s)
except re.error:
raise SyntaxError("not an IM file")
if m:
k, v = m.group(1, 2)
# Don't know if this is the correct encoding,
# but a decent guess (I guess)
k = k.decode('latin-1', 'replace')
v = v.decode('latin-1', 'replace')
# Convert value as appropriate
if k in [FRAMES, SCALE, SIZE]:
v = v.replace("*", ",")
v = tuple(map(number, v.split(",")))
if len(v) == 1:
v = v[0]
elif k == MODE and v in OPEN:
v, self.rawmode = OPEN[v]
# Add to dictionary. Note that COMMENT tags are
# combined into a list of strings.
if k == COMMENT:
if k in self.info:
self.info[k].append(v)
else:
self.info[k] = [v]
else:
self.info[k] = v
if k in TAGS:
n += 1
else:
raise SyntaxError("Syntax error in IM header: " +
s.decode('ascii', 'replace'))
if not n:
raise SyntaxError("Not an IM file")
# Basic attributes
self._size = self.info[SIZE]
self.mode = self.info[MODE]
# Skip forward to start of image data
while s and s[0:1] != b'\x1A':
s = self.fp.read(1)
if not s:
raise SyntaxError("File truncated")
if LUT in self.info:
# convert lookup table to palette or lut attribute
palette = self.fp.read(768)
greyscale = 1 # greyscale palette
linear = 1 # linear greyscale palette
for i in range(256):
if palette[i] == palette[i+256] == palette[i+512]:
if i8(palette[i]) != i:
linear = 0
else:
greyscale = 0
if self.mode == "L" or self.mode == "LA":
if greyscale:
if not linear:
self.lut = [i8(c) for c in palette[:256]]
else:
if self.mode == "L":
self.mode = self.rawmode = "P"
elif self.mode == "LA":
self.mode = self.rawmode = "PA"
self.palette = ImagePalette.raw("RGB;L", palette)
elif self.mode == "RGB":
if not greyscale or not linear:
self.lut = [i8(c) for c in palette]
self.frame = 0
self.__offset = offs = self.fp.tell()
self.__fp = self.fp # FIXME: hack
if self.rawmode[:2] == "F;":
# ifunc95 formats
try:
# use bit decoder (if necessary)
bits = int(self.rawmode[2:])
if bits not in [8, 16, 32]:
self.tile = [("bit", (0, 0)+self.size, offs,
(bits, 8, 3, 0, -1))]
return
except ValueError:
pass
if self.rawmode in ["RGB;T", "RYB;T"]:
# Old LabEye/3PC files. Would be very surprised if anyone
# ever stumbled upon such a file ;-)
size = self.size[0] * self.size[1]
self.tile = [("raw", (0, 0)+self.size, offs, ("G", 0, -1)),
("raw", (0, 0)+self.size, offs+size, ("R", 0, -1)),
("raw", (0, 0)+self.size, offs+2*size, ("B", 0, -1))]
else:
# LabEye/IFUNC files
self.tile = [("raw", (0, 0)+self.size, offs,
(self.rawmode, 0, -1))]
@property
def n_frames(self):
return self.info[FRAMES]
@property
def is_animated(self):
return self.info[FRAMES] > 1
def seek(self, frame):
if not self._seek_check(frame):
return
self.frame = frame
if self.mode == "1":
bits = 1
else:
bits = 8 * len(self.mode)
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
offs = self.__offset + frame * size
self.fp = self.__fp
self.tile = [("raw", (0, 0)+self.size, offs, (self.rawmode, 0, -1))]
def tell(self):
return self.frame
def _close__fp(self):
try:
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None
#
# --------------------------------------------------------------------
# Save IM files
SAVE = {
# mode: (im type, raw mode)
"1": ("0 1", "1"),
"L": ("Greyscale", "L"),
"LA": ("LA", "LA;L"),
"P": ("Greyscale", "P"),
"PA": ("LA", "PA;L"),
"I": ("L 32S", "I;32S"),
"I;16": ("L 16", "I;16"),
"I;16L": ("L 16L", "I;16L"),
"I;16B": ("L 16B", "I;16B"),
"F": ("L 32F", "F;32F"),
"RGB": ("RGB", "RGB;L"),
"RGBA": ("RGBA", "RGBA;L"),
"RGBX": ("RGBX", "RGBX;L"),
"CMYK": ("CMYK", "CMYK;L"),
"YCbCr": ("YCC", "YCbCr;L")
}
def _save(im, fp, filename):
try:
image_type, rawmode = SAVE[im.mode]
except KeyError:
raise ValueError("Cannot save %s images as IM" % im.mode)
frames = im.encoderinfo.get("frames", 1)
fp.write(("Image type: %s image\r\n" % image_type).encode('ascii'))
if filename:
fp.write(("Name: %s\r\n" % filename).encode('ascii'))
fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii'))
fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii'))
if im.mode == "P":
fp.write(b"Lut: 1\r\n")
fp.write(b"\000" * (511-fp.tell()) + b"\032")
if im.mode == "P":
fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0, (rawmode, 0, -1))])
#
# --------------------------------------------------------------------
# Registry
Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)
Image.register_extension(ImImageFile.format, ".im")
|
py | 1a4fdbb937939f6560aa63ca60a6fa96d1bcb140 | # Generated by Django 3.1.2 on 2020-10-06 17:37
import django.db.models.deletion
import django_countries.fields
import django_inet.models
from django.db import migrations, models
import django_peeringdb.models.abstract
class Migration(migrations.Migration):
dependencies = [
("django_peeringdb", "0011_ixlan_ixf_fields"),
]
operations = [
migrations.AlterField(
model_name="network",
name="info_prefixes4",
field=models.PositiveIntegerField(
blank=True,
help_text="Recommended maximum number of IPv4 routes/prefixes to be configured on peering sessions for this ASN",
null=True,
verbose_name="IPv4 Prefixes",
),
),
migrations.AlterField(
model_name="network",
name="info_prefixes6",
field=models.PositiveIntegerField(
blank=True,
help_text="Recommended maximum number of IPv6 routes/prefixes to be configured on peering sessions for this ASN",
null=True,
verbose_name="IPv6 Prefixes",
),
),
migrations.AlterField(
model_name="network",
name="info_type",
field=models.CharField(
blank=True,
choices=[
("", "Not Disclosed"),
("Not Disclosed", "Not Disclosed"),
("NSP", "NSP"),
("Content", "Content"),
("Cable/DSL/ISP", "Cable/DSL/ISP"),
("Enterprise", "Enterprise"),
("Educational/Research", "Educational/Research"),
("Non-Profit", "Non-Profit"),
("Route Server", "Route Server"),
("Network Services", "Network Services"),
("Route Collector", "Route Collector"),
("Government", "Government"),
],
default="Not Disclosed",
max_length=60,
verbose_name="Network Type",
),
),
]
|
py | 1a4fdca7dd883bde551fae5c69b85a3103d27e8e | # Flask-TurnKey Version 0.0.1
# Models.py
import datetime
from app import db
from peewee import *
from flask_turboduck.auth import BaseUser
# -----------------------------------------------------
# User Class
class User(db.Model, BaseUser):
username = CharField(unique=True)
password = CharField()
email = CharField()
join_date = DateTimeField(default=datetime.datetime.now)
active = BooleanField(default=True)
admin = BooleanField(default=False)
def __unicode__(self):
return self.username
|
py | 1a4fdd46eed4fdfad9ca9178ca3eb07e63ddb54d | from django.contrib import admin
# from django.contrib.admin import ModelAdmin
from leaflet.admin import LeafletGeoAdmin
from .models import (
RainfallEvent,
Pixel,
Gauge
)
# customize admin site info
admin.site.site_header = '3RWW API'
admin.site.site_title = '3RWW API'
admin.site.index_title = '3RWW API'
class RainfallEventAdmin(admin.ModelAdmin):
list_filter = ('start_dt', 'end_dt')
search_fields = ['start_dt', 'end_dt', 'report_label', 'event_label']
for i in [
[RainfallEvent, RainfallEventAdmin],
[Pixel, LeafletGeoAdmin],
[Gauge, LeafletGeoAdmin]
]:
admin.site.register(*i) |
bzl | 1a4fdd63e591cc5fd0cfe299d1d4522a6d6ac93a | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LOCAL MODIFICATIONS:
# this has two PRs patched in:
# https://github.com/bazelbuild/bazel-skylib/pull/323
# https://github.com/bazelbuild/bazel-skylib/pull/324
"""Implementation of copy_file macro and underlying rules.
These rules copy a file or directory to another location using Bash (on Linux/macOS) or
cmd.exe (on Windows). `_copy_xfile` marks the resulting file executable,
`_copy_file` does not.
"""
# Hints for Bazel spawn strategy
_execution_requirements = {
# Copying files is entirely IO-bound and there is no point doing this work remotely.
# Also, remote-execution does not allow source directory inputs, see
# https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
# So we must not attempt to execute remotely in that case.
"no-remote-exec": "1",
}
def _hash_file(file):
return str(hash(file.path))
# buildifier: disable=function-docstring
def copy_cmd(ctx, src, dst):
# Most Windows binaries built with MSVC use a certain argument quoting
# scheme. Bazel uses that scheme too to quote arguments. However,
# cmd.exe uses different semantics, so Bazel's quoting is wrong here.
# To fix that we write the command to a .bat file so no command line
# quoting or escaping is required.
# Put a hash of the file name into the name of the generated batch file to
# make it unique within the package, so that users can define multiple copy_file's.
bat = ctx.actions.declare_file("%s-%s-cmd.bat" % (ctx.label.name, _hash_file(src)))
# Flags are documented at
# https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/copy
# https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/robocopy
# NB: robocopy return non-zero exit codes on success so we must exit 0 after calling it
if dst.is_directory:
cmd_tmpl = "@robocopy \"{src}\" \"{dst}\" /E >NUL & @exit 0"
mnemonic = "CopyDirectory"
progress_message = "Copying directory %s" % src.path
else:
cmd_tmpl = "@copy /Y \"{src}\" \"{dst}\" >NUL"
mnemonic = "CopyFile"
progress_message = "Copying file %s" % src.path
ctx.actions.write(
output = bat,
# Do not use lib/shell.bzl's shell.quote() method, because that uses
# Bash quoting syntax, which is different from cmd.exe's syntax.
content = cmd_tmpl.format(
src = src.path.replace("/", "\\"),
dst = dst.path.replace("/", "\\"),
),
is_executable = True,
)
ctx.actions.run(
inputs = [src],
tools = [bat],
outputs = [dst],
executable = "cmd.exe",
arguments = ["/C", bat.path.replace("/", "\\")],
mnemonic = mnemonic,
progress_message = progress_message,
use_default_shell_env = True,
execution_requirements = _execution_requirements,
)
# buildifier: disable=function-docstring
def copy_bash(ctx, src, dst):
if dst.is_directory:
cmd_tmpl = "rm -rf \"$2\" && cp -rf \"$1/\" \"$2\""
mnemonic = "CopyDirectory"
progress_message = "Copying directory %s" % src.path
else:
cmd_tmpl = "cp -f \"$1\" \"$2\""
mnemonic = "CopyFile"
progress_message = "Copying file %s" % src.path
ctx.actions.run_shell(
tools = [src],
outputs = [dst],
command = cmd_tmpl,
arguments = [src.path, dst.path],
mnemonic = mnemonic,
progress_message = progress_message,
use_default_shell_env = True,
execution_requirements = _execution_requirements,
)
def _copy_file_impl(ctx):
# When creating a directory, declare that to Bazel so downstream rules
# see it as a TreeArtifact and handle correctly, e.g. for remote execution
if getattr(ctx.attr, "is_directory", False):
output = ctx.actions.declare_directory(ctx.attr.out)
else:
output = ctx.outputs.out
if ctx.attr.allow_symlink:
if output.is_directory:
fail("Cannot use both is_directory and allow_symlink")
ctx.actions.symlink(
output = output,
target_file = ctx.file.src,
is_executable = ctx.attr.is_executable,
)
elif ctx.attr.is_windows:
copy_cmd(ctx, ctx.file.src, output)
else:
copy_bash(ctx, ctx.file.src, output)
files = depset(direct = [output])
runfiles = ctx.runfiles(files = [output])
if ctx.attr.is_executable:
return [DefaultInfo(files = files, runfiles = runfiles, executable = output)]
else:
return [DefaultInfo(files = files, runfiles = runfiles)]
_ATTRS = {
"src": attr.label(mandatory = True, allow_single_file = True),
"is_windows": attr.bool(mandatory = True),
"is_executable": attr.bool(mandatory = True),
"allow_symlink": attr.bool(mandatory = True),
}
_copy_directory = rule(
implementation = _copy_file_impl,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"is_directory": attr.bool(default = True),
# Cannot declare out as an output here, because there's no API for declaring
# TreeArtifact outputs.
"out": attr.string(mandatory = True),
}),
)
_copy_file = rule(
implementation = _copy_file_impl,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"out": attr.output(mandatory = True),
}),
)
_copy_xfile = rule(
implementation = _copy_file_impl,
executable = True,
provides = [DefaultInfo],
attrs = dict(_ATTRS, **{
"out": attr.output(mandatory = True),
}),
)
def copy_file(name, src, out, is_directory = False, is_executable = False, allow_symlink = False, **kwargs):
"""Copies a file or directory to another location.
`native.genrule()` is sometimes used to copy files (often wishing to rename them). The 'copy_file' rule does this with a simpler interface than genrule.
This rule uses a Bash command on Linux/macOS/non-Windows, and a cmd.exe command on Windows (no Bash is required).
If using this rule with source directories, it is recommended that you use the
`--host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1` startup option so that changes
to files within source directories are detected. See
https://github.com/bazelbuild/bazel/commit/c64421bc35214f0414e4f4226cc953e8c55fa0d2
for more context.
Args:
name: Name of the rule.
src: A Label. The file or directory to make a copy of.
(Can also be the label of a rule that generates a file or directory.)
out: Path of the output file, relative to this package.
is_directory: treat the source file as a directory
Workaround for https://github.com/bazelbuild/bazel/issues/12954
is_executable: A boolean. Whether to make the output file executable. When
True, the rule's output can be executed using `bazel run` and can be
in the srcs of binary and test rules that require executable sources.
WARNING: If `allow_symlink` is True, `src` must also be executable.
allow_symlink: A boolean. Whether to allow symlinking instead of copying.
When False, the output is always a hard copy. When True, the output
*can* be a symlink, but there is no guarantee that a symlink is
created (i.e., at the time of writing, we don't create symlinks on
Windows). Set this to True if you need fast copying and your tools can
handle symlinks (which most UNIX tools can).
**kwargs: further keyword arguments, e.g. `visibility`
"""
copy_file_impl = _copy_file
if is_executable:
copy_file_impl = _copy_xfile
elif is_directory:
copy_file_impl = _copy_directory
copy_file_impl(
name = name,
src = src,
out = out,
is_windows = select({
"@bazel_tools//src/conditions:host_windows": True,
"//conditions:default": False,
}),
is_executable = is_executable,
allow_symlink = allow_symlink,
**kwargs
)
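# A hedged usage sketch (not part of the original file): how a BUILD file might invoke
# the copy_file macro defined above. The load label "//tools:copy_file.bzl" and the
# target/file names are assumptions for illustration only.
#
# load("//tools:copy_file.bzl", "copy_file")
#
# copy_file(
#     name = "copy_readme",
#     src = "README.md",
#     out = "docs/README.md",
#     is_executable = False,
#     allow_symlink = False,
# )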
|
py | 1a4fdd85e2a6fc97bc23a031013edf5c4a04e50d | # qubit number=4
# total number=43
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
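# Worked example (hypothetical inputs): bitwise_xor("101", "110") gives per-position
# XOR ['0', '1', '1'] and, because the result is joined in reverse order, returns "110";
# bitwise_dot("101", "110") computes 1*1 + 0*1 + 1*0 = 1, so it returns str(1 % 2) = "1".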
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.y(input_qubit[1]) # number=37
prog.h(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=32
prog.h(input_qubit[0]) # number=40
prog.cz(input_qubit[3],input_qubit[0]) # number=41
prog.h(input_qubit[0]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=26
prog.z(input_qubit[3]) # number=27
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[3],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.h(input_qubit[2]) # number=36
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.y(input_qubit[2]) # number=38
prog.y(input_qubit[2]) # number=39
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot = 8000
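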
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit3069.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a4fde60691653e319dc75459b985f1fbb56679e | from django.contrib.auth.models import User
from rest_framework import serializers
from lists.models import Todo, TodoList
class UserSerializer(serializers.ModelSerializer):
todolists = serializers.PrimaryKeyRelatedField(
many=True, queryset=TodoList.objects.all()
)
class Meta:
model = User
fields = ("id", "username", "last_login", "date_joined", "todolists")
class TodoListSerializer(serializers.ModelSerializer):
creator = serializers.ReadOnlyField(source="creator.username")
class Meta:
model = TodoList
fields = ("id", "title", "created_at", "creator", "todos")
class TodoSerializer(serializers.ModelSerializer):
creator = serializers.ReadOnlyField(source="creator.username")
class Meta:
model = Todo
fields = (
"id",
"todolist",
"description",
"created_at",
"creator",
"is_finished",
"finished_at",
)
|
py | 1a4fde7ab994377669bd6b6582c6703860828e23 | # ------------------------------------------------------------------------
# BEAUTY DETR
# Copyright (c) 2022 Ayush Jain & Nikolaos Gkanatsios
# Licensed under CC-BY-NC [see LICENSE for details]
# All Rights Reserved
# ------------------------------------------------------------------------
# Parts adapted from Group-Free
# Copyright (c) 2021 Ze Liu. All Rights Reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------
"""Main script for language modulation."""
import os
import numpy as np
import torch
import torch.distributed as dist
from main_utils import parse_option, BaseTrainTester
from data.model_util_scannet import ScannetDatasetConfig
from src.joint_det_dataset import Joint3DDataset
from src.grounding_evaluator import GroundingEvaluator
from models import BeaUTyDETR
from models import APCalculator, parse_predictions, parse_groundtruths
import ipdb
st = ipdb.set_trace
class TrainTester(BaseTrainTester):
"""Train/test a language grounder."""
def __init__(self, args):
"""Initialize."""
super().__init__(args)
@staticmethod
def get_datasets(args):
"""Initialize datasets."""
dataset_dict = {} # dict to use multiple datasets
for dset in args.dataset:
dataset_dict[dset] = 1
if args.joint_det:
dataset_dict['scannet'] = 10
print('Loading datasets:', sorted(list(dataset_dict.keys())))
train_dataset = Joint3DDataset(
dataset_dict=dataset_dict,
test_dataset=args.test_dataset,
split='train' if not args.debug else 'val',
use_color=args.use_color, use_height=args.use_height,
overfit=args.debug,
data_path=args.data_root,
detect_intermediate=args.detect_intermediate,
use_multiview=args.use_multiview,
butd=args.butd,
butd_gt=args.butd_gt,
butd_cls=args.butd_cls,
augment_det=args.augment_det
)
test_dataset = Joint3DDataset(
dataset_dict=dataset_dict,
test_dataset=args.test_dataset,
split='val' if not args.eval_train else 'train',
use_color=args.use_color, use_height=args.use_height,
overfit=args.debug,
data_path=args.data_root,
detect_intermediate=args.detect_intermediate,
use_multiview=args.use_multiview,
butd=args.butd,
butd_gt=args.butd_gt,
butd_cls=args.butd_cls
)
return train_dataset, test_dataset
@staticmethod
def get_model(args):
"""Initialize the model."""
num_input_channel = int(args.use_color) * 3
if args.use_height:
num_input_channel += 1
if args.use_multiview:
num_input_channel += 128
if args.use_soft_token_loss:
num_class = 256
else:
num_class = 19
model = BeaUTyDETR(
num_class=num_class,
num_obj_class=485,
input_feature_dim=num_input_channel,
num_queries=args.num_target,
num_decoder_layers=args.num_decoder_layers,
self_position_embedding=args.self_position_embedding,
contrastive_align_loss=args.use_contrastive_align,
butd=args.butd or args.butd_gt or args.butd_cls,
pointnet_ckpt=args.pp_checkpoint,
self_attend=args.self_attend
)
return model
@staticmethod
def _get_inputs(batch_data):
return {
'point_clouds': batch_data['point_clouds'].float(),
'text': batch_data['utterances'],
"det_boxes": batch_data['all_detected_boxes'],
"det_bbox_label_mask": batch_data['all_detected_bbox_label_mask'],
"det_class_ids": batch_data['all_detected_class_ids']
}
@torch.no_grad()
def evaluate_one_epoch(self, epoch, test_loader,
model, criterion, set_criterion, args):
"""
Eval grounding after a single epoch.
Some of the args:
model: a nn.Module that returns end_points (dict)
criterion: a function that returns (loss, end_points)
"""
if args.test_dataset == 'scannet':
return self.evaluate_one_epoch_det(
epoch, test_loader, model,
criterion, set_criterion, args
)
stat_dict = {}
model.eval() # set model to eval mode (for bn and dp)
if args.num_decoder_layers > 0:
prefixes = ['last_', 'proposal_']
prefixes = ['last_']
prefixes.append('proposal_')
else:
prefixes = ['proposal_'] # only proposal
prefixes += [f'{i}head_' for i in range(args.num_decoder_layers - 1)]
evaluator = GroundingEvaluator(
only_root=True, thresholds=[0.25, 0.5],
topks=[1, 5, 10], prefixes=prefixes,
filter_non_gt_boxes=args.butd_cls
)
# Main eval branch
for batch_idx, batch_data in enumerate(test_loader):
stat_dict, end_points = self._main_eval_branch(
batch_idx, batch_data, test_loader, model, stat_dict,
criterion, set_criterion, args
)
if evaluator is not None:
for prefix in prefixes:
evaluator.evaluate(end_points, prefix)
evaluator.synchronize_between_processes()
if dist.get_rank() == 0:
if evaluator is not None:
evaluator.print_stats()
return None
@torch.no_grad()
def evaluate_one_epoch_det(self, epoch, test_loader,
model, criterion, set_criterion, args):
"""
Eval grounding after a single epoch.
Some of the args:
model: a nn.Module that returns end_points (dict)
criterion: a function that returns (loss, end_points)
"""
dataset_config = ScannetDatasetConfig(18)
# Used for AP calculation
CONFIG_DICT = {
'remove_empty_box': False, 'use_3d_nms': True,
'nms_iou': 0.25, 'use_old_type_nms': False, 'cls_nms': True,
'per_class_proposal': True, 'conf_thresh': 0.0,
'dataset_config': dataset_config,
'hungarian_loss': True
}
stat_dict = {}
model.eval() # set model to eval mode (for bn and dp)
if set_criterion is not None:
set_criterion.eval()
if args.num_decoder_layers > 0:
prefixes = ['last_', 'proposal_']
prefixes += [
f'{i}head_' for i in range(args.num_decoder_layers - 1)
]
else:
prefixes = ['proposal_'] # only proposal
prefixes = ['last_']
ap_calculator_list = [
APCalculator(iou_thresh, dataset_config.class2type)
for iou_thresh in args.ap_iou_thresholds
]
mAPs = [
[iou_thresh, {k: 0 for k in prefixes}]
for iou_thresh in args.ap_iou_thresholds
]
batch_pred_map_cls_dict = {k: [] for k in prefixes}
batch_gt_map_cls_dict = {k: [] for k in prefixes}
# Main eval branch
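# The two index arrays below appear to map token positions in the utterance (tokenidx)
# onto the 19 detection classes (wordidx), so that per-token scores can be accumulated
# into per-class scores in the loop further down; this reading is an inference from the
# accumulation code, not documented in the original script.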
wordidx = np.array([
0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11,
12, 13, 13, 14, 15, 16, 16, 17, 17, 18, 18
])
tokenidx = np.array([
1, 2, 3, 5, 7, 9, 11, 13, 15, 17, 18, 19, 21, 23,
25, 27, 29, 31, 32, 34, 36, 38, 39, 41, 42, 44, 45
])
for batch_idx, batch_data in enumerate(test_loader):
stat_dict, end_points = self._main_eval_branch(
batch_idx, batch_data, test_loader, model, stat_dict,
criterion, set_criterion, args
)
# contrast
proj_tokens = end_points['proj_tokens'] # (B, tokens, 64)
proj_queries = end_points['last_proj_queries'] # (B, Q, 64)
sem_scores = torch.matmul(proj_queries, proj_tokens.transpose(-1, -2))
sem_scores_ = sem_scores / 0.07 # (B, Q, tokens)
sem_scores = torch.zeros(sem_scores_.size(0), sem_scores_.size(1), 256)
sem_scores = sem_scores.to(sem_scores_.device)
sem_scores[:, :sem_scores_.size(1), :sem_scores_.size(2)] = sem_scores_
end_points['last_sem_cls_scores'] = sem_scores
# end contrast
sem_cls = torch.zeros_like(end_points['last_sem_cls_scores'])[..., :19]
for w, t in zip(wordidx, tokenidx):
sem_cls[..., w] += end_points['last_sem_cls_scores'][..., t]
end_points['last_sem_cls_scores'] = sem_cls
# Parse predictions
# for prefix in prefixes:
prefix = 'last_'
batch_pred_map_cls = parse_predictions(
end_points, CONFIG_DICT, prefix,
size_cls_agnostic=True)
batch_gt_map_cls = parse_groundtruths(
end_points, CONFIG_DICT,
size_cls_agnostic=True)
batch_pred_map_cls_dict[prefix].append(batch_pred_map_cls)
batch_gt_map_cls_dict[prefix].append(batch_gt_map_cls)
mAP = 0.0
# for prefix in prefixes:
prefix = 'last_'
for (batch_pred_map_cls, batch_gt_map_cls) in zip(
batch_pred_map_cls_dict[prefix],
batch_gt_map_cls_dict[prefix]):
for ap_calculator in ap_calculator_list:
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# Evaluate average precision
for i, ap_calculator in enumerate(ap_calculator_list):
metrics_dict = ap_calculator.compute_metrics()
self.logger.info(
'=====================>'
f'{prefix} IOU THRESH: {args.ap_iou_thresholds[i]}'
'<====================='
)
for key in metrics_dict:
self.logger.info(f'{key} {metrics_dict[key]}')
if prefix == 'last_' and ap_calculator.ap_iou_thresh > 0.3:
mAP = metrics_dict['mAP']
mAPs[i][1][prefix] = metrics_dict['mAP']
ap_calculator.reset()
for mAP in mAPs:
self.logger.info(
f'IoU[{mAP[0]}]:\t'
+ ''.join([
f'{key}: {mAP[1][key]:.4f} \t'
for key in sorted(mAP[1].keys())
])
)
return None
if __name__ == '__main__':
os.environ["TOKENIZERS_PARALLELISM"] = "false"
opt = parse_option()
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
train_tester = TrainTester(opt)
ckpt_path = train_tester.main(opt)
|
py | 1a4fdf334ef046277888761918e9f8e2eabd5774 |
print('Hello Chennai'); |
py | 1a4fe0750f9975e744ba4ef3c9da744dd16a2ca1 | from ast import literal_eval
from django.db import models
from django.conf import settings
from django.utils.functional import cached_property
__all__ = ['BaseMetadata']
class MetadataManager(models.Manager):
'''Manager that optimizes the queries by selecting the related objects'''
def get_queryset(self):
queryset = super().get_queryset()
queryset = queryset.select_related('data_location').prefetch_related('tags')
return queryset
class BaseMetadata(models.Model):
'''Abstract base model for the Metadata models'''
oid = models.TextField('Observation ID', help_text = 'Unique identification string for the observation metadata, usually in the form YYYYMMDDHHMMSS; cannot be modified once it is set', unique=True, db_index=True)
fits_header = models.TextField(null=True, blank=True)
data_location = models.ForeignKey('dataset.DataLocation', related_name='%(app_label)s_%(class)s', null=True, blank=True, on_delete=models.SET_NULL)
tags = models.ManyToManyField('metadata.Tag', related_name='%(app_label)s_%(class)s', blank=True)
date_beg = models.DateTimeField('DATE-BEG', help_text='Start time of the observation [UTC]', blank=True, null=True, db_index=True)
date_end = models.DateTimeField('DATE-END', help_text='End time of the observation [UTC]', blank=True, null=True, db_index=True)
wavemin = models.FloatField('WAVEMIN', help_text='Min value of the observation spectral range [nm]', blank=True, null=True, db_index=True)
wavemax = models.FloatField('WAVEMAX', help_text='Max value of the observation spectral range [nm]', blank=True, null=True, db_index=True)
objects = MetadataManager()
class Meta:
abstract = True
ordering = ['date_beg']
def __str__(self):
return self.oid
@cached_property
def tags_names(self):
return self.tags.values_list('name', flat=True)
|
py | 1a4fe15a6ffc6c6be994ca8dd9511478f7b27c9a | from gym.spaces import Space
from typing import Union
from ray.rllib.utils.framework import check_framework, try_import_tf, \
TensorType
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import DeveloperAPI
tf = try_import_tf()
@DeveloperAPI
class Exploration:
"""Implements an exploration strategy for Policies.
An Exploration takes model outputs, a distribution, and a timestep from
the agent and computes an action to apply to the environment using an
implemented exploration schema.
"""
def __init__(self,
action_space: Space,
num_workers: int,
worker_index: int,
framework: str = "tf"):
"""
Args:
action_space (Space): The action space in which to explore.
num_workers (int): The overall number of workers used.
worker_index (int): The index of the worker using this class.
framework (str): One of "tf" or "torch".
"""
self.action_space = action_space
self.num_workers = num_workers
self.worker_index = worker_index
self.framework = check_framework(framework)
@DeveloperAPI
def before_compute_actions(self,
*,
timestep=None,
explore=None,
tf_sess=None,
**kwargs):
"""Hook for preparations before policy.compute_actions() is called.
Args:
timestep (Optional[TensorType]): An optional timestep tensor.
explore (Optional[TensorType]): An optional explore boolean flag.
tf_sess (Optional[tf.Session]): The tf-session object to use.
**kwargs: Forward compatibility kwargs.
"""
pass
@DeveloperAPI
def get_exploration_action(self,
distribution_inputs: TensorType,
action_dist_class: type,
model: ModelV2,
timestep: Union[int, TensorType],
explore: bool = True):
"""Returns a (possibly) exploratory action and its log-likelihood.
Given the Model's logits outputs and action distribution, returns an
exploratory action.
Args:
distribution_inputs (TensorType): The output coming from the model,
ready for parameterizing a distribution
(e.g. q-values or PG-logits).
action_dist_class (class): The action distribution class
to use.
model (ModelV2): The Model object.
timestep (int|TensorType): The current sampling time step. It can
be a tensor for TF graph mode, otherwise an integer.
explore (bool): True: "Normal" exploration behavior.
False: Suppress all exploratory behavior and return
a deterministic action.
Returns:
Tuple:
- The chosen exploration action or a tf-op to fetch the exploration
action from the graph.
- The log-likelihood of the exploration action.
"""
pass
@DeveloperAPI
def on_episode_start(self,
policy,
*,
environment=None,
episode=None,
tf_sess=None):
"""Handles necessary exploration logic at the beginning of an episode.
Args:
policy (Policy): The Policy object that holds this Exploration.
environment (BaseEnv): The environment object we are acting in.
episode (int): The number of the episode that is starting.
tf_sess (Optional[tf.Session]): In case of tf, the session object.
"""
pass
@DeveloperAPI
def on_episode_end(self,
policy,
*,
environment=None,
episode=None,
tf_sess=None):
"""Handles necessary exploration logic at the end of an episode.
Args:
policy (Policy): The Policy object that holds this Exploration.
environment (BaseEnv): The environment object we are acting in.
episode (int): The number of the episode that is starting.
tf_sess (Optional[tf.Session]): In case of tf, the session object.
"""
pass
@DeveloperAPI
def postprocess_trajectory(self, policy, sample_batch, tf_sess=None):
"""Handles post-processing of done episode trajectories.
Changes the given batch in place. This callback is invoked by the
sampler after policy.postprocess_trajectory() is called.
Args:
policy (Policy): The owning policy object.
sample_batch (SampleBatch): The SampleBatch object to post-process.
tf_sess (Optional[tf.Session]): An optional tf.Session object.
"""
return sample_batch
@DeveloperAPI
def get_info(self):
"""Returns a description of the current exploration state.
This is not necessarily the state itself (and cannot be used in
set_state!), but rather useful (e.g. debugging) information.
Returns:
dict: A description of the Exploration (not necessarily its state).
This may include tf.ops as values in graph mode.
"""
return {}
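# A minimal sketch (not part of ray.rllib) of how a concrete subclass might implement
# get_exploration_action. The action_dist_class(inputs, model) construction and the
# sample()/deterministic_sample()/logp() calls below are assumptions for illustration.
#
# class SimpleStochasticSampling(Exploration):
#     def get_exploration_action(self, distribution_inputs, action_dist_class,
#                                model, timestep, explore=True):
#         action_dist = action_dist_class(distribution_inputs, model)
#         if explore:
#             action = action_dist.sample()
#         else:
#             action = action_dist.deterministic_sample()
#         return action, action_dist.logp(action)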
|
py | 1a4fe1aee27dfdafb18aca44e7430bdfcea42b91 | from torchsummary import summary
from models.ResNetBlocks import *
from models.ResNetSE34L import *
def MainModel(nOut=256, **kwargs):
# Number of filters
num_filters = [32, 64, 128, 256]
model = ResNetSE(SEBasicBlock, [3, 4, 6, 3], num_filters, nOut, **kwargs)
return model
|
py | 1a4fe1af2268f2a05d07af8c7b8ba489887dfa53 | import abc
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
from winter.core import ComponentMethod
from .throws import get_throws
NotHandled = object()
class ExceptionHandler(abc.ABC):
@abc.abstractmethod
def handle(self, exception: Exception, **kwargs): # pragma: no cover
pass
class ExceptionHandlersRegistry:
HandlersMap = Dict[Type[Exception], ExceptionHandler]
def __init__(self):
self._handlers: ExceptionHandlersRegistry.HandlersMap = {}
self._auto_handle_exceptions = set()
super().__init__()
@property
def auto_handle_exception_classes(self) -> Tuple[Type[Exception], ...]:
return tuple(self._auto_handle_exceptions)
def add_handler(
self,
exception_cls: Type[Exception],
handler_cls: Type[ExceptionHandler],
*,
auto_handle: bool = False,
):
assert exception_cls not in self._handlers
self._handlers[exception_cls] = handler_cls()
if auto_handle:
self._auto_handle_exceptions.add(exception_cls)
def get_handler(
self,
exception: Union[Type[Exception], Exception],
) -> Optional[ExceptionHandler]:
exception_type = type(exception) if isinstance(exception, Exception) else exception
for exception_cls, handler in self._handlers.items():
if issubclass(exception_type, exception_cls):
return handler
return None
class MethodExceptionsManager:
def __init__(self, method: ComponentMethod):
super().__init__()
self._method = method
self._handlers_by_exception = get_throws(self._method)
@property
def declared_exception_classes(self) -> Tuple[Type[Exception], ...]:
return tuple(self._handlers_by_exception.keys())
@property
def exception_classes(self) -> Tuple[Type[Exception], ...]:
return self.declared_exception_classes + exception_handlers_registry.auto_handle_exception_classes
def get_handler(self, exception: Union[Type[Exception], Exception]) -> Optional[ExceptionHandler]:
exception_type = type(exception) if isinstance(exception, Exception) else exception
for exception_cls, handler in self._handlers_by_exception.items():
if handler is not None and issubclass(exception_type, exception_cls):
return handler
return exception_handlers_registry.get_handler(exception)
exception_handlers_registry = ExceptionHandlersRegistry()
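# A hedged usage sketch (the exception and handler names below are hypothetical,
# not part of this module): registering a handler and looking it up again.
#
# class NotFoundError(Exception):
#     pass
#
# class NotFoundHandler(ExceptionHandler):
#     def handle(self, exception: Exception, **kwargs):
#         return {'error': str(exception)}
#
# exception_handlers_registry.add_handler(NotFoundError, NotFoundHandler, auto_handle=True)
# handler = exception_handlers_registry.get_handler(NotFoundError)  # -> NotFoundHandler instance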
|
py | 1a4fe20ddf52626443fc7327b15b165252425e05 | # -*- coding: utf-8 -*-
#
# TracPro documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 2 13:52:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TracPro'
copyright = u'%s, UNICEF' % datetime.datetime.now().year
author = u'UNICEF'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = "1.9.1-dev"
# The short X.Y version.
version = "1.9"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TracProdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TracPro.tex', u'TracPro Documentation',
u'UNICEF', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tracpro', u'TracPro Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TracPro', u'TracPro Documentation',
author, 'TracPro', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
py | 1a4fe21afa577fa2e6a1f82528f45acdd977d6b7 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import warnings
from multiprocessing import Process # noqa: F401
from multiprocessing import Manager # noqa: F401
import time
import sys
from paddle import compat as cpt
# deprecated module import
from paddle.fluid import core
from paddle.fluid.framework import _set_expected_place
from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401
__all__ = []
ParallelStrategy = core.ParallelStrategy
# NOTE(chenweihang): Maintain a global parallel env to avoid
# initializing ParallelEnv every time and improve performance
_global_parallel_env = None
def _get_global_parallel_env():
global _global_parallel_env
if _global_parallel_env is None:
_global_parallel_env = ParallelEnv()
return _global_parallel_env
def _start_kv_server(port, http_server_d, size):
from paddle.distributed.fleet.utils.http_server import KVServer
http_server = KVServer(int(port), size=size)
http_server.start()
wait_seconds = 3
while http_server_d.get("running", False) or not http_server.should_stop():
time.sleep(wait_seconds)
http_server.stop()
def init_parallel_env():
"""
Initialize parallel training environment in dynamic graph mode.
.. note::
Now initialize both `NCCL` and `GLOO` contexts for communication.
Returns:
None
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear1 = nn.Linear(10, 10)
self._linear2 = nn.Linear(10, 1)
def forward(self, x):
return self._linear2(self._linear1(x))
def train():
# 1. initialize parallel environment
dist.init_parallel_env()
# 2. create data parallel layer & optimizer
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.MSELoss()
adam = opt.Adam(
learning_rate=0.001, parameters=dp_layer.parameters())
# 3. run layer
inputs = paddle.randn([10, 10], 'float32')
outputs = dp_layer(inputs)
labels = paddle.randn([10, 1], 'float32')
loss = loss_fn(outputs, labels)
loss.backward()
adam.step()
adam.clear_grad()
if __name__ == '__main__':
dist.spawn(train)
"""
# 0. get env & check world size
global _global_parallel_env
# when call init_parallel_env, need update `_global_parallel_env`
_global_parallel_env = ParallelEnv()
parallel_env = _global_parallel_env
# if not parallel, `init_parallel_env` do nothing
if parallel_env.world_size < 2:
warnings.warn(
"Currently not a parallel execution environment, `paddle.distributed.init_parallel_env` will not do anything."
)
return
# 1. gpu xpu check, must be gpu or xpu
if not core.is_compiled_with_cuda() and not core.is_compiled_with_xpu():
raise NotImplementedError(
"Cannot initialize parallel environment in CPU-only version, now only "
"supports initializing the GPU and XPU parallel environment. Please recompile "
"or reinstall paddle with GPU or XPU support.")
# 2. check env
def _check_var_exists(var_name):
var = os.environ.get(var_name, None)
if var is None:
raise ValueError("paddle.distributed initialize error, "
"environment variable %s is needed, but not set." %
var_name)
if core.is_compiled_with_cuda():
_check_var_exists("FLAGS_selected_gpus")
elif core.is_compiled_with_xpu():
_check_var_exists('FLAGS_selected_xpus')
_check_var_exists("PADDLE_TRAINER_ID")
_check_var_exists("PADDLE_CURRENT_ENDPOINT")
_check_var_exists("PADDLE_TRAINERS_NUM")
_check_var_exists("PADDLE_TRAINER_ENDPOINTS")
# 3: init gloo context (step 1: httpsever start)
init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if init_gloo:
ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
manager = Manager()
# global dict to store status
http_server_d = manager.dict()
http_server_d["running"] = False
if parallel_env.rank == 0:
# The scope for worker used by http server is '_worker'
size = {'_worker': parallel_env.world_size}
http_server = Process(
target=_start_kv_server,
args=(int(ep_rank_0[1]), http_server_d, size))
http_server.daemon = True
http_server_d["running"] = True
http_server.start()
# 4. init NCCL ParallelStrategy
strategy = ParallelStrategy()
if parallel_helper._is_parallel_ctx_initialized():
warnings.warn("The parallel environment has been initialized.")
strategy.nranks = parallel_env.world_size
strategy.local_rank = parallel_env.rank
strategy.trainer_endpoints = parallel_env.trainer_endpoints
strategy.current_endpoint = parallel_env.current_endpoint
strategy.nrings = parallel_env.nrings
# NOTE(chenweihang): [ why config global place here? ]
# the dygraph mode will be set to default mode,
# users will not call `dygraph.guard` or `enable_dygraph`
# directly, if they want to switch default place,
# they need to call a function to change default place,
# here just set correctly place to users
if core.is_compiled_with_cuda():
place = core.CUDAPlace(parallel_env.device_id)
elif core.is_compiled_with_xpu():
place = core.XPUPlace(parallel_env.device_id)
_set_expected_place(place)
# init nccl or bkcl context
if core.is_compiled_with_cuda():
parallel_helper._set_parallel_ctx(
core.NCCLParallelContext(strategy, place))
elif core.is_compiled_with_xpu():
parallel_helper._set_parallel_ctx(
core.BKCLParallelContext(strategy, place))
other_endpoints = strategy.trainer_endpoints[:]
other_endpoints.remove(strategy.current_endpoint)
if strategy.local_rank == 0:
wait_server_ready(other_endpoints)
parallel_helper._init_parallel_ctx()
# 5: init gloo context (step 2: gloo init)
# dividing init_gloo into two parts because nccl and gloo
# are separately looking for free ports which sometimes
# leads to port-conflict.
if init_gloo:
wait_server_ready([parallel_env.trainer_endpoints[0]])
gloo_strategy = core.GlooParallelStrategy()
gloo_strategy.rank = parallel_env.rank
gloo_strategy.rank_num = parallel_env.world_size
gloo_strategy.ip_address = ep_rank_0[0]
gloo_strategy.ip_port = int(ep_rank_0[1])
default_init_timeout_seconds = 3600
default_run_timeout_seconds = 9999999
gloo_strategy.init_seconds = default_init_timeout_seconds
gloo_strategy.run_seconds = default_run_timeout_seconds
gloo = core.GlooParallelContext(gloo_strategy)
gloo.init()
if parallel_env.rank == 0:
http_server_d["running"] = False
http_server.join()
def get_rank():
"""
Returns the rank of current trainer.
Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID`` .
The default value is 0.
Returns:
(int) The rank of current trainer.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINER_ID=0
print("The rank is %d" % dist.get_rank())
# The rank is 0
"""
return _get_global_parallel_env().rank
def get_world_size():
"""
Returns the number of trainers (number of processes participating in current job).
Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM`` .
The default value is 1.
Returns:
(int) The number of trainers.
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
# execute this command in terminal: export PADDLE_TRAINERS_NUM=4
print("The world_size is %d" % dist.get_world_size())
# The world_size is 4
"""
return _get_global_parallel_env().world_size
|
py | 1a4fe2a0148f19c33f75d4eb7d079aded15b4a71 | from .converters import BetterMemberConverter, BetterUserconverter, guildinfo |
py | 1a4fe2dc50f33ec0bcfb96e873a2c4b6a4f1d816 |
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = 'help'
usage = """
%prog <command>"""
summary = 'Show help for commands.'
ignore_require_venv = True
def run(self, options, args):
from pip._internal.commands import commands_dict, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
command = commands_dict[cmd_name]()
command.parser.print_help()
return SUCCESS
|
py | 1a4fe40aa6eef969719ab20b34d1e9156144719c | #!/usr/bin/env python
#
# Created on Nov 14, 2017
# @author: [email protected], [email protected]
#
# AVISDK based Script to get the status and configuration information of the Virtual Services
#
# Requires AVISDK ("pip install avisdk") and PrettyTable ("pip install PrettyTable")
# Usage:- python avi_virtual_service_info.py -c <Controller-IP> -u <user-name> -p <password>
# Note:- This script works for Avi Controler version 17.1.1 onwards
import json
import argparse
from avi.sdk.avi_api import ApiSession
from requests.packages import urllib3
from prettytable import PrettyTable
from prettytable import ALL as ALL
urllib3.disable_warnings()
def get_vs_list(api, api_version):
vs_list = []
rsp = api.get('virtualservice', api_version=api_version)
for vs in rsp.json()['results']:
vs_list.append(vs['uuid'])
return vs_list
def get_vs_oper_info(api, api_version, vs_list):
oper_dict = {}
for vs in vs_list:
rsp = api.get('virtualservice-inventory/%s' % vs, api_version=api_version)
vs_data = rsp.json()
req_vs_data = { "state": vs_data['runtime']['oper_status']['state'], "name": vs_data['config']['name'],
"uuid": vs_data['config']['uuid'] }
i = 1
for vips in vs_data['config']['vip']:
req_vs_data["vip_"+str(i)] = vips
i = i+1
j = 1
for dns in vs_data['config']['dns_info']:
req_vs_data["dns_"+str(j)] = dns
j = j+1
if vs_data['runtime']['oper_status']['state'] in oper_dict.keys():
oper_dict[vs_data['runtime']['oper_status']['state']].append(req_vs_data)
else:
oper_dict[vs_data['runtime']['oper_status']['state']] = []
oper_dict[vs_data['runtime']['oper_status']['state']].append(req_vs_data)
return oper_dict
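# Example of the returned structure (values are hypothetical):
# {'OPER_UP': [{'state': 'OPER_UP', 'name': 'vs-web', 'uuid': 'virtualservice-xyz',
# 'vip_1': {...}, 'dns_1': {...}}],
# 'OPER_DOWN': [...]}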
def main():
#Getting Required Args
parser = argparse.ArgumentParser(description="AVISDK based Script to get the status and configuration"+
" information of the Virtual Services")
parser.add_argument("-u", "--username", required=True, help="Login username")
parser.add_argument("-p", "--password", required=True, help="Login password")
parser.add_argument("-c", "--controller", required=True, help="Controller IP address")
parser.add_argument("-t", "--tenant", required=False, help="Tenant Name")
parser.add_argument("-a", "--api_version", required=False, help="Tenant Name")
args = parser.parse_args()
user = args.username
host = args.controller
password = args.password
if args.tenant:
tenant=args.tenant
else:
tenant="*"
if args.api_version:
api_version=args.api_version
else:
api_version="17.1.1"
#Getting API session for the intended Controller.
api = ApiSession.get_session(host, user, password, tenant=tenant, api_version=api_version)
#Getting the list of VirtualService(s).
vs_list = get_vs_list(api, api_version)
#Getting VS information
oper_dict = get_vs_oper_info(api, api_version, vs_list)
#print "Final Oper Dict:" + str(oper_dict)
for state, vs in oper_dict.items():
print("VS in State:%s [%s]" % (state, len(vs)))
table = PrettyTable(hrules=ALL)
table.field_names = ["VS Name","VIP_ID", "VIP_Address", "DNS_INFO"]
for vss in vs:
vips = list()
dns_info = list()
vip_count = 0
dns_count = 0
if 'vip_1' in vss.keys():
vips = [value for key, value in vss.items() if 'vip' in key.lower()]
vip_count = len(vips)
if 'dns_1' in vss.keys():
dns_info = [value for key, value in vss.items() if 'dns' in key.lower()]
dns_count = len(dns_info)
vs_name = vss['name']
vip_ids = ''
vips_list = ''
dns_list = ''
for vip in vips:
vip_ids += vip['vip_id'] + "\n"
vips_list += vip['ip_address']['addr']
if vip.get('floating_ip', None):
vips_list += '- ' + vip['floating_ip']['addr']
vips_list+='\n'
for dns in dns_info:
dns_list += dns['fqdn'] + "\n"
table.add_row([vs_name, vip_ids[:-1], vips_list[:-1], dns_list[:-1]])
print(table)
print("\n")
if __name__ == "__main__":
main()
|
py | 1a4fe471a4a281c15b1b3ecb8b3e5460adf83cf7 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Test case ID : C14861501
Test Case Title : Verify PxMesh is auto-assigned when Collider component is added after Rendering Mesh component
"""
# fmt: off
class Tests():
create_entity = ("Created test entity", "Failed to create test entity")
mesh_added = ("Added Mesh component", "Failed to add Mesh component")
physx_collider_added = ("Added PhysX Collider component", "Failed to add PhysX Collider component")
assign_mesh_asset = ("Assigned Mesh asset to Mesh component", "Failed to assign mesh asset to Mesh component")
automatic_shape_change = ("Shape was changed automatically", "Shape failed to change automatically")
# fmt: on
def C14861501_PhysXCollider_RenderMeshAutoAssigned():
"""
Summary:
Create an entity with a Mesh component and assign a render mesh to it. Add a PhysX Collider component
and verify that the physics mesh asset is auto-assigned.
Expected Behavior:
The physics asset in PhysX Collider component is auto-assigned
Test Steps:
1) Load the empty level
2) Create an entity
3) Add Mesh component
4) Assign a render mesh asset to Mesh component (the fbx mesh having both Static mesh and PhysX collision Mesh)
5) Add PhysX Collider component
6) The physics asset in PhysX Collider component is auto-assigned.
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
# Builtins
import os
# Helper Files
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.editor_entity_utils import EditorEntity as Entity
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
from asset_utils import Asset
# Asset paths
STATIC_MESH = os.path.join("assets", "c14861501_physxcollider_rendermeshautoassigned", "spherebot", "r0-b_body.azmodel")
PHYSX_MESH = os.path.join(
"assets", "c14861501_physxcollider_rendermeshautoassigned", "spherebot", "r0-b_body.pxmesh"
)
helper.init_idle()
# 1) Load the empty level
helper.open_level("Physics", "Base")
# 2) Create an entity
test_entity = Entity.create_editor_entity("test_entity")
Report.result(Tests.create_entity, test_entity.id.IsValid())
# 3) Add Mesh component
mesh_component = test_entity.add_component("Mesh")
Report.result(Tests.mesh_added, test_entity.has_component("Mesh"))
# 4) Assign a render mesh asset to Mesh component (the fbx mesh having both Static mesh and PhysX collision Mesh)
mesh_asset = Asset.find_asset_by_path(STATIC_MESH)
mesh_component.set_component_property_value("Controller|Configuration|Mesh Asset", mesh_asset.id)
mesh_asset.id = mesh_component.get_component_property_value("Controller|Configuration|Mesh Asset")
Report.result(Tests.assign_mesh_asset, mesh_asset.get_path() == STATIC_MESH.replace(os.sep, "/"))
# 5) Add PhysX Collider component
test_component = test_entity.add_component("PhysX Collider")
Report.result(Tests.physx_collider_added, test_entity.has_component("PhysX Collider"))
# 6) The physics asset in PhysX Collider component is auto-assigned.
asset_id = test_component.get_component_property_value("Shape Configuration|Asset|PhysX Mesh")
test_asset = Asset(asset_id)
Report.result(Tests.automatic_shape_change, test_asset.get_path() == PHYSX_MESH.replace(os.sep, "/"))
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
Report.start_test(C14861501_PhysXCollider_RenderMeshAutoAssigned)
|
py | 1a4fe516558989cb517067c45aa1b059c81fbbc3 | import numpy as np
class VineyardAnalysis():
def __init__(self):
self.name = "Vineyard Suitability Analysis Function"
self.description = "This function computes vineyard suitability given elevation, slope, aspect, and soil-type rasters."
def getParameterInfo(self):
return [
{
'name': 'elevation',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Elevation Raster',
'description': "The primary single-band raster where pixel values represent elevation in meters."
},
{
'name': 'slope',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Slope Raster',
'description': "A single-band raster where pixel values represent slope."
},
{
'name': 'aspect',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': 'Aspect Raster',
'description': "A single-band raster where pixel values represent aspect."
},
{
'name': 'soiltype',
'dataType': 'raster',
'value': None,
'required': False,
'displayName': 'Soil Type Raster',
'description': "A single-band thematic raster where pixel values represent soil type."
},
]
def getConfiguration(self, **scalars):
return {
'inheritProperties': 2 | 4 | 8, # inherit all but the pixel type from the input raster
'invalidateProperties': 2 | 4 | 8, # reset any statistics and histogram that might be held by the parent dataset (because this function modifies pixel values).
'inputMask': True # We need the input raster mask in .updatePixels().
}
def updateRasterInfo(self, **kwargs):
kwargs['output_info']['bandCount'] = 1
kwargs['output_info']['pixelType'] = 'u1'
kwargs['output_info']['statistics'] = ({'minimum': 0, 'maximum': 3}, )
kwargs['output_info']['noData'] = np.array([0], 'u1')
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
elev = np.array(pixelBlocks['elevation_pixels'], 'f4')
slope = np.array(pixelBlocks['slope_pixels'], 'f4')
aspect = np.array(pixelBlocks['aspect_pixels'], 'f4')
#soil = np.array(pixelBlocks['soiltype_pixels'], 'i8')
E = (elev > 30).astype('u1') & (elev < 400).astype('u1')
S = (slope > 5).astype('u1') & (slope < 60).astype('u1')
A = (aspect > 0).astype('u1') & (aspect < 200).astype('u1')
pixelBlocks['output_pixels'] = (E + S + A).astype(props['pixelType'])
return pixelBlocks
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
if bandIndex == -1:
keyMetadata['datatype'] = 'Scientific'
keyMetadata['variable'] = 'VineyardSuitability'
elif bandIndex == 0:
keyMetadata['wavelengthmin'] = None # reset inapplicable band-specific key metadata
keyMetadata['wavelengthmax'] = None
keyMetadata['bandname'] = 'VineyardSuitability'
return keyMetadata
|
py | 1a4fe5aeb5596723a8b4b41a2500a43fbd637a89 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 16:54:54 2019
@author: similarities
"""
import matplotlib.pyplot as plt
import numpy as np
import os
class FwhmImageProcessing:
def __init__(self, filename, lambda_fundamental, maximum_harmonic, harmonic_number):
self.filename = filename
self.filedescription = self.filename[31:42] + '_' + self.filename[-6:-4]
self.y_min = 0
self.y_max = 2048
self.x_min = 150
self.x_max = 1300
self.picture = np.empty([])
self.harmonic_selected = harmonic_number
self.x_backsubstracted = np.empty([2048, 2048])
self.lambda_fundamental = lambda_fundamental
self.line_out = np.zeros([self.y_max, 1])
self.line_out_x = np.arange(self.x_min, self.x_max)
self.calibration_to_msr = 17.5 / 2048
self.full_divergence = 17.5
self.normalization_factor_mrad = np.zeros([20, 1])
self.border_up, self.border_down = self.energy_range()
self.maximum_harmonic = maximum_harmonic
self.result_array = np.zeros([self.maximum_harmonic, 5])
def open_file(self):
self.picture = plt.imread(self.filename)
return self.picture
def background_y(self):
back_mean = np.mean(self.picture[:, 1600:1700], axis=1)
for x in range(0, self.y_max):
self.x_backsubstracted[::, x] = self.picture[::, x] - back_mean[x]
self.background_x()
plt.figure(1)
# plt.ylim(100, 1000)
plt.imshow(self.x_backsubstracted)
plt.vlines(self.x_min, 0, 2048)
plt.vlines(self.x_max, 0, 2048)
return self.x_backsubstracted
def background_x(self):
back_mean = np.mean(self.picture[1880:1948, :], axis=0)
for x in range(0, 2048):
self.x_backsubstracted[x, ::] = self.picture[x, ::] - back_mean[x]
return self.x_backsubstracted
def energy_range(self):
print(self.harmonic_selected, ':')
previous_harmonic = self.lambda_fundamental / (self.harmonic_selected - 0.3)
next_harmonic = self.lambda_fundamental / (self.harmonic_selected + 0.3)
        self.border_up = int(self.nm_in_px(previous_harmonic))
        self.border_down = int(self.nm_in_px(next_harmonic))
        print(self.border_up, self.border_down, "ROI in px")
        self.pixel_range = int(self.border_down - self.border_up)
print(self.pixel_range, 'ROI in pixel range')
self.plot_roi_on_image()
return self.border_up, self.border_down
def nm_in_px(self, px_in):
return int(4.71439193e-01 * px_in ** 2 - 1.06651902e+02 * px_in + 4.29603367e+03)
def plot_roi_on_image(self):
plt.figure(1)
plt.hlines(self.border_up, xmin=0, xmax=2048, color="w", linewidth=0.1)
plt.hlines(self.border_down, xmin=0, xmax=2048, color="g", linewidth=0.1)
def sum_over_pixel_range_y(self):
self.line_out = self.x_backsubstracted[self.border_up: self.border_down, ::]
self.line_out = np.sum(self.line_out, axis=0)
self.line_out = self.line_out[self.x_min:self.x_max]
return self.line_out
def correction_background(self, value):
self.line_out[::] = self.line_out[::] - value
return self.line_out
def integrated_signal_in_lineout(self):
integrated = np.sum(self.line_out[::])
return integrated
def plot_x_y(self, x, y, name, plot_number, axis_x_name, axis_y_name):
plt.figure(plot_number)
plt.plot(x, y, label=name)
plt.xlabel(str(axis_x_name))
plt.ylabel(str(axis_y_name))
plt.legend()
def calibrate_px_to_msr(self, array_x):
array_x[::] = array_x[::] * self.calibration_to_msr
return array_x
def prepare_for_stepfunction(self):
self.sum_over_pixel_range_y()
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
if minimum < 0:
self.correction_background(minimum)
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
half_max = (maximum - minimum) / 2
# self.plot_x_y(self.line_out_x, self.line_out, 'linout_corrected', 2, 'px', 'counts')
self.plot_x_y(self.line_out_x, self.line_out, str(self.harmonic_selected), 2, 'px', 'counts')
return half_max
def step_function_for_fwhm(self):
half_max = self.prepare_for_stepfunction()
# width of step function is FWHM
d = np.sign(half_max - self.line_out[::]) - 1
self.line_out_x = self.calibrate_px_to_msr(self.line_out_x)
self.plot_x_y(self.line_out_x, d, 'stepfunction', 3, 'mrad', 'value')
self.line_out_x = np.arange(self.x_min, self.x_max)
result_FWHM = 1. * self.calibration_to_msr * (np.amax(np.nonzero(d)) - np.amin(np.nonzero(d)))
return result_FWHM
def px_in_nm(self, px_number):
return 1.24679344e-06 * px_number ** 2 - 1.65566701e-02 * px_number + 5.22598053e+01
def delta_energy(self):
delta = self.px_in_nm(self.border_up) - self.px_in_nm(self.border_down)
energy_nm = (self.lambda_fundamental / self.harmonic_selected)
delta_vs_energy = delta / energy_nm
return energy_nm, delta_vs_energy
def batch_over_N(self):
for x in range(self.harmonic_selected, self.maximum_harmonic):
self.result_array[x, 0] = x
self.harmonic_selected = x
self.energy_range()
self.result_array[x, 1] = self.step_function_for_fwhm()
self.result_array[x, 2] = np.sum(self.line_out[::])
self.result_array[x, 4], self.result_array[x, 3] = self.delta_energy()
# clean for empty entries
self.result_array = np.delete(self.result_array, np.where(~self.result_array.any(axis=1))[0],
axis=0)
self.plot_scatter(self.result_array[::, 0], self.result_array[::, 1], self.filedescription,
'harmonic number N', 'divergence in mrad', 5)
self.save_data()
return self.result_array
def plot_scatter(self, x, y, name, axis_name_x, axis_name_y, plot_number):
plt.figure(plot_number)
plt.scatter(x, y, label=name)
plt.xlabel(axis_name_x)
plt.ylabel(axis_name_y)
#plt.legend()
def prepare_header(self):
self.integrated_signal_in_lineout()
self.delta_energy()
# insert header line and change index
header_names = (['harmonic_number', 'mrad', 'integrated_counts_in_delta_E', 'harmonic_in_nm', 'delta_E/E'])
parameter_info = (
['fundamental_nm:', str(self.lambda_fundamental), 'pixel_range:', str(self.border_down-self.border_up), 'xxxx'])
return np.vstack((header_names, self.result_array, parameter_info))
def save_data(self):
result = self.prepare_header()
plt.figure(1)
plt.savefig(self.filedescription + "_raw_roi_" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(2)
plt.savefig(self.filedescription + "_integrated_lineout" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(5)
plt.savefig(self.filedescription + "_div_mrad_FWHM" + ".png", bbox_inches="tight", dpi=1000)
print('saved data')
np.savetxt(self.filedescription + ".txt", result, delimiter=' ',
header='string', comments='',
fmt='%s')
def get_file_list(path_picture):
    tif_files = []
    for file in os.listdir(path_picture):
        print(file)
        if file.endswith(".tif"):
            tif_files.append(str(file))
        else:
            print("only other files found")
    if not tif_files:
        print("no files found here")
    return tif_files
def process_files(my_files, path):
for x in range(63, 64):
        file = path + '/' + my_files[x]
        Processing_Picture = FwhmImageProcessing(file, 805, 30, 17)
Processing_Picture.open_file()
Processing_Picture.background_y()
Processing_Picture.batch_over_N()
Processing_Picture.save_data()
plt.close(1)
plt.close(2)
plt.close(5)
my_files = get_file_list('rotated_20190123')
process_files(my_files, 'rotated_20190123')
|
py | 1a4fe5c53f9979beff00d8428cf1e4e405d0fe6a | # Hidden Markov Model
## Definition
Let $X_{n}$ and $Y_{n}$ be discrete-time stochastic processes with $n \ge 1$. The pair $(X_{n}, Y_{n})$ is a hidden Markov model if:
* $X_{n}$ is a Markov process whose behavior is not directly observable ("hidden")
* $P(Y_{n} = y_{n}|X_{1}=x_{1},...,X_{n}=x_{n}) = P(Y_{n}=y_{n}|X_{n}=x_{n})$ for every $n \ge 1$
The states of the process $X_{n}$ are called the hidden states, and $P(Y_{n}=y_{n}|X_{n}=x_{n})$ is called the emission probability.
## Computing the observation probability given the model
Target: compute $P(O|\lambda)$ for any observation sequence $O$ given the model $\lambda$.
### Direct approach
$$P(O|\lambda) = \sum_{I}P(O,I|\lambda) = \sum_{I}P(O|I,\lambda)P(I|\lambda)$$
The hidden state sequence $I$ ranges over $N^{T}$ possible values, so the computational complexity is $O(TN^{T})$; this is intractable in practice.
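As a concrete (if impractical) illustration, here is a minimal sketch of the direct enumeration; the names `A` (transition matrix), `B` (emission matrix) and `pi` (initial distribution) are illustrative assumptions, not part of this note, and the loop is only feasible for very small $T$.
```python
from itertools import product
import numpy as np
def brute_force_probability(A, B, pi, obs):
    """Sum P(O, I | lambda) over all N**T hidden state sequences I."""
    N, T = A.shape[0], len(obs)
    total = 0.0
    for states in product(range(N), repeat=T):
        p = pi[states[0]] * B[states[0], obs[0]]  # initial state and first emission
        for t in range(1, T):
            p *= A[states[t - 1], states[t]] * B[states[t], obs[t]]  # transition, then emission
        total += p
    return total
```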
### Forward algorithm
Define the forward variable $\alpha_{t}(i) = P(o_{1},...,o_{t}, x_{t}=q_{i}|\lambda)$, the joint probability of the observation prefix $\{o_{1},...,o_{t}\}$ and being in state $q_{i}$ at time $t$. By the Markov and emission assumptions, $\alpha_{t+1}(j) = \big[\sum_{i=1}^{N}\alpha_{t}(i)a_{ij}\big]b_{j}(o_{t+1})$ and $P(O|\lambda) = \sum_{i=1}^{N}\alpha_{T}(i)$, which brings the cost down to $O(TN^{2})$.
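A minimal NumPy sketch of the forward recursion, using the same illustrative names (`A` transitions, `B` emissions, `pi` initial distribution); it returns the same value as the enumeration above at $O(TN^{2})$ cost.
```python
import numpy as np
def forward_probability(A, B, pi, obs):
    """Compute P(O | lambda) with the forward algorithm."""
    alpha = pi * B[:, obs[0]]          # alpha_1(i) = pi_i * b_i(o_1)
    for o in obs[1:]:
        alpha = (alpha @ A) * B[:, o]  # alpha_{t+1}(j) = (sum_i alpha_t(i) a_ij) * b_j(o)
    return alpha.sum()                 # P(O | lambda) = sum_i alpha_T(i)
```
For example, with a small 2-state model, `forward_probability(A, B, pi, [0, 1, 0])` agrees with `brute_force_probability(A, B, pi, [0, 1, 0])` up to floating-point error.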
|
py | 1a4fe63577e2a449c6344e23eac618f84d1d92f2 | """
Author: Soubhik Sanyal
Copyright (c) 2019, Soubhik Sanyal
All rights reserved.
Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights on this
computer program.
You can only use this computer program if you have closed a license agreement with MPG or you get the right to use
the computer program from someone who is authorized to grant you that right.
Any use of the computer program without a valid license is prohibited and liable to prosecution.
Copyright 2019 Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG). acting on behalf of its
Max Planck Institute for Intelligent Systems and the Max Planck Institute for Biological Cybernetics.
All rights reserved.
More information about RingNet is available at https://ringnet.is.tue.mpg.de.
based on github.com/akanazawa/hmr
"""
# Sets default args
# Note all data format is NHWC because slim resnet wants NHWC.
import sys
from absl import flags
PRETRAINED_MODEL = './model.pkl'
flags.DEFINE_string('img_path', '/training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180424_033335_TA/selfie/IMG_0092.jpg', 'Image to run')
flags.DEFINE_string('out_folder', './output',
'The output path to store images')
flags.DEFINE_boolean('save_obj_file', True,
'If true the output meshes will be saved')
flags.DEFINE_boolean('save_flame_parameters', True,
'If true the camera and flame parameters will be saved')
flags.DEFINE_boolean('neutralize_expression', True,
                     'If true the expression will be neutralized')
flags.DEFINE_boolean('save_texture', True,
'If true the texture map will be stored')
flags.DEFINE_string('flame_model_path', './flame_model/generic_model.pkl', 'path to the neutral FLAME model')
flags.DEFINE_string('flame_texture_data_path', './flame_model/texture_data_512.npy', 'path to the FLAME texture data')
flags.DEFINE_string('load_path', PRETRAINED_MODEL, 'path to trained model')
flags.DEFINE_integer('batch_size', 1,
'Fixed to 1 for inference')
# Don't change if testing:
flags.DEFINE_integer('img_size', 224,
'Input image size to the network after preprocessing')
flags.DEFINE_string('data_format', 'NHWC', 'Data format')
# Flame parameters:
flags.DEFINE_integer('pose_params', 6,
'number of flame pose parameters')
flags.DEFINE_integer('shape_params', 100,
'number of flame shape parameters')
flags.DEFINE_integer('expression_params', 50,
'number of flame expression parameters')
def get_config():
config = flags.FLAGS
config(sys.argv)
return config
|
py | 1a4fe66f2b90e433d06dbe33418331739e5be86c |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "resp2/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
py | 1a4fe6b058a69a30b8164e4efa31b7206aeca3c0 | _base_ = './fovea_r50_fpn_4x4_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101'))) |
py | 1a4fe6eb166e2ba5e55755b60b89433756635414 | from lyrebird.mock import context
from flask import Response, stream_with_context
import json
class MockHandler:
"""
    Looks up the mock data that matches the rules of the currently activated data group.
    If no matching data is found, the flow is handed over to the next handler.
"""
def handle(self, handler_context):
data = context.application.data_manager.router.get_mock_data(handler_context.flow)
if data:
handler_context.response = self.data2response(data)
def data2response(self, data):
resp_info = json.loads(data.response.content)
code = resp_info['code']
headers = resp_info['headers']
headers['lyrebird'] = 'mock'
resp_data = data.response_data.content
if resp_data:
if type(resp_data) == str:
data_len = len(resp_data.encode())
else:
data_len = len(resp_data)
headers['Content-Length'] = data_len
def gen():
yield resp_data
return Response(stream_with_context(gen()), status=code, headers=headers)
|
py | 1a4fe803ac2bd605870b0a296e09a86e750b1d8b | from flask import Blueprint
from flask_admin.base import MenuLink
from flask_admin.consts import ICON_TYPE_IMAGE
from ddui.dash_app import app as dash_view
from airflow.plugins_manager import AirflowPlugin
ml_repo_website = MenuLink(
category='DataDriver',
name='Git repository',
url='https://gitlab.octo.com/dd/ddui.git',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/git.png'
)
ml_doc = MenuLink(
category='DataDriver',
name='DataDriver API documentation',
url='http://datadriver-doc-ddapi.s3-website-eu-west-1.amazonaws.com/',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/sigle.png'
)
ml_version = MenuLink(
category='DataDriver',
name='Version',
url='/dash/version',
icon_type=ICON_TYPE_IMAGE,
icon_value='brian/sigle.png'
)
brian_bp = Blueprint(
"brian_web", __name__,
template_folder='templates',
static_folder='static/brian',
static_url_path='/static/brian',
)
class DataDriverUIPlugin(AirflowPlugin):
name = 'DataDriver UI Plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = [dash_view]
flask_blueprints = [brian_bp]
menu_links = [ml_doc, ml_repo_website, ml_version] |
py | 1a4fe821d20b90a8097486cfcd7d9fa82d10b84f | """
Django settings for exmp project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'do0g!j0=1#e7e!9a!iljox&7an)-#^=9oloond(a#59!0=018j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'exmp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'exmp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
py | 1a4fe89b650efccff8779b2e55c169667609a929 | from threading import Thread, Event
from queue import Queue
import time
import numpy as np
import traceback
# This code originally used Process not Thread.
# Process is much slower to start (Process.start() is ~100 ms, Thread.start() is a few ms)
# The process-safe versions of Queue and Event are also significantly slower.
# On the other hand, CPU-bound Python threads can't run in parallel ("global interpreter lock").
# The overall problem is not CPU-bound - we should always be limited by tProc execution.
# In the worst case where the tProc is running fast, we should actually be waiting for IO a lot (due to the DMA).
# So we think it's safe to use threads.
# However, this is a complicated problem and we may ultimately need to mess around with sys.setswitchinterval() or go back to Process.
# To use Process instead of Thread, use the following import and change WORKERTYPE.
#from multiprocessing import Process, Queue, Event
class DataStreamer():
"""
Uses a separate thread to read data from the average buffers.
The class methods define the readout loop and initialization of the worker thread.
The QickSoc methods start_readout() and poll_data() are the external interface to the streamer.
We don't lock the QickSoc or the IPs. The user is responsible for not disrupting a readout in progress.
:param soc: The QickSoc object.
:type soc: QickSoc
"""
#WORKERTYPE = Process
WORKERTYPE = Thread
def __init__(self, soc):
self.soc = soc
self.start_worker()
def start_worker(self):
# Initialize flags and queues.
# Passes run commands from the main thread to the worker thread.
self.job_queue = Queue()
# Passes data from the worker thread to the main thread.
self.data_queue = Queue()
# Passes exceptions from the worker thread to the main thread.
self.error_queue = Queue()
# The main thread can use this flag to tell the worker thread to stop.
# The main thread clears the flag when starting readout.
self.stop_flag = Event()
# The worker thread uses this to tell the main thread when it's done.
# The main thread clears the flag when starting readout.
self.done_flag = Event()
self.done_flag.set()
# Process object for the streaming readout.
# daemon=True means the readout thread will be killed if the parent is killed
self.readout_worker = self.WORKERTYPE(target=self._run_readout, daemon=True)
self.readout_worker.start()
def stop_readout(self):
"""
Signal the readout loop to break.
"""
self.stop_flag.set()
def readout_running(self):
"""
Test if the readout loop is running.
:return: readout thread status
:rtype: bool
"""
return not self.done_flag.is_set()
def data_available(self):
"""
Test if data is available in the queue.
:return: data queue status
:rtype: bool
"""
return not self.data_queue.empty()
def _run_readout(self):
"""
        Worker thread for the streaming readout.
        The job parameters below are received as a tuple through job_queue.
        :param total_count: Number of data points expected
        :type total_count: int
        :param counter_addr: Data memory address for the loop counter
        :type counter_addr: int
        :param ch_list: List of readout channels
        :type ch_list: list
        :param reads_per_count: Number of data points to expect per counter increment
        :type reads_per_count: int
"""
while True:
try:
# wait for a job
total_count, counter_addr, ch_list, reads_per_count = self.job_queue.get(block=True)
#print("streamer loop: start", total_count)
count = 0
last_count = 0
# how many measurements to transfer at a time
stride = int(0.1 * self.soc.get_avg_max_length(0))
# bigger stride is more efficient, but the transfer size must never exceed AVG_MAX_LENGTH, so the stride should be set with some safety margin
# make sure count variable is reset to 0 before starting processor
self.soc.tproc.single_write(addr=counter_addr, data=0)
stats = []
t_start = time.time()
# if the tproc is configured for internal start, this will start the program
# for external start, the program will not start until a start pulse is received
self.soc.tproc.start()
# Keep streaming data until you get all of it
while last_count < total_count:
if self.stop_flag.is_set():
print("streamer loop: got stop flag")
break
count = self.soc.tproc.single_read(
addr=counter_addr)*reads_per_count
# wait until either you've gotten a full stride of measurements or you've finished (so you don't go crazy trying to download every measurement)
if count >= min(last_count+stride, total_count):
addr = last_count % self.soc.get_avg_max_length(0)
length = count-last_count
if length >= self.soc.get_avg_max_length(0):
raise RuntimeError("Overflowed the averages buffer (%d unread samples >= buffer size %d)."
% (length, self.soc.get_avg_max_length(0)) +
"\nYou need to slow down the tProc by increasing relax_delay." +
"\nIf the TQDM progress bar is enabled, disabling it may help.")
# transfers must be of even length; trim the length (instead of padding it)
# don't trim if this is the last read of the run
                        if count < total_count:
length -= length % 2
# buffer for each channel
d_buf = np.zeros((len(ch_list), 2, length))
# for each adc channel get the single shot data and add it to the buffer
for iCh, ch in enumerate(ch_list):
data = self.soc.get_accumulated(
ch=ch, address=addr, length=length)
d_buf[iCh] = data
last_count += length
stats = (time.time()-t_start, count, addr, length)
self.data_queue.put((length, (d_buf, stats)))
#if last_count==total_count: print("streamer loop: normal completion")
except Exception as e:
print("streamer loop: got exception")
traceback.print_exc()
# pass the exception to the main thread
self.error_queue.put(e)
# put dummy data in the data queue, to trigger a poll_data read
self.data_queue.put((0, (None, None)))
finally:
# we should set the done flag regardless of whether we completed readout, used the stop flag, or errored out
self.done_flag.set()
|
py | 1a4fe8c8a43f94f762fb9fbb76c6c9961c701643 | from django import template
register = template.Library()
@register.filter
def getcount(item, choice):
"""returns the number of times choice has been selected for item"""
return item.userchoices.filter(choice=choice).count()
@register.filter
def getuniqueitems(userchoices):
"""return a list of unique items given a bunch of userchoices"""
items = []
for userchoice in userchoices:
if userchoice.item not in items:
items.append(userchoice.item)
return items
@register.filter
def getzerochoiceitems(items, choice):
"""return a list of unique items where the given choice has been chosen zero times"""
returnitems = []
for item in items:
if item.userchoices.filter(choice=choice).count()==0:
if item not in returnitems:
returnitems.append(item)
return returnitems
|
py | 1a4fe923b6b54b4c495d28b3c3fc82486a8d23c8 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestLogEntryCollection(object):
"""
Results of a workRequestLog search. Contains both workRequestLog items and other information, such as metadata.
"""
def __init__(self, **kwargs):
"""
Initializes a new WorkRequestLogEntryCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this WorkRequestLogEntryCollection.
:type items: list[oci.service_catalog.models.WorkRequestLogEntry]
"""
self.swagger_types = {
'items': 'list[WorkRequestLogEntry]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
**[Required]** Gets the items of this WorkRequestLogEntryCollection.
List of workRequestLogEntries.
:return: The items of this WorkRequestLogEntryCollection.
:rtype: list[oci.service_catalog.models.WorkRequestLogEntry]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this WorkRequestLogEntryCollection.
List of workRequestLogEntries.
:param items: The items of this WorkRequestLogEntryCollection.
:type: list[oci.service_catalog.models.WorkRequestLogEntry]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
bzl | 1a4fe9d3b89a9f322c750afd6f84b5d6e4ef901d | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementations for resource processing.
Resources are processed according to type, by a series of methods that deal with the specifics for
each resource type. Each of these methods returns a struct, which always has a `files` field
containing resource tuples as described in processor.bzl. Optionally, the structs can also have an
`infoplists` field containing a list of plists that should be merged into the root Info.plist.
"""
load(
"@build_bazel_rules_apple//apple/bundling:file_actions.bzl",
"file_actions",
)
load(
"@build_bazel_rules_apple//apple/internal/partials/support:resources_support.bzl",
"resources_support",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@build_bazel_rules_apple//apple/internal:outputs.bzl",
"outputs",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal:resource_actions.bzl",
"resource_actions",
)
load(
"@build_bazel_rules_apple//apple/internal:resources.bzl",
"NewAppleResourceInfo",
"resources",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleInfo",
)
load(
"@bazel_skylib//lib:new_sets.bzl",
"sets",
)
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
def _merge_root_infoplists(ctx, infoplists, out_infoplist, **kwargs):
"""Registers the root Info.plist generation action.
Args:
ctx: The target's rule context.
infoplists: List of plists that should be merged into the root Info.plist.
out_infoplist: Reference to the output Info plist.
**kwargs: Extra parameters forwarded into the merge_root_infoplists action.
Returns:
A list of tuples as described in processor.bzl with the Info.plist file
reference and the PkgInfo file if required.
"""
# TODO(b/73349137): Remove this symlink. It's only used so that the file has the proper name
# when bundled.
plist_symlink = intermediates.file(
ctx.actions,
ctx.label.name,
"Info.plist",
)
files = [plist_symlink]
file_actions.symlink(ctx, out_infoplist, plist_symlink)
out_pkginfo = None
if ctx.attr._needs_pkginfo:
out_pkginfo = intermediates.file(
ctx.actions,
ctx.label.name,
"PkgInfo",
)
files.append(out_pkginfo)
resource_actions.merge_root_infoplists(
ctx,
infoplists,
out_infoplist,
out_pkginfo,
**kwargs
)
return [(processor.location.content, None, depset(direct = files))]
def _deduplicate(resources_provider, avoid_provider, field):
"""Deduplicates and returns resources between 2 providers for a given field.
Deduplication happens by comparing the target path of a file and the files
themselves. If there are 2 resources with the same target path but different
contents, the files will not be deduplicated.
This approach is naïve in the sense that it deduplicates resources too
aggressively. We also need to compare the target that references the
resources so that they are not deduplicated if they are referenced within
multiple binary-containing bundles.
Args:
resources_provider: The provider with the resources to be bundled.
avoid_provider: The provider with the resources to avoid bundling.
field: The field to deduplicate resources on.
Returns:
A list of tuples with the resources present in avoid_providers removed from
resources_providers.
"""
# Build a dictionary with the file paths under each key for the avoided resources.
avoid_dict = {}
if avoid_provider and hasattr(avoid_provider, field):
for parent_dir, swift_module, files in getattr(avoid_provider, field):
key = "%s_%s" % (parent_dir or "root", swift_module or "root")
avoid_dict[key] = {x.short_path: None for x in files.to_list()}
# Get the resources to keep, compare them to the avoid_dict under the same
# key, and remove the duplicated file references. Then recreate the original
# tuple with only the remaining files, if any.
deduped_tuples = []
for parent_dir, swift_module, files in getattr(resources_provider, field):
key = "%s_%s" % (parent_dir or "root", swift_module or "root")
# Dictionary used as a set to mark files as processed by short_path to deduplicate generated
# files that may appear more than once if multiple architectures are being built.
multi_architecture_deduplication_set = {}
deduped_files = depset([])
for to_bundle_file in files.to_list():
short_path = to_bundle_file.short_path
if short_path in multi_architecture_deduplication_set:
continue
multi_architecture_deduplication_set[short_path] = None
if key in avoid_dict and short_path in avoid_dict[key]:
# If the resource file is present in the provider of resources to avoid, we compare
# the owners of the resource through the owners dictionaries of the providers. If
# there are owners present in resources_provider which are not present in
# avoid_provider, it means that there is at least one target that declares usage of
# the resource which is not accounted for in avoid_provider. If this is the case, we
# add the resource to be bundled in the bundle represented by resource_provider.
deduped_owners = [
o
for o in resources_provider.owners[short_path]
if o not in avoid_provider.owners[short_path]
]
if deduped_owners:
deduped_files = depset(
direct = [to_bundle_file],
transitive = [deduped_files],
)
else:
deduped_files = depset(direct = [to_bundle_file], transitive = [deduped_files])
if deduped_files:
deduped_tuples.append((parent_dir, swift_module, deduped_files))
return deduped_tuples
def _locales_requested(ctx):
"""Determines which locales to include when resource actions.
If the user has specified "apple.locales_to_include" we use those. Otherwise we don't filter.
'Base' is included by default to any given list of locales to include.
Args:
ctx: The rule context.
Returns:
A set of locales to include or None if all should be included.
"""
requested_locales = ctx.var.get("apple.locales_to_include")
if requested_locales != None:
return sets.make(["Base"] + [x.strip() for x in requested_locales.split(",")])
else:
return None
def _locale_for_path(resource_path):
"""Returns the detected locale for the given resource path."""
if not resource_path:
return None
loc = resource_path.find(".lproj")
if loc == -1:
return None
# If there was more after '.lproj', then it has to be a directory, otherwise
# it was part of some other extension.
    if (loc + 6) < len(resource_path) and resource_path[loc + 6] != "/":
return None
locale_start = resource_path.rfind("/", end = loc)
if locale_start < 0:
return resource_path[0:loc]
return resource_path[locale_start + 1:loc]
def _validate_processed_locales(locales_requested, locales_included, locales_dropped):
"""Prints a warning if locales were dropped and none of the requested ones were included."""
if sets.length(locales_dropped):
# Display a warning if a locale was dropped and there are unfulfilled locale requests; it
# could mean that the user made a mistake in defining the locales they want to keep.
if not sets.is_equal(locales_requested, locales_included):
unused_locales = sets.difference(locales_requested, locales_included)
print("Warning: Did not have resources that matched " + sets.str(unused_locales) +
" in locale filter. Please verify apple.locales_to_include is defined" +
" properly.")
def _resources_partial_impl(
ctx,
bundle_id,
bundle_verification_targets,
plist_attrs,
targets_to_avoid,
top_level_attrs,
version_keys_required):
"""Implementation for the resource processing partial."""
providers = []
if hasattr(ctx.attr, "deps"):
providers.extend([
x[NewAppleResourceInfo]
for x in ctx.attr.deps
if NewAppleResourceInfo in x
])
# TODO(kaipi): Bucket top_level_attrs directly instead of collecting and
# splitting.
files = resources.collect(ctx.attr, res_attrs = top_level_attrs)
if files:
providers.append(resources.bucketize(files, owner = str(ctx.label)))
if plist_attrs:
plists = resources.collect(ctx.attr, res_attrs = plist_attrs)
plist_provider = resources.bucketize_typed(
plists,
owner = str(ctx.label),
bucket_type = "infoplists",
)
providers.append(plist_provider)
avoid_providers = [
x[NewAppleResourceInfo]
for x in targets_to_avoid
if NewAppleResourceInfo in x
]
avoid_provider = None
if avoid_providers:
# Call merge_providers with validate_all_resources_owned set, to ensure that all the
# resources from dependency bundles have an owner.
avoid_provider = resources.merge_providers(
avoid_providers,
validate_all_resources_owned = True,
)
final_provider = resources.merge_providers(providers, default_owner = str(ctx.label))
# Map of resource provider fields to a tuple that contains the method to use to process those
# resources and a boolean indicating whether the Swift module is required for that processing.
provider_field_to_action = {
"asset_catalogs": (resources_support.asset_catalogs, False),
"datamodels": (resources_support.datamodels, True),
"infoplists": (resources_support.infoplists, False),
"plists": (resources_support.plists_and_strings, False),
"pngs": (resources_support.pngs, False),
# TODO(b/113252360): Remove this once we can correctly process Fileset files.
"resource_zips": (resources_support.resource_zips, False),
"storyboards": (resources_support.storyboards, True),
"strings": (resources_support.plists_and_strings, False),
"texture_atlases": (resources_support.texture_atlases, False),
"unprocessed": (resources_support.noop, False),
"xibs": (resources_support.xibs, True),
}
# List containing all the files that the processor will bundle in their
# configured location.
bundle_files = []
fields = resources.populated_resource_fields(final_provider)
infoplists = []
locales_requested = _locales_requested(ctx)
locales_included = sets.make(["Base"])
locales_dropped = sets.make()
for field in fields:
processing_func, requires_swift_module = provider_field_to_action[field]
deduplicated = _deduplicate(final_provider, avoid_provider, field)
for parent_dir, swift_module, files in deduplicated:
if locales_requested:
locale = _locale_for_path(parent_dir)
if sets.contains(locales_requested, locale):
sets.insert(locales_included, locale)
elif locale != None:
sets.insert(locales_dropped, locale)
continue
processing_args = {
"ctx": ctx,
"parent_dir": parent_dir,
"files": files,
}
# Only pass the Swift module name if the type of resource to process
# requires it.
if requires_swift_module:
processing_args["swift_module"] = swift_module
result = processing_func(**processing_args)
bundle_files.extend(result.files)
if hasattr(result, "infoplists"):
infoplists.extend(result.infoplists)
if locales_requested:
_validate_processed_locales(locales_requested, locales_included, locales_dropped)
if bundle_id:
# If no bundle ID was given, do not process the root Info.plist and do not validate embedded
# bundles.
bundle_verification_infoplists = [
b.target[AppleBundleInfo].infoplist
for b in bundle_verification_targets
]
bundle_verification_required_values = [
(
b.target[AppleBundleInfo].infoplist,
[[b.parent_bundle_id_reference, bundle_id]],
)
for b in bundle_verification_targets
if hasattr(b, "parent_bundle_id_reference")
]
out_infoplist = outputs.infoplist(ctx)
bundle_files.extend(
_merge_root_infoplists(
ctx,
infoplists,
out_infoplist,
bundle_id = bundle_id,
child_plists = bundle_verification_infoplists,
child_required_values = bundle_verification_required_values,
version_keys_required = version_keys_required,
),
)
return struct(bundle_files = bundle_files, providers = [final_provider])
def resources_partial(
bundle_id = None,
bundle_verification_targets = [],
plist_attrs = [],
targets_to_avoid = [],
top_level_attrs = [],
version_keys_required = True):
"""Constructor for the resources processing partial.
This partial collects and propagates all resources that should be bundled in the target being
processed.
Args:
bundle_id: Optional bundle ID to use when processing resources. If no bundle ID is given,
the bundle will not contain a root Info.plist and no embedded bundle verification will
occur.
bundle_verification_targets: List of structs that reference embedable targets that need to
be validated. The structs must have a `target` field with the target containing an
Info.plist file that will be validated. The structs may also have a
`parent_bundle_id_reference` field that contains the plist path, in list form, to the
plist entry that must contain this target's bundle ID.
plist_attrs: List of attributes that should be processed as Info plists that should be
merged and processed.
targets_to_avoid: List of targets containing resources that should be deduplicated from the
target being processed.
top_level_attrs: List of attributes containing resources that need to be processed from the
target being processed.
version_keys_required: Whether to validate that the Info.plist version keys are correctly
configured.
Returns:
A partial that returns the bundle location of the resources and the resources provider.
"""
return partial.make(
_resources_partial_impl,
bundle_id = bundle_id,
bundle_verification_targets = bundle_verification_targets,
plist_attrs = plist_attrs,
targets_to_avoid = targets_to_avoid,
top_level_attrs = top_level_attrs,
version_keys_required = version_keys_required,
)
|
py | 1a4fea24f5934520e6722daf78eb80972586da07 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUNetconfSessionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUNetconfManager(NURESTObject):
""" Represents a NetconfManager in the VSD
Notes:
            Identifies the Netconf Manager communicating with VSD. This can only be created by the netconfmgr user.
"""
__rest_name__ = "netconfmanager"
__resource_name__ = "netconfmanagers"
## Constants
CONST_STATUS_CONNECTED = "CONNECTED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_STATUS_JMS_DISCONNECTED = "JMS_DISCONNECTED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_STATUS_DISCONNECTED = "DISCONNECTED"
CONST_STATUS_INIT = "INIT"
def __init__(self, **kwargs):
""" Initializes a NetconfManager instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> netconfmanager = NUNetconfManager(id=u'xxxx-xxx-xxx-xxx', name=u'NetconfManager')
>>> netconfmanager = NUNetconfManager(data=my_dict)
"""
super(NUNetconfManager, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._release = None
self._entity_scope = None
self._assoc_entity_type = None
self._status = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="release", remote_name="release", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONNECTED', u'DISCONNECTED', u'INIT', u'JMS_DISCONNECTED'])
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.netconf_sessions = NUNetconfSessionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the Netconf Manager entity.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the Netconf Manager entity.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def release(self):
""" Get release value.
Notes:
Netconf Manager RPM release version
"""
return self._release
@release.setter
def release(self, value):
""" Set release value.
Notes:
Netconf Manager RPM release version
"""
self._release = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def status(self):
""" Get status value.
Notes:
VSD connection status with this Netconf Manager
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
VSD connection status with this Netconf Manager
"""
self._status = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
py | 1a4feb621dc03c1f87ea8bb51ea17d89f6f0638e | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .resnet import ResnetGenerator
from .unet import UnetGenerator
from .rrdb_net import RRDBNet
from .makeup import GeneratorPSGANAttention
from .deep_conv import DeepConvGenerator, ConditionalDeepConvGenerator
from .resnet_ugatit import ResnetUGATITGenerator
from .dcgenerator import DCGenerator
from .generater_animegan import AnimeGenerator, AnimeGeneratorLite
from .wav2lip import Wav2Lip
from .lesrcnn import LESRCNNGenerator
from .resnet_ugatit_p2c import ResnetUGATITP2CGenerator
from .generator_styleganv2 import StyleGANv2Generator
from .generator_pixel2style2pixel import Pixel2Style2Pixel
from .drn import DRNGenerator
from .generator_starganv2 import StarGANv2Generator, StarGANv2Style, StarGANv2Mapping, FAN
from .edvr import EDVRNet
from .generator_firstorder import FirstOrderGenerator
from .generater_lapstyle import DecoderNet, Encoder, RevisionNet
from .basicvsr import BasicVSRNet
from .mpr import MPRNet
from .iconvsr import IconVSR
from .gpen import GPEN
from .pan import PAN
from .basicvsr_plus_plus import BasicVSRPlusPlus
|
py | 1a4feb73abc7f4158e5e243ba704da09b246756c |
import numpy as np
import pandas as pd
from abc import abstractmethod
from gym.spaces import Space, Box
from typing import Dict
from trades import Trade, TradeType
from exchanges import InstrumentExchange
from slippage import RandomSlippageModel
class SimulatedExchange(InstrumentExchange):
"""An instrument exchange, in which the price history is based off the supplied data frame and
trade execution is largely decided by the designated slippage model.
If the `data_frame` parameter is not supplied upon initialization, it must be set before
the exchange can be used within a trading environment.
"""
def __init__(self, data_frame: pd.DataFrame = None, **kwargs):
super().__init__(base_instrument=kwargs.get('base_instrument', 'USD'), dtype=kwargs.get('dtype', np.float16))
if data_frame is not None:
self._data_frame = data_frame.astype(self._dtype)
self._commission_percent = kwargs.get('commission_percent', 0.3)
self._base_precision = kwargs.get('base_precision', 2)
self._instrument_precision = kwargs.get('instrument_precision', 8)
self._initial_balance = kwargs.get('initial_balance', 1E4)
self._min_order_amount = kwargs.get('min_order_amount', 1E-3)
self._min_trade_price = kwargs.get('min_trade_price', 1E-6)
self._max_trade_price = kwargs.get('max_trade_price', 1E6)
self._min_trade_amount = kwargs.get('min_trade_amount', 1E-3)
self._max_trade_amount = kwargs.get('max_trade_amount', 1E6)
max_allowed_slippage_percent = kwargs.get('max_allowed_slippage_percent', 1.0)
SlippageModelClass = kwargs.get('slippage_model', RandomSlippageModel)
self._slippage_model = SlippageModelClass(max_allowed_slippage_percent)
@property
def data_frame(self) -> pd.DataFrame:
"""The underlying data model backing the price and volume simulation."""
return self._data_frame
@data_frame.setter
def data_frame(self, data_frame: pd.DataFrame):
self._data_frame = data_frame
@property
def initial_balance(self) -> float:
return self._initial_balance
@property
def balance(self) -> float:
return self._balance
@property
def portfolio(self) -> Dict[str, float]:
return self._portfolio
@property
def trades(self) -> pd.DataFrame:
return self._trades
@property
def performance(self) -> pd.DataFrame:
return self._performance
@property
def observation_space(self) -> Space:
low = np.array([self._min_trade_price, ] * 4 + [self._min_trade_amount, ])
high = np.array([self._max_trade_price, ] * 4 + [self._max_trade_amount, ])
return Box(low=low, high=high, dtype=self._dtype)
@property
def has_next_observation(self) -> bool:
return self._current_step < len(self._data_frame) - 1
def next_observation(self) -> pd.DataFrame:
obs = self._data_frame.iloc[self._current_step]
self._current_step += 1
return obs
def current_price(self, symbol: str) -> float:
if len(self._data_frame) == 0:
self.next_observation()
return float(self._data_frame['close'].values[self._current_step])
def _is_valid_trade(self, trade: Trade) -> bool:
if trade.trade_type is TradeType.MARKET_BUY or trade.trade_type is TradeType.LIMIT_BUY:
return trade.amount >= self._min_order_amount and self._balance >= trade.amount * trade.price
elif trade.trade_type is TradeType.MARKET_SELL or trade.trade_type is TradeType.LIMIT_SELL:
return trade.amount >= self._min_order_amount and self._portfolio.get(trade.symbol, 0) >= trade.amount
return True
def _update_account(self, trade: Trade):
if trade.amount > 0:
self._trades = self._trades.append({
'step': self._current_step,
'symbol': trade.symbol,
'type': trade.trade_type,
'amount': trade.amount,
'price': trade.price
}, ignore_index=True)
if trade.is_buy:
self._balance -= trade.amount * trade.price
self._portfolio[trade.symbol] = self._portfolio.get(trade.symbol, 0) + trade.amount
elif trade.is_sell:
self._balance += trade.amount * trade.price
self._portfolio[trade.symbol] = self._portfolio.get(trade.symbol, 0) - trade.amount
self._portfolio[self._base_instrument] = self._balance
self._performance = self._performance.append({
'balance': self.balance,
'net_worth': self.net_worth,
}, ignore_index=True)
def execute_trade(self, trade: Trade) -> Trade:
current_price = self.current_price(symbol=trade.symbol)
commission = self._commission_percent / 100
filled_trade = trade.copy()
if filled_trade.is_hold or not self._is_valid_trade(filled_trade):
filled_trade.amount = 0
elif filled_trade.is_buy:
price_adjustment = (1 + commission)
filled_trade.price = round(current_price * price_adjustment, self._base_precision)
filled_trade.amount = round(
(filled_trade.price * filled_trade.amount) / filled_trade.price, self._instrument_precision)
elif filled_trade.is_sell:
price_adjustment = (1 - commission)
filled_trade.price = round(current_price * price_adjustment, self._base_precision)
filled_trade.amount = round(filled_trade.amount, self._instrument_precision)
filled_trade = self._slippage_model.fill_order(filled_trade, current_price)
self._update_account(filled_trade)
return filled_trade
def reset(self):
self._balance = self._initial_balance
self._portfolio = {self._base_instrument: self._balance}
self._trades = pd.DataFrame([], columns=['step', 'symbol', 'type', 'amount', 'price'])
self._performance = pd.DataFrame([], columns=['balance', 'net_worth'])
self._current_step = 0
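# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal run of
# SimulatedExchange outside a trading environment. The OHLCV columns, the
# Trade keyword arguments, and the 'BTC' symbol are assumptions made for
# illustration; they rely on the trades/slippage packages imported above
# behaving as this module expects.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    prices = pd.DataFrame({
        'open': np.linspace(100, 110, 50),
        'high': np.linspace(101, 111, 50),
        'low': np.linspace(99, 109, 50),
        'close': np.linspace(100, 110, 50),
        'volume': np.full(50, 1000.0),
    })
    exchange = SimulatedExchange(data_frame=prices, base_instrument='USD')
    exchange.reset()
    # assumed Trade signature: Trade(symbol, trade_type, amount, price)
    buy = Trade(symbol='BTC', trade_type=TradeType.MARKET_BUY, amount=0.1,
                price=exchange.current_price('BTC'))
    filled = exchange.execute_trade(buy)
    print(filled.amount, exchange.balance)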
|
py | 1a4febab217f4cc213b6da85d7e35068a04fa89c | #!/usr/bin/env python3
from omxplayer.player import OMXPlayer
from pathlib import Path
from time import sleep
import logging
logging.basicConfig(level=logging.INFO)
import socket
import pdb
noCommMode = False
if not noCommMode:
HOST = ''
PORT = 55555
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST,PORT))
sock.listen(1)
conn,addr = sock.accept()
vidPath = "raspi.avi"
player_log = logging.getLogger("Player 1")
player = OMXPlayer(vidPath,
dbus_name='org.mpris.MediaPlayer2.omxplayer1')
player.playEvent += lambda _: player_log.info("Play")
player.pauseEvent += lambda _: player_log.info("Pause")
player.stopEvent += lambda _: player_log.info("Stop")
player.set_aspect_mode('stretch')
player.set_video_pos(0, 0, 700, int(512*2.14))
sleep(10)
if noCommMode:
# for debugging
player.set_position(120*60)
# player.play()
# sleep(1)
# player.pause()
sleep(20)
player.set_position(130*60)
# player.play()
sleep(20)
player.set_position(140*60)
sleep(20)
player.stop()
else:
while True:
data = conn.recv(1024).decode()
print('received: ' + data)
if data == 'term':
break
if '_' in data:
cmd = data.split('_')[0]
arg = float(data.split('_')[1])
if cmd=='pause':
player.set_position(arg)
player.play()
sleep(10)
player.pause()
elif cmd=='play':
player.set_position(arg)
conn.close()
player.quit()
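# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the original script): a minimal client
# for the plain-text command protocol the loop above expects, i.e. messages
# of the form '<cmd>_<seconds>' such as 'play_7200' or 'pause_7800', plus
# 'term' to end the loop. The hostname below is an assumption.
# ---------------------------------------------------------------------------
# import socket
# from time import sleep
#
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('raspberrypi.local', 55555))
# client.sendall(b'play_7200')    # seek to 120 min and play
# sleep(20)
# client.sendall(b'pause_7800')   # seek to 130 min, play briefly, then pause
# sleep(20)
# client.sendall(b'term')         # end the server loop
# client.close()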
|
py | 1a4fec271a9cd619579311d3fa8e37420073af34 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import onnxruntime
import pytest
import shutil
from mxnet import gluon
from mxnet.test_utils import assert_almost_equal
@pytest.mark.skip(reason='Gluon no long support v1.x models since https://github.com/apache/incubator-mxnet/pull/20262')
def test_resnet50_v2(tmp_path):
try:
ctx = mx.cpu()
model = gluon.model_zoo.vision.resnet50_v2(pretrained=True, ctx=ctx)
BS = 1
inp = mx.random.uniform(0, 1, (1, 3, 224, 224))
model.hybridize(static_alloc=True)
out = model(inp)
prefix = "%s/resnet50" % tmp_path
model.export(prefix)
sym_file = "%s-symbol.json" % prefix
params_file = "%s-0000.params" % prefix
onnx_file = "%s.onnx" % prefix
dynamic_input_shapes = [('batch', 3, 224, 224)]
input_shapes = [(1, 3, 224, 224)]
input_types = [np.float32]
converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
input_types, onnx_file,
dynamic=True,
dynamic_input_shapes=dynamic_input_shapes)
ses_opt = onnxruntime.SessionOptions()
ses_opt.log_severity_level = 3
session = onnxruntime.InferenceSession(onnx_file, ses_opt)
BS = 10
inp = mx.random.uniform(0, 1, (BS, 3, 224, 224))
mx_out = model(inp)
onnx_inputs = [inp]
input_dict = dict((session.get_inputs()[i].name, onnx_inputs[i].asnumpy())
for i in range(len(onnx_inputs)))
on_out = session.run(None, input_dict)
assert_almost_equal(mx_out, on_out, rtol=0.001, atol=0.01)
finally:
shutil.rmtree(tmp_path)
|
py | 1a4fecefc8bbd9c6263891ab0ae31d8945f356f6 | #!/usr/bin/python
import sys
import panflute as pf
from pangloss.config import merge_settings
from pangloss.backend import formats
def gloss(elem, doc):
if isinstance(elem, pf.OrderedList):
if elem.style == 'Example':
if doc.format in formats:
backend = doc.get_metadata(doc.format + 'Backend')
if backend in formats[doc.format]:
return formats[doc.format][backend](elem)
else:
return None
def gloss_refs(elem, doc):
if isinstance(elem, pf.Cite):
text = elem.content[0].text
if text[:4] == '@ex:':
if doc.format == 'latex':
ref = "\\ref{ex:" + text[4:] + "}"
fmt = doc.get_metadata('exampleRefFormat')
#pf.debug(fmt)
if isinstance(fmt, list):
ref = (fmt[0]).format(ref)
else:
ref = fmt.format(ref)
return pf.RawInline(ref, format = 'latex')
elif doc.format == 'html':
# TODO
pass
def main():
doc = pf.load(input_stream=sys.stdin)
merge_settings(doc)
pf.dump(pf.run_filters([gloss, gloss_refs], doc=doc),
output_stream=sys.stdout)
if __name__ == '__main__':
main()
|
py | 1a4fee8afdedda408cc3fcba824bf014960fa71d | #!/usr/bin/python3
"""Resets the datastore
Deletes all sqlite files. This will not reset NiFi, but it resets the lhipa and cl model states.
"""
__author__ = "Martin Eigenmann"
__license__ = "unlicence"
__version__ = "0.0.1"
__email__ = "[email protected]"
__status__ = "Prototpye"
import json
import sys
import os
import sqlite3
for f in os.listdir('/data'):
if len(f.split('-')) == 1:
db = sqlite3.connect(f'/data/{f}')
cursor = db.cursor()
cursor.execute('DROP TABLE IF EXISTS et')
db.commit()
cursor.close()
db.close()
os.remove(f'/data/{f}')
print(json.dumps({ "reset": True })) |
py | 1a4fee935714629cbfdc5115857e45e07d9376f8 | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from mmseg.core import add_prefix
from ..builder import (SEGMENTORS, build_backbone, build_head, build_loss,
build_neck)
from .base import Base3DSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder3D(Base3DSegmentor):
"""3D Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which can be discarded during inference.
"""
def __init__(self,
backbone,
decode_head,
neck=None,
auxiliary_head=None,
loss_regularization=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(EncoderDecoder3D, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
self._init_decode_head(decode_head)
self._init_auxiliary_head(auxiliary_head)
self._init_loss_regularization(loss_regularization)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
assert self.with_decode_head, \
'3D EncoderDecoder Segmentor should have a decode_head'
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = build_head(decode_head)
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(build_head(head_cfg))
else:
self.auxiliary_head = build_head(auxiliary_head)
def _init_loss_regularization(self, loss_regularization):
"""Initialize ``loss_regularization``"""
if loss_regularization is not None:
if isinstance(loss_regularization, list):
self.loss_regularization = nn.ModuleList()
for loss_cfg in loss_regularization:
self.loss_regularization.append(build_loss(loss_cfg))
else:
self.loss_regularization = build_loss(loss_regularization)
def extract_feat(self, points):
"""Extract features from points."""
x = self.backbone(points)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, points, img_metas):
"""Encode points with backbone and decode into a semantic segmentation
map of the same size as input.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
Returns:
torch.Tensor: Segmentation logits of shape [B, num_classes, N].
"""
x = self.extract_feat(points)
out = self._decode_head_forward_test(x, img_metas)
return out
def _decode_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, pts_semantic_mask):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
pts_semantic_mask,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, pts_semantic_mask, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def _loss_regularization_forward_train(self):
"""Calculate regularization loss for model weight in training."""
losses = dict()
if isinstance(self.loss_regularization, nn.ModuleList):
for idx, regularize_loss in enumerate(self.loss_regularization):
loss_regularize = dict(
loss_regularize=regularize_loss(self.modules()))
losses.update(add_prefix(loss_regularize, f'regularize_{idx}'))
else:
loss_regularize = dict(
loss_regularize=self.loss_regularization(self.modules()))
losses.update(add_prefix(loss_regularize, 'regularize'))
return losses
def forward_dummy(self, points):
"""Dummy forward function."""
seg_logit = self.encode_decode(points, None)
return seg_logit
def forward_train(self, points, img_metas, pts_semantic_mask):
"""Forward function for training.
Args:
points (list[torch.Tensor]): List of points of shape [N, C].
img_metas (list): Image metas.
pts_semantic_mask (list[torch.Tensor]): List of point-wise semantic
labels of shape [N].
Returns:
dict[str, Tensor]: Losses.
"""
points_cat = torch.stack(points)
pts_semantic_mask_cat = torch.stack(pts_semantic_mask)
# extract features using backbone
x = self.extract_feat(points_cat)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
pts_semantic_mask_cat)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, pts_semantic_mask_cat)
losses.update(loss_aux)
if self.with_regularization_loss:
loss_regularize = self._loss_regularization_forward_train()
losses.update(loss_regularize)
return losses
@staticmethod
def _input_generation(coords,
patch_center,
coord_max,
feats,
use_normalized_coord=False):
"""Generating model input.
Generate input by subtracting patch center and adding additional
features. Currently support colors and normalized xyz as features.
Args:
coords (torch.Tensor): Sampled 3D point coordinate of shape [S, 3].
patch_center (torch.Tensor): Center coordinate of the patch.
coord_max (torch.Tensor): Max coordinate of all 3D points.
feats (torch.Tensor): Features of sampled points of shape [S, C].
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
Returns:
torch.Tensor: The generated input data of shape [S, 3+C'].
"""
# subtract patch center, the z dimension is not centered
centered_coords = coords.clone()
centered_coords[:, 0] -= patch_center[0]
centered_coords[:, 1] -= patch_center[1]
# normalized coordinates as extra features
if use_normalized_coord:
normalized_coord = coords / coord_max
feats = torch.cat([feats, normalized_coord], dim=1)
points = torch.cat([centered_coords, feats], dim=1)
return points
def _sliding_patch_generation(self,
points,
num_points,
block_size,
sample_rate=0.5,
use_normalized_coord=False,
eps=1e-3):
"""Sampling points in a sliding window fashion.
First sample patches to cover all the input points.
Then sample points in each patch to batch points of a certain number.
Args:
points (torch.Tensor): Input points of shape [N, 3+C].
num_points (int): Number of points to be sampled in each patch.
block_size (float, optional): Size of a patch to sample.
sample_rate (float, optional): Stride used in sliding patch.
Defaults to 0.5.
use_normalized_coord (bool, optional): Whether to use normalized
xyz as additional features. Defaults to False.
eps (float, optional): A value added to patch boundary to guarantee
points coverage. Defaults to 1e-3.
Returns:
np.ndarray | np.ndarray:
- patch_points (torch.Tensor): Points of different patches of
shape [K, N, 3+C].
- patch_idxs (torch.Tensor): Index of each point in
`patch_points`, of shape [K, N].
"""
device = points.device
# we assume the first three dims are points' 3D coordinates
# and the rest dims are their per-point features
coords = points[:, :3]
feats = points[:, 3:]
coord_max = coords.max(0)[0]
coord_min = coords.min(0)[0]
stride = block_size * sample_rate
num_grid_x = int(
torch.ceil((coord_max[0] - coord_min[0] - block_size) /
stride).item() + 1)
num_grid_y = int(
torch.ceil((coord_max[1] - coord_min[1] - block_size) /
stride).item() + 1)
patch_points, patch_idxs = [], []
for idx_y in range(num_grid_y):
s_y = coord_min[1] + idx_y * stride
e_y = torch.min(s_y + block_size, coord_max[1])
s_y = e_y - block_size
for idx_x in range(num_grid_x):
s_x = coord_min[0] + idx_x * stride
e_x = torch.min(s_x + block_size, coord_max[0])
s_x = e_x - block_size
# extract points within this patch
cur_min = torch.tensor([s_x, s_y, coord_min[2]]).to(device)
cur_max = torch.tensor([e_x, e_y, coord_max[2]]).to(device)
cur_choice = ((coords >= cur_min - eps) &
(coords <= cur_max + eps)).all(dim=1)
if not cur_choice.any(): # no points in this patch
continue
# sample points in this patch to multiple batches
cur_center = cur_min + block_size / 2.0
point_idxs = torch.nonzero(cur_choice, as_tuple=True)[0]
num_batch = int(np.ceil(point_idxs.shape[0] / num_points))
point_size = int(num_batch * num_points)
replace = point_size > 2 * point_idxs.shape[0]
num_repeat = point_size - point_idxs.shape[0]
if replace: # duplicate
point_idxs_repeat = point_idxs[torch.randint(
0, point_idxs.shape[0],
size=(num_repeat, )).to(device)]
else:
point_idxs_repeat = point_idxs[torch.randperm(
point_idxs.shape[0])[:num_repeat]]
choices = torch.cat([point_idxs, point_idxs_repeat], dim=0)
choices = choices[torch.randperm(choices.shape[0])]
# construct model input
point_batches = self._input_generation(
coords[choices],
cur_center,
coord_max,
feats[choices],
use_normalized_coord=use_normalized_coord)
patch_points.append(point_batches)
patch_idxs.append(choices)
patch_points = torch.cat(patch_points, dim=0)
patch_idxs = torch.cat(patch_idxs, dim=0)
# make sure all points are sampled at least once
assert torch.unique(patch_idxs).shape[0] == points.shape[0], \
'some points are not sampled in sliding inference'
return patch_points, patch_idxs
def slide_inference(self, point, img_meta, rescale):
"""Inference by sliding-window with overlap.
Args:
point (torch.Tensor): Input points of shape [N, 3+C].
img_meta (dict): Meta information of input sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map of shape [num_classes, N].
"""
num_points = self.test_cfg.num_points
block_size = self.test_cfg.block_size
sample_rate = self.test_cfg.sample_rate
use_normalized_coord = self.test_cfg.use_normalized_coord
batch_size = self.test_cfg.batch_size * num_points
# patch_points is of shape [K*N, 3+C], patch_idxs is of shape [K*N]
patch_points, patch_idxs = self._sliding_patch_generation(
point, num_points, block_size, sample_rate, use_normalized_coord)
feats_dim = patch_points.shape[1]
seg_logits = [] # save patch predictions
for batch_idx in range(0, patch_points.shape[0], batch_size):
batch_points = patch_points[batch_idx:batch_idx + batch_size]
batch_points = batch_points.view(-1, num_points, feats_dim)
# batch_seg_logit is of shape [B, num_classes, N]
batch_seg_logit = self.encode_decode(batch_points, img_meta)
batch_seg_logit = batch_seg_logit.transpose(1, 2).contiguous()
seg_logits.append(batch_seg_logit.view(-1, self.num_classes))
# aggregate per-point logits by indexing sum and dividing count
seg_logits = torch.cat(seg_logits, dim=0) # [K*N, num_classes]
expand_patch_idxs = patch_idxs.unsqueeze(1).repeat(1, self.num_classes)
preds = point.new_zeros((point.shape[0], self.num_classes)).\
scatter_add_(dim=0, index=expand_patch_idxs, src=seg_logits)
count_mat = torch.bincount(patch_idxs)
preds = preds / count_mat[:, None]
# TODO: if rescale and voxelization segmentor
return preds.transpose(0, 1) # to [num_classes, K*N]
def whole_inference(self, points, img_metas, rescale):
"""Inference with full scene (one forward pass without sliding)."""
seg_logit = self.encode_decode(points, img_metas)
# TODO: if rescale and voxelization segmentor
return seg_logit
def inference(self, points, img_metas, rescale):
"""Inference with slide/whole style.
Args:
points (torch.Tensor): Input points of shape [B, N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
if self.test_cfg.mode == 'slide':
seg_logit = torch.stack([
self.slide_inference(point, img_meta, rescale)
for point, img_meta in zip(points, img_metas)
], 0)
else:
seg_logit = self.whole_inference(points, img_metas, rescale)
output = F.softmax(seg_logit, dim=1)
return output
def simple_test(self, points, img_metas, rescale=True):
"""Simple test with single scene.
Args:
points (list[torch.Tensor]): List of points of shape [N, 3+C].
img_metas (list[dict]): Meta information of each sample.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
# 3D segmentation requires per-point prediction, so it's impossible
# to use down-sampling to get a batch of scenes with same num_points
# therefore, we only support testing one scene every time
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point.unsqueeze(0), [img_meta],
rescale)[0]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
def aug_test(self, points, img_metas, rescale=True):
"""Test with augmentations.
Args:
points (list[torch.Tensor]): List of points of shape [B, N, 3+C].
img_metas (list[list[dict]]): Meta information of each sample.
Outer list are different samples while inner is different augs.
rescale (bool): Whether transform to original number of points.
Will be used for voxelization based segmentors.
Defaults to True.
Returns:
list[dict]: The output prediction result with following keys:
- semantic_mask (Tensor): Segmentation mask of shape [N].
"""
# in aug_test, one scene going through different augmentations could
# have the same number of points and are stacked as a batch
# to save memory, we get augmented seg logit inplace
seg_pred = []
for point, img_meta in zip(points, img_metas):
seg_prob = self.inference(point, img_meta, rescale)
seg_prob = seg_prob.mean(0) # [num_classes, N]
seg_map = seg_prob.argmax(0) # [N]
# to cpu tensor for consistency with det3d
seg_map = seg_map.cpu()
seg_pred.append(seg_map)
# warp in dict
seg_pred = [dict(semantic_mask=seg_map) for seg_map in seg_pred]
return seg_pred
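# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): the per-point logit
# aggregation used by `slide_inference` above, reproduced on toy tensors.
# Points sampled into several overlapping patches have their logits summed
# with scatter_add_ and then averaged by visit count. All shapes and values
# here are made up purely for demonstration.
# ---------------------------------------------------------------------------
def _demo_logit_aggregation():
    num_points, num_classes = 5, 3
    # pretend two patches of four sampled point indices each (with repeats)
    patch_idxs = torch.tensor([0, 1, 2, 3, 2, 3, 4, 0])
    seg_logits = torch.randn(patch_idxs.shape[0], num_classes)
    expand_idxs = patch_idxs.unsqueeze(1).repeat(1, num_classes)
    # sum the logits of every occurrence of a point into that point's row
    preds = seg_logits.new_zeros((num_points, num_classes)).scatter_add_(
        dim=0, index=expand_idxs, src=seg_logits)
    # divide by how many times each point was sampled
    count_mat = torch.bincount(patch_idxs, minlength=num_points)
    preds = preds / count_mat[:, None]
    return preds.transpose(0, 1)  # [num_classes, num_points]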
|
py | 1a4feec32eaa8211c23787af17ebfff4194f90df | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.mimeview.tests import api, patch, pygments, rst, txtl
from trac.mimeview.tests.functional import functionalSuite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(api.test_suite())
suite.addTest(patch.test_suite())
suite.addTest(pygments.test_suite())
suite.addTest(rst.test_suite())
suite.addTest(txtl.test_suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
py | 1a4fef8efdd5aaac9b9597aed51079955d11218e | _base_ = '../../_base_/schedules/schedule_2x.py'
optimizer_config = dict(
grad_clip=dict(_delete_=True, max_norm=10, norm_type=2))
cudnn_benchmark = True
|
py | 1a4ff0c8457289b393bd5fea23d2a331864fdcb8 | # encoding: utf-8
# For Facebook
FACEBOOK_APP_SECRET = ''
FACEBOOK_APP_ID = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
MESSAGE_FORMAT = u'''Hi,
%(message)s
--
%(creator)s'''
# set email sender address
#default_email_sender = ''
# set default address to send messages
#default_email_to = ''
COMMIT_SCRIPT='' |
py | 1a4ff108bad8e54a56da6e6aa090cf29c7422796 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import os
import re
import sys
from copy import deepcopy
from importlib.machinery import SourceFileLoader
from datetime import datetime
import pandas as pd
from .procedure import Procedure, UnknownProcedure
from .parameters import Parameter
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def unique_filename(directory, prefix='DATA', suffix='', ext='csv',
dated_folder=False, index=True, datetimeformat="%Y-%m-%d"):
""" Returns a unique filename based on the directory and prefix
"""
now = datetime.now()
directory = os.path.abspath(directory)
if dated_folder:
directory = os.path.join(directory, now.strftime('%Y-%m-%d'))
if not os.path.exists(directory):
os.makedirs(directory)
if index:
i = 1
basename = "%s%s" % (prefix, now.strftime(datetimeformat))
basepath = os.path.join(directory, basename)
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
while os.path.exists(filename):
i += 1
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
else:
basename = "%s%s%s.%s" % (prefix, now.strftime(datetimeformat), suffix, ext)
filename = os.path.join(directory, basename)
return filename
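# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): with the defaults above,
# a call such as
#     unique_filename('/tmp/results', prefix='IV')
# returns a path of the form '/tmp/results/IV2024-01-31_1.csv', incrementing
# the trailing index while a file of that name already exists. The directory
# and prefix are invented for illustration.
# ---------------------------------------------------------------------------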
class CSVFormatter(logging.Formatter):
""" Formatter of data results """
def __init__(self, columns, delimiter=','):
"""Creates a csv formatter for a given list of columns (=header).
:param columns: list of column names.
:type columns: list
:param delimiter: delimiter between columns.
:type delimiter: str
"""
super().__init__()
self.columns = columns
self.delimiter = delimiter
def format(self, record):
"""Formats a record as csv.
:param record: record to format.
:type record: dict
:return: a string
"""
return self.delimiter.join('{}'.format(record[x]) for x in self.columns)
def format_header(self):
return self.delimiter.join(self.columns)
class Results(object):
""" The Results class provides a convenient interface to reading and
writing data in connection with a :class:`.Procedure` object.
:cvar COMMENT: The character used to identify a comment (default: #)
:cvar DELIMITER: The character used to delimit the data (default: ,)
:cvar LINE_BREAK: The character used for line breaks (default \\n)
:cvar CHUNK_SIZE: The length of the data chuck that is read
:param procedure: Procedure object
:param data_filename: The data filename where the data is or should be
stored
"""
COMMENT = '#'
DELIMITER = ','
LINE_BREAK = "\n"
CHUNK_SIZE = 1000
def __init__(self, procedure, data_filename):
if not isinstance(procedure, Procedure):
raise ValueError("Results require a Procedure object")
self.procedure = procedure
self.procedure_class = procedure.__class__
self.parameters = procedure.parameter_objects()
self._header_count = -1
self.formatter = CSVFormatter(columns=self.procedure.DATA_COLUMNS)
if isinstance(data_filename, (list, tuple)):
data_filenames, data_filename = data_filename, data_filename[0]
else:
data_filenames = [data_filename]
self.data_filename = data_filename
self.data_filenames = data_filenames
if os.path.exists(data_filename): # Assume header is already written
self.reload()
self.procedure.status = Procedure.FINISHED
# TODO: Correctly store and retrieve status
else:
for filename in self.data_filenames:
with open(filename, 'w') as f:
f.write(self.header())
f.write(self.labels())
self._data = None
def __getstate__(self):
# Get all information needed to reconstruct procedure
self._parameters = self.procedure.parameter_values()
self._class = self.procedure.__class__.__name__
module = sys.modules[self.procedure.__module__]
self._package = module.__package__
self._module = module.__name__
self._file = module.__file__
state = self.__dict__.copy()
del state['procedure']
del state['procedure_class']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore the procedure
module = SourceFileLoader(self._module, self._file).load_module()
cls = getattr(module, self._class)
self.procedure = cls()
self.procedure.set_parameters(self._parameters)
self.procedure.refresh_parameters()
self.procedure_class = cls
del self._parameters
del self._class
del self._package
del self._module
del self._file
def header(self):
""" Returns a text header to accompany a datafile so that the procedure
can be reconstructed
"""
h = []
procedure = re.search("'(?P<name>[^']+)'",
repr(self.procedure_class)).group("name")
h.append("Procedure: <%s>" % procedure)
h.append("Parameters:")
for name, parameter in self.parameters.items():
h.append("\t%s: %s" % (parameter.name, str(parameter).encode("unicode_escape").decode("utf-8")))
h.append("Data:")
self._header_count = len(h)
h = [Results.COMMENT + l for l in h] # Comment each line
return Results.LINE_BREAK.join(h) + Results.LINE_BREAK
def labels(self):
""" Returns the columns labels as a string to be written
to the file
"""
return self.formatter.format_header() + Results.LINE_BREAK
def format(self, data):
""" Returns a formatted string containing the data to be written
to a file
"""
return self.formatter.format(data)
def parse(self, line):
""" Returns a dictionary containing the data from the line """
data = {}
items = line.split(Results.DELIMITER)
for i, key in enumerate(self.procedure.DATA_COLUMNS):
data[key] = items[i]
return data
@staticmethod
def parse_header(header, procedure_class=None):
""" Returns a Procedure object with the parameters as defined in the
header text.
"""
if procedure_class is not None:
procedure = procedure_class()
else:
procedure = None
header = header.split(Results.LINE_BREAK)
procedure_module = None
parameters = {}
for line in header:
if line.startswith(Results.COMMENT):
line = line[1:] # Uncomment
else:
raise ValueError("Parsing a header which contains "
"uncommented sections")
if line.startswith("Procedure"):
regex = r"<(?:(?P<module>[^>]+)\.)?(?P<class>[^.>]+)>"
search = re.search(regex, line)
procedure_module = search.group("module")
procedure_class = search.group("class")
elif line.startswith("\t"):
separator = ": "
partitioned_line = line[1:].partition(separator)
if partitioned_line[1] != separator:
raise Exception("Error partitioning header line %s." % line)
else:
parameters[partitioned_line[0]] = partitioned_line[2]
if procedure is None:
if procedure_class is None:
raise ValueError("Header does not contain the Procedure class")
try:
from importlib import import_module
procedure_module = import_module(procedure_module)
procedure_class = getattr(procedure_module, procedure_class)
procedure = procedure_class()
except ImportError:
procedure = UnknownProcedure(parameters)
log.warning("Unknown Procedure being used")
except Exception as e:
raise e
# Fill the procedure with the parameters found
for name, parameter in procedure.parameter_objects().items():
if parameter.name in parameters:
value = parameters[parameter.name]
setattr(procedure, name, value)
else:
raise Exception("Missing '%s' parameter when loading '%s' class" % (
parameter.name, procedure_class))
procedure.refresh_parameters() # Enforce update of meta data
return procedure
@staticmethod
def load(data_filename, procedure_class=None):
""" Returns a Results object with the associated Procedure object and
data
"""
header = ""
header_read = False
header_count = 0
with open(data_filename, 'r') as f:
while not header_read:
line = f.readline()
if line.startswith(Results.COMMENT):
header += line.strip() + Results.LINE_BREAK
header_count += 1
else:
header_read = True
procedure = Results.parse_header(header[:-1], procedure_class)
results = Results(procedure, data_filename)
results._header_count = header_count
return results
@property
def data(self):
# Need to update header count for correct referencing
if self._header_count == -1:
self._header_count = len(
self.header()[-1].split(Results.LINE_BREAK))
if self._data is None or len(self._data) == 0:
# Data has not been read
try:
self.reload()
except Exception:
# Empty dataframe
self._data = pd.DataFrame(columns=self.procedure.DATA_COLUMNS)
else: # Concatenate additional data, if any, to already loaded data
skiprows = len(self._data) + self._header_count
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
header=0,
names=self._data.columns,
chunksize=Results.CHUNK_SIZE, skiprows=skiprows, iterator=True
)
try:
tmp_frame = pd.concat(chunks, ignore_index=True)
# only append new data if there is any
# if no new data, tmp_frame dtype is object, which override's
# self._data's original dtype - this can cause problems plotting
# (e.g. if trying to plot int data on a log axis)
if len(tmp_frame) > 0:
self._data = pd.concat([self._data, tmp_frame],
ignore_index=True)
except Exception:
pass # All data is up to date
return self._data
def reload(self):
""" Preforms a full reloading of the file data, neglecting
any changes in the comments
"""
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
chunksize=Results.CHUNK_SIZE,
iterator=True
)
try:
self._data = pd.concat(chunks, ignore_index=True)
except Exception:
self._data = chunks.read()
def __repr__(self):
return "<{}(filename='{}',procedure={},shape={})>".format(
self.__class__.__name__, self.data_filename,
self.procedure.__class__.__name__,
self.data.shape
)
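# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): CSVFormatter on its
# own, outside the usual Procedure/Results workflow. The column names and the
# record dict below are invented for illustration.
# ---------------------------------------------------------------------------
# >>> formatter = CSVFormatter(columns=['Voltage (V)', 'Current (A)'])
# >>> formatter.format_header()
# 'Voltage (V),Current (A)'
# >>> formatter.format({'Voltage (V)': 1.5, 'Current (A)': 0.002})
# '1.5,0.002'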
|
py | 1a4ff2205d919e1242fe9275abad644cb28b78fd | ########################################
# CS/CNS/EE 155 2018
# Problem Set 6
#
# Author: Andrew Kang
# Description: Set 6 HMM helper
########################################
import re
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from matplotlib import animation
from matplotlib.animation import FuncAnimation
####################
# WORDCLOUD FUNCTIONS
####################
def mask():
# Parameters.
r = 128
d = 2 * r + 1
# Get points in a circle.
y, x = np.ogrid[-r:d-r, -r:d-r]
circle = (x**2 + y**2 <= r**2)
# Create mask.
mask = 255 * np.ones((d, d), dtype=np.uint8)
mask[circle] = 0
return mask
def text_to_wordcloud(text, max_words=50, title='', show=True):
plt.close('all')
# Generate a wordcloud image.
wordcloud = WordCloud(random_state=0,
max_words=max_words,
background_color='white',
mask=mask()).generate(text)
# Show the image.
if show:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.title(title, fontsize=24)
plt.show()
return wordcloud
def states_to_wordclouds(hmm, obs_map, max_words=50, show=True):
# Initialize.
M = 100000
n_states = len(hmm.A)
obs_map_r = obs_map_reverser(obs_map)
wordclouds = []
# Generate a large emission.
emission, states = hmm.generate_emission(M)
# For each state, get a list of observations that have been emitted
# from that state.
obs_count = []
for i in range(n_states):
obs_lst = np.array(emission)[np.where(np.array(states) == i)[0]]
obs_count.append(obs_lst)
# For each state, convert it into a wordcloud.
for i in range(n_states):
obs_lst = obs_count[i]
sentence = [obs_map_r[j] for j in obs_lst]
sentence_str = ' '.join(sentence)
wordclouds.append(text_to_wordcloud(sentence_str, max_words=max_words, title='State %d' % i, show=show))
return wordclouds
####################
# HMM FUNCTIONS
####################
def parse_observations(text):
# Convert text to dataset.
lines = [line.split() for line in text.split('\n') if line.split()]
obs_counter = 0
obs = []
obs_map = {}
for line in lines:
obs_elem = []
for word in line:
word = re.sub(r'[^\w]', '', word).lower()
if word not in obs_map:
# Add unique words to the observations map.
obs_map[word] = obs_counter
obs_counter += 1
# Add the encoded word.
obs_elem.append(obs_map[word])
# Add the encoded sequence.
obs.append(obs_elem)
return obs, obs_map
def obs_map_reverser(obs_map):
obs_map_r = {}
for key in obs_map:
obs_map_r[obs_map[key]] = key
return obs_map_r
def sample_sentence(hmm, obs_map, n_words=100):
# Get reverse map.
obs_map_r = obs_map_reverser(obs_map)
# Sample and convert sentence.
emission, states = hmm.generate_emission(n_words)
sentence = [obs_map_r[i] for i in emission]
return ' '.join(sentence).capitalize() + '...'
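# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original helper): how the encoding
# helpers above fit together on a tiny corpus. The text is a placeholder and
# `unsupervised_HMM` stands in for whatever HMM trainer this problem set
# pairs with the helper; both are assumptions.
# ---------------------------------------------------------------------------
# text = "shall i compare thee to a summers day\nthou art more lovely"
# obs, obs_map = parse_observations(text)   # lines encoded as lists of ints
# obs_map_r = obs_map_reverser(obs_map)     # int -> word lookup
# # hmm = unsupervised_HMM(obs, n_states=4, N_iters=100)
# # print(sample_sentence(hmm, obs_map, n_words=10))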
####################
# HMM VISUALIZATION FUNCTIONS
####################
def visualize_sparsities(hmm, O_max_cols=50, O_vmax=0.1):
plt.close('all')
plt.set_cmap('viridis')
# Visualize sparsity of A.
plt.imshow(hmm.A, vmax=1.0)
plt.colorbar()
plt.title('Sparsity of A matrix')
plt.show()
# Visualize sparsity of O.
plt.imshow(np.array(hmm.O)[:, :O_max_cols], vmax=O_vmax, aspect='auto')
plt.colorbar()
plt.title('Sparsity of O matrix')
plt.show()
####################
# HMM ANIMATION FUNCTIONS
####################
def animate_emission(hmm, obs_map, M=8, height=12, width=12, delay=1):
# Parameters.
lim = 1200
text_x_offset = 40
text_y_offset = 80
x_offset = 580
y_offset = 520
R = 420
r = 100
arrow_size = 20
arrow_p1 = 0.03
arrow_p2 = 0.02
arrow_p3 = 0.06
# Initialize.
n_states = len(hmm.A)
obs_map_r = obs_map_reverser(obs_map)
wordclouds = states_to_wordclouds(hmm, obs_map, max_words=20, show=False)
# Initialize plot.
fig, ax = plt.subplots()
fig.set_figheight(height)
fig.set_figwidth(width)
ax.grid('off')
plt.axis('off')
ax.set_xlim([0, lim])
ax.set_ylim([0, lim])
# Plot each wordcloud.
for i, wordcloud in enumerate(wordclouds):
x = x_offset + int(R * np.cos(np.pi * 2 * i / n_states))
y = y_offset + int(R * np.sin(np.pi * 2 * i / n_states))
ax.imshow(wordcloud.to_array(), extent=(x - r, x + r, y - r, y + r), aspect='auto', zorder=-1)
# Initialize text.
text = ax.text(text_x_offset, lim - text_y_offset, '', fontsize=24)
# Make the arrows.
zorder_mult = n_states ** 2 * 100
arrows = []
for i in range(n_states):
row = []
for j in range(n_states):
# Arrow coordinates.
x_i = x_offset + R * np.cos(np.pi * 2 * i / n_states)
y_i = y_offset + R * np.sin(np.pi * 2 * i / n_states)
x_j = x_offset + R * np.cos(np.pi * 2 * j / n_states)
y_j = y_offset + R * np.sin(np.pi * 2 * j / n_states)
dx = x_j - x_i
dy = y_j - y_i
d = np.sqrt(dx**2 + dy**2)
if i != j:
arrow = ax.arrow(x_i + (r/d + arrow_p1) * dx + arrow_p2 * dy,
y_i + (r/d + arrow_p1) * dy + arrow_p2 * dx,
(1 - 2 * r/d - arrow_p3) * dx,
(1 - 2 * r/d - arrow_p3) * dy,
color=(1 - hmm.A[i][j], ) * 3,
head_width=arrow_size, head_length=arrow_size,
zorder=int(hmm.A[i][j] * zorder_mult))
else:
arrow = ax.arrow(x_i, y_i, 0, 0,
color=(1 - hmm.A[i][j], ) * 3,
head_width=arrow_size, head_length=arrow_size,
zorder=int(hmm.A[i][j] * zorder_mult))
row.append(arrow)
arrows.append(row)
emission, states = hmm.generate_emission(M)
def animate(i):
if i >= delay:
i -= delay
if i == 0:
arrows[states[0]][states[0]].set_color('red')
elif i == 1:
arrows[states[0]][states[0]].set_color((1 - hmm.A[states[0]][states[0]], ) * 3)
arrows[states[i - 1]][states[i]].set_color('red')
else:
arrows[states[i - 2]][states[i - 1]].set_color((1 - hmm.A[states[i - 2]][states[i - 1]], ) * 3)
arrows[states[i - 1]][states[i]].set_color('red')
# Set text.
text.set_text(' '.join([obs_map_r[e] for e in emission][:i+1]).capitalize())
return arrows + [text]
# Animate!
print('\nAnimating...')
anim = FuncAnimation(fig, animate, frames=M+delay, interval=1000)
return anim
# Note: this animation routine is ad hoc and was written quickly for a demo;
# it would benefit from a cleanup before being reused elsewhere.
|
py | 1a4ff2c0a7a498611734d36920e2aaf47c883551 | import concurrent.futures
import rasterio
from rasterio._example import compute
def main(infile, outfile, num_workers=4):
with rasterio.Env():
with rasterio.open(infile) as src:
profile = src.profile
profile.update(blockxsize=128, blockysize=128, tiled=True)
with rasterio.open(outfile, "w", **profile) as dst:
windows = [window for ij, window in dst.block_windows()]
data_gen = (src.read(window=window) for window in windows)
with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
for window, result in zip(windows, executor.map(compute, data_gen)):
dst.write(result, window=window)
in_path = 'test.tif'
out_path = 'output.tif'
if __name__ == '__main__':
main(in_path, out_path)
|
py | 1a4ff37ddcd8a52764a4eec25ad627ea134d9ac9 | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_afm_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_afm_client.exceptions import ApiAttributeError
class DataColumnLocator(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'properties': ({str: (str,)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'properties': 'properties', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, properties, *args, **kwargs): # noqa: E501
"""DataColumnLocator - a model defined in OpenAPI
Args:
properties ({str: (str,)}): Mapping from dimension items (either 'localIdentifier' from 'AttributeItem', or \"measureGroup\") to their respective values. This effectively specifies the path (location) of the data column used for sorting. Therefore values for all dimension items must be specified.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.properties = properties
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, properties, *args, **kwargs): # noqa: E501
"""DataColumnLocator - a model defined in OpenAPI
Args:
properties ({str: (str,)}): Mapping from dimension items (either 'localIdentifier' from 'AttributeItem', or \"measureGroup\") to their respective values. This effectively specifies the path (location) of the data column used for sorting. Therefore values for all dimension items must be specified.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.properties = properties
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 1a4ff380b4406d96d105ea5bcf73f55b337a1a85 | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running ValueDice on the OpenAI Gym."""
import functools
from absl import flags
import acme
from acme import specs
from acme.agents.jax import value_dice
from absl import app
import helpers
import jax
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_steps', 1000000,
'Number of env steps to run training for.')
flags.DEFINE_integer('eval_every', 10000, 'How often to run evaluation')
flags.DEFINE_string('env_name', 'MountainCarContinuous-v0',
'What environment to run')
flags.DEFINE_string('dataset_name', 'd4rl_mujoco_halfcheetah/v0-medium',
'What dataset to use. '
'See the TFDS catalog for possible values.')
flags.DEFINE_integer('num_sgd_steps_per_step', 64,
'Number of SGD steps per learner step().')
flags.DEFINE_integer('seed', 0, 'Random seed.')
def main(_):
# Create an environment, grab the spec, and use it to create networks.
environment = helpers.make_environment(task=FLAGS.env_name)
environment_spec = specs.make_environment_spec(environment)
agent_networks = value_dice.make_networks(environment_spec)
# Construct the agent.
config = value_dice.ValueDiceConfig(
num_sgd_steps_per_step=FLAGS.num_sgd_steps_per_step)
agent = value_dice.ValueDice(
environment_spec,
agent_networks,
config=config,
make_demonstrations=functools.partial(
helpers.make_demonstration_iterator, dataset_name=FLAGS.dataset_name),
seed=FLAGS.seed)
# Create the environment loop used for training.
train_loop = acme.EnvironmentLoop(environment, agent, label='train_loop')
# Create the evaluation actor and loop.
eval_actor = agent.builder.make_actor(
random_key=jax.random.PRNGKey(FLAGS.seed),
policy_network=value_dice.apply_policy_and_sample(
agent_networks, eval_mode=True),
variable_source=agent)
eval_env = helpers.make_environment(task=FLAGS.env_name)
eval_loop = acme.EnvironmentLoop(eval_env, eval_actor, label='eval_loop')
assert FLAGS.num_steps % FLAGS.eval_every == 0
for _ in range(FLAGS.num_steps // FLAGS.eval_every):
eval_loop.run(num_episodes=5)
train_loop.run(num_steps=FLAGS.eval_every)
eval_loop.run(num_episodes=5)
if __name__ == '__main__':
app.run(main)
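# Illustrative invocation (a sketch; the file name is assumed, and the
# accompanying `helpers` module from the Acme examples directory must be on the
# path, with the chosen TFDS dataset already downloaded):
#
#     python run_value_dice.py --env_name=MountainCarContinuous-v0 \
#         --dataset_name=d4rl_mujoco_halfcheetah/v0-medium \
#         --num_steps=100000 --eval_every=10000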
|
py | 1a4ff38fa9a503358e47dab0452c2ada2e010b25 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tools
=====
Utility classes for specific purposes, mainly for processing DCASE challenge submissions for publishing.
DatasetPacker
^^^^^^^^^^^^^
*dcase_util.tools.DatasetPacker*
DatasetPacker class can be used to create DCASE styled dataset packages where different data types are delivered in
separate packages. Large data packages are split into multiple smaller ones to ease downloading them over the network.
.. autosummary::
:toctree: generated/
DatasetPacker
DatasetPacker.pack
DatasetPacker.convert_markdown
SubmissionChecker
^^^^^^^^^^^^^^^^^
*dcase_util.tools.SubmissionChecker*
SubmissionChecker class can be used to check DCASE challenge submission meta yaml files.
.. autosummary::
:toctree: generated/
SubmissionChecker
SubmissionChecker.process
BibtexProcessor
^^^^^^^^^^^^^^^
*dcase_util.tools.BibtexProcessor*
This class provides tools to form bibtex entries for the DCASE challenge submissions.
.. autosummary::
:toctree: generated/
BibtexProcessor
BibtexProcessor.key
BibtexProcessor.authors
BibtexProcessor.authors_fancy
BibtexProcessor.affiliation_str
BibtexProcessor.affiliation_list
BibtexProcessor.affiliation_list_fancy
BibtexProcessor.submissions_fancy
BibtexProcessor.title
BibtexProcessor.abstract
"""
from .bibtex import *
from .submission import *
from .datasets import *
__all__ = [_ for _ in dir() if not _.startswith('_')]
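# Illustrative usage sketch (constructor and method arguments are assumptions;
# see the per-class documentation referenced in the module docstring above for
# the authoritative signatures):
#
#     import dcase_util
#
#     packer = dcase_util.tools.DatasetPacker()        # pack dataset packages
#     checker = dcase_util.tools.SubmissionChecker()   # check submission meta yaml files
#     bib = dcase_util.tools.BibtexProcessor()         # form bibtex entries for submissions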
|
py | 1a4ff4115e6c5bdf58db7e6d6952e625e77a246d | #!/usr/bin/env python
__all__ = ['zhanqi_download']
from ..common import *
import re
def zhanqi_download(url, output_dir = '.', merge = True, info_only = False):
html = get_content(url)
rtmp_base_patt = r'VideoUrl":"([^"]+)"'
rtmp_id_patt = r'VideoID":"([^"]+)"'
title_patt = r'<p class="title-name" title="[^"]+">([^<]+)</p>'
title_patt_backup = r'<title>([^<]{1,9999})</title>'
rtmp_base = match1(html, rtmp_base_patt).replace('\\/','/')
rtmp_id = match1(html, rtmp_id_patt).replace('\\/','/')
title = match1(html, title_patt) or match1(html, title_patt_backup)
title = unescape_html(title)
real_url = rtmp_base+'/'+rtmp_id
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
download_rtmp_url(real_url, title, 'flv', {}, output_dir, merge = merge)
site_info = "zhanqi.tv"
download = zhanqi_download
download_playlist = playlist_not_supported('zhanqi')
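# Illustrative usage (the URL below is a placeholder for a zhanqi.tv room page;
# `info_only=True` prints the stream info without downloading):
#
#     zhanqi_download('https://www.zhanqi.tv/<room>', output_dir='.', info_only=True)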
|
py | 1a4ff4ec915f23e7c019a823ef18c805568717f8 | # Generated file, please do not change!!!
import re
import typing
import marshmallow
import marshmallow_enum
from commercetools import helpers
from ... import models
from ..cart import (
CartOrigin,
CartState,
DiscountCodeState,
InventoryMode,
LineItemMode,
LineItemPriceMode,
RoundingMode,
ShippingMethodState,
TaxCalculationMode,
TaxMode,
)
from ..common import ReferenceTypeId
from .common import (
BaseResourceSchema,
LocalizedStringField,
ReferenceSchema,
ResourceIdentifierSchema,
)
from .type import FieldContainerField
# Fields
# Marshmallow Schemas
class CartSchema(BaseResourceSchema):
key = marshmallow.fields.String(allow_none=True, missing=None)
last_modified_by = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.LastModifiedBySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="lastModifiedBy",
)
created_by = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.CreatedBySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="createdBy",
)
customer_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="customerId",
)
customer_email = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="customerEmail",
)
anonymous_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="anonymousId",
)
store = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".store.StoreKeyReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
line_items = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".LineItemSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="lineItems",
)
custom_line_items = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".CustomLineItemSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="customLineItems",
)
total_price = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalPrice",
)
taxed_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxedPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxedPrice",
)
cart_state = marshmallow_enum.EnumField(
CartState, by_value=True, allow_none=True, missing=None, data_key="cartState"
)
shipping_address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingAddress",
)
billing_address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="billingAddress",
)
inventory_mode = marshmallow_enum.EnumField(
InventoryMode,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="inventoryMode",
)
tax_mode = marshmallow_enum.EnumField(
TaxMode, by_value=True, allow_none=True, missing=None, data_key="taxMode"
)
tax_rounding_mode = marshmallow_enum.EnumField(
RoundingMode,
by_value=True,
allow_none=True,
missing=None,
data_key="taxRoundingMode",
)
tax_calculation_mode = marshmallow_enum.EnumField(
TaxCalculationMode,
by_value=True,
allow_none=True,
missing=None,
data_key="taxCalculationMode",
)
customer_group = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".customer_group.CustomerGroupReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="customerGroup",
)
country = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
shipping_info = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ShippingInfoSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingInfo",
)
discount_codes = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountCodeInfoSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="discountCodes",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
payment_info = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".order.PaymentInfoSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="paymentInfo",
)
locale = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
delete_days_after_last_modification = marshmallow.fields.Integer(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="deleteDaysAfterLastModification",
)
refused_gifts = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".cart_discount.CartDiscountReferenceSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="refusedGifts",
)
origin = marshmallow_enum.EnumField(
CartOrigin, by_value=True, allow_none=True, missing=None
)
shipping_rate_input = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"Classification": helpers.absmod(
__name__, ".ClassificationShippingRateInputSchema"
),
"Score": helpers.absmod(__name__, ".ScoreShippingRateInputSchema"),
},
metadata={"omit_empty": True},
missing=None,
data_key="shippingRateInput",
)
item_shipping_addresses = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="itemShippingAddresses",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.Cart(**data)
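# Illustrative round trip with the schema above (a minimal sketch; a real Cart
# payload returned by the commercetools API carries many more fields than shown):
#
#     cart = CartSchema().load(api_response_dict)   # -> models.Cart (see post_load above)
#     payload = CartSchema().dump(cart)             # -> plain dict with camelCase keys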
class CartDraftSchema(helpers.BaseSchema):
currency = marshmallow.fields.String(allow_none=True, missing=None)
customer_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="customerId",
)
customer_email = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="customerEmail",
)
customer_group = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".customer_group.CustomerGroupResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="customerGroup",
)
anonymous_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="anonymousId",
)
store = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".store.StoreResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
country = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
inventory_mode = marshmallow_enum.EnumField(
InventoryMode,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="inventoryMode",
)
tax_mode = marshmallow_enum.EnumField(
TaxMode,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="taxMode",
)
tax_rounding_mode = marshmallow_enum.EnumField(
RoundingMode,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="taxRoundingMode",
)
tax_calculation_mode = marshmallow_enum.EnumField(
TaxCalculationMode,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="taxCalculationMode",
)
line_items = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".LineItemDraftSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="lineItems",
)
custom_line_items = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".CustomLineItemDraftSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="customLineItems",
)
shipping_address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingAddress",
)
billing_address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="billingAddress",
)
shipping_method = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".shipping_method.ShippingMethodResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingMethod",
)
external_tax_rate_for_shipping_method = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRateForShippingMethod",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
locale = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
delete_days_after_last_modification = marshmallow.fields.Integer(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="deleteDaysAfterLastModification",
)
origin = marshmallow_enum.EnumField(
CartOrigin,
by_value=True,
allow_none=True,
metadata={"omit_empty": True},
missing=None,
)
shipping_rate_input = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"Classification": helpers.absmod(
__name__, ".ClassificationShippingRateInputDraftSchema"
),
"Score": helpers.absmod(__name__, ".ScoreShippingRateInputDraftSchema"),
},
metadata={"omit_empty": True},
missing=None,
data_key="shippingRateInput",
)
item_shipping_addresses = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="itemShippingAddresses",
)
discount_codes = marshmallow.fields.List(
marshmallow.fields.String(allow_none=True),
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="discountCodes",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CartDraft(**data)
class CartPagedQueryResponseSchema(helpers.BaseSchema):
limit = marshmallow.fields.Integer(allow_none=True, missing=None)
count = marshmallow.fields.Integer(allow_none=True, missing=None)
total = marshmallow.fields.Integer(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
offset = marshmallow.fields.Integer(allow_none=True, missing=None)
results = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".CartSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CartPagedQueryResponse(**data)
class CartReferenceSchema(ReferenceSchema):
obj = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".CartSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type_id"]
return models.CartReference(**data)
class CartResourceIdentifierSchema(ResourceIdentifierSchema):
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type_id"]
return models.CartResourceIdentifier(**data)
class CartUpdateSchema(helpers.BaseSchema):
version = marshmallow.fields.Integer(allow_none=True, missing=None)
actions = marshmallow.fields.List(
helpers.Discriminator(
allow_none=True,
discriminator_field=("action", "action"),
discriminator_schemas={
"addCustomLineItem": helpers.absmod(
__name__, ".CartAddCustomLineItemActionSchema"
),
"addDiscountCode": helpers.absmod(
__name__, ".CartAddDiscountCodeActionSchema"
),
"addItemShippingAddress": helpers.absmod(
__name__, ".CartAddItemShippingAddressActionSchema"
),
"addLineItem": helpers.absmod(__name__, ".CartAddLineItemActionSchema"),
"addPayment": helpers.absmod(__name__, ".CartAddPaymentActionSchema"),
"addShoppingList": helpers.absmod(
__name__, ".CartAddShoppingListActionSchema"
),
"applyDeltaToCustomLineItemShippingDetailsTargets": helpers.absmod(
__name__,
".CartApplyDeltaToCustomLineItemShippingDetailsTargetsActionSchema",
),
"applyDeltaToLineItemShippingDetailsTargets": helpers.absmod(
__name__,
".CartApplyDeltaToLineItemShippingDetailsTargetsActionSchema",
),
"changeCustomLineItemMoney": helpers.absmod(
__name__, ".CartChangeCustomLineItemMoneyActionSchema"
),
"changeCustomLineItemQuantity": helpers.absmod(
__name__, ".CartChangeCustomLineItemQuantityActionSchema"
),
"changeLineItemQuantity": helpers.absmod(
__name__, ".CartChangeLineItemQuantityActionSchema"
),
"changeTaxCalculationMode": helpers.absmod(
__name__, ".CartChangeTaxCalculationModeActionSchema"
),
"changeTaxMode": helpers.absmod(
__name__, ".CartChangeTaxModeActionSchema"
),
"changeTaxRoundingMode": helpers.absmod(
__name__, ".CartChangeTaxRoundingModeActionSchema"
),
"recalculate": helpers.absmod(__name__, ".CartRecalculateActionSchema"),
"removeCustomLineItem": helpers.absmod(
__name__, ".CartRemoveCustomLineItemActionSchema"
),
"removeDiscountCode": helpers.absmod(
__name__, ".CartRemoveDiscountCodeActionSchema"
),
"removeItemShippingAddress": helpers.absmod(
__name__, ".CartRemoveItemShippingAddressActionSchema"
),
"removeLineItem": helpers.absmod(
__name__, ".CartRemoveLineItemActionSchema"
),
"removePayment": helpers.absmod(
__name__, ".CartRemovePaymentActionSchema"
),
"setAnonymousId": helpers.absmod(
__name__, ".CartSetAnonymousIdActionSchema"
),
"setBillingAddress": helpers.absmod(
__name__, ".CartSetBillingAddressActionSchema"
),
"setCartTotalTax": helpers.absmod(
__name__, ".CartSetCartTotalTaxActionSchema"
),
"setCountry": helpers.absmod(__name__, ".CartSetCountryActionSchema"),
"setCustomField": helpers.absmod(
__name__, ".CartSetCustomFieldActionSchema"
),
"setCustomLineItemCustomField": helpers.absmod(
__name__, ".CartSetCustomLineItemCustomFieldActionSchema"
),
"setCustomLineItemCustomType": helpers.absmod(
__name__, ".CartSetCustomLineItemCustomTypeActionSchema"
),
"setCustomLineItemShippingDetails": helpers.absmod(
__name__, ".CartSetCustomLineItemShippingDetailsActionSchema"
),
"setCustomLineItemTaxAmount": helpers.absmod(
__name__, ".CartSetCustomLineItemTaxAmountActionSchema"
),
"setCustomLineItemTaxRate": helpers.absmod(
__name__, ".CartSetCustomLineItemTaxRateActionSchema"
),
"setCustomShippingMethod": helpers.absmod(
__name__, ".CartSetCustomShippingMethodActionSchema"
),
"setCustomType": helpers.absmod(
__name__, ".CartSetCustomTypeActionSchema"
),
"setCustomerEmail": helpers.absmod(
__name__, ".CartSetCustomerEmailActionSchema"
),
"setCustomerGroup": helpers.absmod(
__name__, ".CartSetCustomerGroupActionSchema"
),
"setCustomerId": helpers.absmod(
__name__, ".CartSetCustomerIdActionSchema"
),
"setDeleteDaysAfterLastModification": helpers.absmod(
__name__, ".CartSetDeleteDaysAfterLastModificationActionSchema"
),
"setKey": helpers.absmod(__name__, ".CartSetKeyActionSchema"),
"setLineItemCustomField": helpers.absmod(
__name__, ".CartSetLineItemCustomFieldActionSchema"
),
"setLineItemCustomType": helpers.absmod(
__name__, ".CartSetLineItemCustomTypeActionSchema"
),
"setLineItemDistributionChannel": helpers.absmod(
__name__, ".CartSetLineItemDistributionChannelActionSchema"
),
"setLineItemPrice": helpers.absmod(
__name__, ".CartSetLineItemPriceActionSchema"
),
"setLineItemShippingDetails": helpers.absmod(
__name__, ".CartSetLineItemShippingDetailsActionSchema"
),
"setLineItemTaxAmount": helpers.absmod(
__name__, ".CartSetLineItemTaxAmountActionSchema"
),
"setLineItemTaxRate": helpers.absmod(
__name__, ".CartSetLineItemTaxRateActionSchema"
),
"setLineItemTotalPrice": helpers.absmod(
__name__, ".CartSetLineItemTotalPriceActionSchema"
),
"setLocale": helpers.absmod(__name__, ".CartSetLocaleActionSchema"),
"setShippingAddress": helpers.absmod(
__name__, ".CartSetShippingAddressActionSchema"
),
"setShippingMethod": helpers.absmod(
__name__, ".CartSetShippingMethodActionSchema"
),
"setShippingMethodTaxAmount": helpers.absmod(
__name__, ".CartSetShippingMethodTaxAmountActionSchema"
),
"setShippingMethodTaxRate": helpers.absmod(
__name__, ".CartSetShippingMethodTaxRateActionSchema"
),
"setShippingRateInput": helpers.absmod(
__name__, ".CartSetShippingRateInputActionSchema"
),
"updateItemShippingAddress": helpers.absmod(
__name__, ".CartUpdateItemShippingAddressActionSchema"
),
},
),
allow_none=True,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CartUpdate(**data)
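# Illustrative update payload (a sketch; the values are placeholders). The
# `action` field selects the concrete action schema via the discriminator above:
#
#     update = CartUpdateSchema().load({
#         "version": 1,
#         "actions": [{"action": "addDiscountCode", "code": "SUMMER-SALE"}],
#     })
#     # -> models.CartUpdate with a models.CartAddDiscountCodeAction in `actions`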
class CartUpdateActionSchema(helpers.BaseSchema):
action = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartUpdateAction(**data)
class CustomLineItemSchema(helpers.BaseSchema):
id = marshmallow.fields.String(allow_none=True, missing=None)
name = LocalizedStringField(allow_none=True, missing=None)
money = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
)
taxed_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxedItemPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxedPrice",
)
total_price = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalPrice",
)
slug = marshmallow.fields.String(allow_none=True, missing=None)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
state = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".order.ItemStateSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxCategoryReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxRateSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxRate",
)
discounted_price_per_quantity = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountedLineItemPriceForQuantitySchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="discountedPricePerQuantity",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CustomLineItem(**data)
class CustomLineItemDraftSchema(helpers.BaseSchema):
name = LocalizedStringField(allow_none=True, missing=None)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
money = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
slug = marshmallow.fields.String(allow_none=True, missing=None)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".tax_category.TaxCategoryResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CustomLineItemDraft(**data)
class DiscountCodeInfoSchema(helpers.BaseSchema):
discount_code = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".discount_code.DiscountCodeReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="discountCode",
)
state = marshmallow_enum.EnumField(
DiscountCodeState, by_value=True, allow_none=True, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.DiscountCodeInfo(**data)
class DiscountedLineItemPortionSchema(helpers.BaseSchema):
discount = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".cart_discount.CartDiscountReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
discounted_amount = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="discountedAmount",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.DiscountedLineItemPortion(**data)
class DiscountedLineItemPriceSchema(helpers.BaseSchema):
value = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
)
included_discounts = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountedLineItemPortionSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="includedDiscounts",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.DiscountedLineItemPrice(**data)
class DiscountedLineItemPriceForQuantitySchema(helpers.BaseSchema):
quantity = marshmallow.fields.Float(allow_none=True, missing=None)
discounted_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountedLineItemPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="discountedPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.DiscountedLineItemPriceForQuantity(**data)
class ExternalLineItemTotalPriceSchema(helpers.BaseSchema):
price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="totalPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ExternalLineItemTotalPrice(**data)
class ExternalTaxAmountDraftSchema(helpers.BaseSchema):
total_gross = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="totalGross",
)
tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="taxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ExternalTaxAmountDraft(**data)
class ExternalTaxRateDraftSchema(helpers.BaseSchema):
name = marshmallow.fields.String(allow_none=True, missing=None)
amount = marshmallow.fields.Float(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
country = marshmallow.fields.String(allow_none=True, missing=None)
state = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
sub_rates = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.SubRateSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="subRates",
)
included_in_price = marshmallow.fields.Boolean(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="includedInPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ExternalTaxRateDraft(**data)
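# Illustrative external tax rate draft (a sketch; the rate and country values
# are placeholders):
#
#     tax_rate = ExternalTaxRateDraftSchema().load(
#         {"name": "VAT", "amount": 0.19, "country": "DE", "includedInPrice": True}
#     )   # -> models.ExternalTaxRateDraft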
class ItemShippingDetailsSchema(helpers.BaseSchema):
targets = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingTargetSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
valid = marshmallow.fields.Boolean(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ItemShippingDetails(**data)
class ItemShippingDetailsDraftSchema(helpers.BaseSchema):
targets = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingTargetSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ItemShippingDetailsDraft(**data)
class ItemShippingTargetSchema(helpers.BaseSchema):
address_key = marshmallow.fields.String(
allow_none=True, missing=None, data_key="addressKey"
)
quantity = marshmallow.fields.Float(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ItemShippingTarget(**data)
class LineItemSchema(helpers.BaseSchema):
id = marshmallow.fields.String(allow_none=True, missing=None)
product_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="productId"
)
name = LocalizedStringField(allow_none=True, missing=None)
product_slug = LocalizedStringField(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="productSlug",
)
product_type = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".product_type.ProductTypeReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="productType",
)
variant = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".product.ProductVariantSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.PriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
taxed_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxedItemPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxedPrice",
)
total_price = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalPrice",
)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
added_at = marshmallow.fields.DateTime(
allow_none=True, metadata={"omit_empty": True}, missing=None, data_key="addedAt"
)
state = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".order.ItemStateSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxRateSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxRate",
)
supply_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="supplyChannel",
)
distribution_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="distributionChannel",
)
discounted_price_per_quantity = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountedLineItemPriceForQuantitySchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="discountedPricePerQuantity",
)
price_mode = marshmallow_enum.EnumField(
LineItemPriceMode,
by_value=True,
allow_none=True,
missing=None,
data_key="priceMode",
)
line_item_mode = marshmallow_enum.EnumField(
LineItemMode,
by_value=True,
allow_none=True,
missing=None,
data_key="lineItemMode",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
last_modified_at = marshmallow.fields.DateTime(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="lastModifiedAt",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.LineItem(**data)
class LineItemDraftSchema(helpers.BaseSchema):
product_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="productId",
)
variant_id = marshmallow.fields.Integer(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="variantId",
)
sku = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
quantity = marshmallow.fields.Integer(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
added_at = marshmallow.fields.DateTime(
allow_none=True, metadata={"omit_empty": True}, missing=None, data_key="addedAt"
)
supply_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="supplyChannel",
)
distribution_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="distributionChannel",
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
external_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalPrice",
)
external_total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalLineItemTotalPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTotalPrice",
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.LineItemDraft(**data)
class ReplicaCartDraftSchema(helpers.BaseSchema):
reference = helpers.Discriminator(
allow_none=True,
discriminator_field=("typeId", "type_id"),
discriminator_schemas={
"cart-discount": helpers.absmod(
__name__, ".cart_discount.CartDiscountReferenceSchema"
),
"cart": helpers.absmod(__name__, ".CartReferenceSchema"),
"category": helpers.absmod(__name__, ".category.CategoryReferenceSchema"),
"channel": helpers.absmod(__name__, ".channel.ChannelReferenceSchema"),
"key-value-document": helpers.absmod(
__name__, ".custom_object.CustomObjectReferenceSchema"
),
"customer-group": helpers.absmod(
__name__, ".customer_group.CustomerGroupReferenceSchema"
),
"customer": helpers.absmod(__name__, ".customer.CustomerReferenceSchema"),
"discount-code": helpers.absmod(
__name__, ".discount_code.DiscountCodeReferenceSchema"
),
"inventory-entry": helpers.absmod(
__name__, ".inventory.InventoryEntryReferenceSchema"
),
"order-edit": helpers.absmod(
__name__, ".order_edit.OrderEditReferenceSchema"
),
"order": helpers.absmod(__name__, ".order.OrderReferenceSchema"),
"payment": helpers.absmod(__name__, ".payment.PaymentReferenceSchema"),
"product-discount": helpers.absmod(
__name__, ".product_discount.ProductDiscountReferenceSchema"
),
"product-type": helpers.absmod(
__name__, ".product_type.ProductTypeReferenceSchema"
),
"product": helpers.absmod(__name__, ".product.ProductReferenceSchema"),
"review": helpers.absmod(__name__, ".review.ReviewReferenceSchema"),
"shipping-method": helpers.absmod(
__name__, ".shipping_method.ShippingMethodReferenceSchema"
),
"shopping-list": helpers.absmod(
__name__, ".shopping_list.ShoppingListReferenceSchema"
),
"state": helpers.absmod(__name__, ".state.StateReferenceSchema"),
"store": helpers.absmod(__name__, ".store.StoreReferenceSchema"),
"tax-category": helpers.absmod(
__name__, ".tax_category.TaxCategoryReferenceSchema"
),
"type": helpers.absmod(__name__, ".type.TypeReferenceSchema"),
"zone": helpers.absmod(__name__, ".zone.ZoneReferenceSchema"),
},
missing=None,
)
key = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ReplicaCartDraft(**data)
class ShippingInfoSchema(helpers.BaseSchema):
shipping_method_name = marshmallow.fields.String(
allow_none=True, missing=None, data_key="shippingMethodName"
)
price = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
)
shipping_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".shipping_method.ShippingRateSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="shippingRate",
)
taxed_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxedItemPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxedPrice",
)
tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxRateSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxRate",
)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxCategoryReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
shipping_method = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".shipping_method.ShippingMethodReferenceSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingMethod",
)
deliveries = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".order.DeliverySchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
discounted_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".DiscountedLineItemPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="discountedPrice",
)
shipping_method_state = marshmallow_enum.EnumField(
ShippingMethodState,
by_value=True,
allow_none=True,
missing=None,
data_key="shippingMethodState",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.ShippingInfo(**data)
class ShippingRateInputSchema(helpers.BaseSchema):
type = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ShippingRateInput(**data)
class ClassificationShippingRateInputSchema(ShippingRateInputSchema):
key = marshmallow.fields.String(allow_none=True, missing=None)
label = LocalizedStringField(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ClassificationShippingRateInput(**data)
class ScoreShippingRateInputSchema(ShippingRateInputSchema):
score = marshmallow.fields.Float(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ScoreShippingRateInput(**data)
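# Illustrative discriminator dispatch for shipping rate inputs (a sketch; the
# key, label, and score values are placeholders). The `type` field selects the
# concrete subclass schema and is removed again in post_load:
#
#     ClassificationShippingRateInputSchema().load(
#         {"type": "Classification", "key": "Small", "label": {"en": "Small"}}
#     )   # -> models.ClassificationShippingRateInput
#     ScoreShippingRateInputSchema().load({"type": "Score", "score": 2.0})
#     # -> models.ScoreShippingRateInput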
class ShippingRateInputDraftSchema(helpers.BaseSchema):
type = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ShippingRateInputDraft(**data)
class ClassificationShippingRateInputDraftSchema(ShippingRateInputDraftSchema):
key = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ClassificationShippingRateInputDraft(**data)
class ScoreShippingRateInputDraftSchema(ShippingRateInputDraftSchema):
score = marshmallow.fields.Float(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["type"]
return models.ScoreShippingRateInputDraft(**data)
class TaxPortionSchema(helpers.BaseSchema):
name = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
rate = marshmallow.fields.Float(allow_none=True, missing=None)
amount = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.TaxPortion(**data)
class TaxPortionDraftSchema(helpers.BaseSchema):
name = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
rate = marshmallow.fields.Float(allow_none=True, missing=None)
amount = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.TaxPortionDraft(**data)
class TaxedItemPriceSchema(helpers.BaseSchema):
total_net = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalNet",
)
total_gross = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalGross",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.TaxedItemPrice(**data)
class TaxedPriceSchema(helpers.BaseSchema):
total_net = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalNet",
)
total_gross = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"centPrecision": helpers.absmod(
__name__, ".common.CentPrecisionMoneySchema"
),
"highPrecision": helpers.absmod(
__name__, ".common.HighPrecisionMoneySchema"
),
},
missing=None,
data_key="totalGross",
)
tax_portions = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxPortionSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="taxPortions",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.TaxedPrice(**data)
class TaxedPriceDraftSchema(helpers.BaseSchema):
total_net = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="totalNet",
)
total_gross = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="totalGross",
)
tax_portions = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxPortionDraftSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="taxPortions",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.TaxedPriceDraft(**data)
class CartAddCustomLineItemActionSchema(CartUpdateActionSchema):
money = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
name = LocalizedStringField(allow_none=True, missing=None)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
slug = marshmallow.fields.String(allow_none=True, missing=None)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".tax_category.TaxCategoryResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddCustomLineItemAction(**data)
class CartAddDiscountCodeActionSchema(CartUpdateActionSchema):
code = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddDiscountCodeAction(**data)
class CartAddItemShippingAddressActionSchema(CartUpdateActionSchema):
address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddItemShippingAddressAction(**data)
class CartAddLineItemActionSchema(CartUpdateActionSchema):
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
distribution_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="distributionChannel",
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
product_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="productId",
)
variant_id = marshmallow.fields.Integer(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="variantId",
)
sku = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
quantity = marshmallow.fields.Integer(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
supply_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="supplyChannel",
)
external_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalPrice",
)
external_total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalLineItemTotalPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTotalPrice",
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddLineItemAction(**data)
class CartAddPaymentActionSchema(CartUpdateActionSchema):
payment = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".payment.PaymentResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddPaymentAction(**data)
class CartAddShoppingListActionSchema(CartUpdateActionSchema):
shopping_list = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".shopping_list.ShoppingListResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="shoppingList",
)
supply_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="supplyChannel",
)
distribution_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="distributionChannel",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartAddShoppingListAction(**data)
class CartApplyDeltaToCustomLineItemShippingDetailsTargetsActionSchema(
CartUpdateActionSchema
):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
targets_delta = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingTargetSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="targetsDelta",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartApplyDeltaToCustomLineItemShippingDetailsTargetsAction(**data)
class CartApplyDeltaToLineItemShippingDetailsTargetsActionSchema(
CartUpdateActionSchema
):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
targets_delta = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingTargetSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="targetsDelta",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartApplyDeltaToLineItemShippingDetailsTargetsAction(**data)
class CartChangeCustomLineItemMoneyActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
money = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeCustomLineItemMoneyAction(**data)
class CartChangeCustomLineItemQuantityActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeCustomLineItemQuantityAction(**data)
class CartChangeLineItemQuantityActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
external_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalPrice",
)
external_total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalLineItemTotalPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTotalPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeLineItemQuantityAction(**data)
class CartChangeTaxCalculationModeActionSchema(CartUpdateActionSchema):
tax_calculation_mode = marshmallow_enum.EnumField(
TaxCalculationMode,
by_value=True,
allow_none=True,
missing=None,
data_key="taxCalculationMode",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeTaxCalculationModeAction(**data)
class CartChangeTaxModeActionSchema(CartUpdateActionSchema):
tax_mode = marshmallow_enum.EnumField(
TaxMode, by_value=True, allow_none=True, missing=None, data_key="taxMode"
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeTaxModeAction(**data)
class CartChangeTaxRoundingModeActionSchema(CartUpdateActionSchema):
tax_rounding_mode = marshmallow_enum.EnumField(
RoundingMode,
by_value=True,
allow_none=True,
missing=None,
data_key="taxRoundingMode",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartChangeTaxRoundingModeAction(**data)
class CartRecalculateActionSchema(CartUpdateActionSchema):
update_product_data = marshmallow.fields.Boolean(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="updateProductData",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRecalculateAction(**data)
class CartRemoveCustomLineItemActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRemoveCustomLineItemAction(**data)
class CartRemoveDiscountCodeActionSchema(CartUpdateActionSchema):
discount_code = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".discount_code.DiscountCodeReferenceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="discountCode",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRemoveDiscountCodeAction(**data)
class CartRemoveItemShippingAddressActionSchema(CartUpdateActionSchema):
address_key = marshmallow.fields.String(
allow_none=True, missing=None, data_key="addressKey"
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRemoveItemShippingAddressAction(**data)
class CartRemoveLineItemActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
quantity = marshmallow.fields.Integer(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
external_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalPrice",
)
external_total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalLineItemTotalPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTotalPrice",
)
shipping_details_to_remove = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetailsToRemove",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRemoveLineItemAction(**data)
class CartRemovePaymentActionSchema(CartUpdateActionSchema):
payment = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".payment.PaymentResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartRemovePaymentAction(**data)
class CartSetAnonymousIdActionSchema(CartUpdateActionSchema):
anonymous_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="anonymousId",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetAnonymousIdAction(**data)
class CartSetBillingAddressActionSchema(CartUpdateActionSchema):
address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetBillingAddressAction(**data)
class CartSetCartTotalTaxActionSchema(CartUpdateActionSchema):
external_total_gross = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="externalTotalGross",
)
external_tax_portions = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".TaxPortionDraftSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxPortions",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCartTotalTaxAction(**data)
class CartSetCountryActionSchema(CartUpdateActionSchema):
country = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCountryAction(**data)
class CartSetCustomFieldActionSchema(CartUpdateActionSchema):
name = marshmallow.fields.String(allow_none=True, missing=None)
value = marshmallow.fields.Raw(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomFieldAction(**data)
class CartSetCustomLineItemCustomFieldActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
name = marshmallow.fields.String(allow_none=True, missing=None)
value = marshmallow.fields.Raw(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomLineItemCustomFieldAction(**data)
class CartSetCustomLineItemCustomTypeActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
type = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.TypeResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
fields = FieldContainerField(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomLineItemCustomTypeAction(**data)
class CartSetCustomLineItemShippingDetailsActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomLineItemShippingDetailsAction(**data)
class CartSetCustomLineItemTaxAmountActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
external_tax_amount = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxAmountDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxAmount",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomLineItemTaxAmountAction(**data)
class CartSetCustomLineItemTaxRateActionSchema(CartUpdateActionSchema):
custom_line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="customLineItemId"
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomLineItemTaxRateAction(**data)
class CartSetCustomShippingMethodActionSchema(CartUpdateActionSchema):
shipping_method_name = marshmallow.fields.String(
allow_none=True, missing=None, data_key="shippingMethodName"
)
shipping_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".shipping_method.ShippingRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
data_key="shippingRate",
)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".tax_category.TaxCategoryResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomShippingMethodAction(**data)
class CartSetCustomTypeActionSchema(CartUpdateActionSchema):
type = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.TypeResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
fields = FieldContainerField(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomTypeAction(**data)
class CartSetCustomerEmailActionSchema(CartUpdateActionSchema):
email = marshmallow.fields.String(allow_none=True, missing=None)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomerEmailAction(**data)
class CartSetCustomerGroupActionSchema(CartUpdateActionSchema):
customer_group = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".customer_group.CustomerGroupResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="customerGroup",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomerGroupAction(**data)
class CartSetCustomerIdActionSchema(CartUpdateActionSchema):
customer_id = marshmallow.fields.String(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="customerId",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetCustomerIdAction(**data)
class CartSetDeleteDaysAfterLastModificationActionSchema(CartUpdateActionSchema):
delete_days_after_last_modification = marshmallow.fields.Integer(
allow_none=True,
metadata={"omit_empty": True},
missing=None,
data_key="deleteDaysAfterLastModification",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetDeleteDaysAfterLastModificationAction(**data)
class CartSetKeyActionSchema(CartUpdateActionSchema):
key = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetKeyAction(**data)
class CartSetLineItemCustomFieldActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
name = marshmallow.fields.String(allow_none=True, missing=None)
value = marshmallow.fields.Raw(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemCustomFieldAction(**data)
class CartSetLineItemCustomTypeActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
type = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.TypeResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
fields = FieldContainerField(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemCustomTypeAction(**data)
class CartSetLineItemDistributionChannelActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
distribution_channel = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".channel.ChannelResourceIdentifierSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="distributionChannel",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemDistributionChannelAction(**data)
class CartSetLineItemPriceActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
external_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemPriceAction(**data)
class CartSetLineItemShippingDetailsActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemShippingDetailsAction(**data)
class CartSetLineItemTaxAmountActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
external_tax_amount = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxAmountDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxAmount",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemTaxAmountAction(**data)
class CartSetLineItemTaxRateActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemTaxRateAction(**data)
class CartSetLineItemTotalPriceActionSchema(CartUpdateActionSchema):
line_item_id = marshmallow.fields.String(
allow_none=True, missing=None, data_key="lineItemId"
)
external_total_price = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalLineItemTotalPriceSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTotalPrice",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLineItemTotalPriceAction(**data)
class CartSetLocaleActionSchema(CartUpdateActionSchema):
locale = marshmallow.fields.String(
allow_none=True, metadata={"omit_empty": True}, missing=None
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetLocaleAction(**data)
class CartSetShippingAddressActionSchema(CartUpdateActionSchema):
address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetShippingAddressAction(**data)
class CartSetShippingMethodActionSchema(CartUpdateActionSchema):
shipping_method = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".shipping_method.ShippingMethodResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingMethod",
)
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetShippingMethodAction(**data)
class CartSetShippingMethodTaxAmountActionSchema(CartUpdateActionSchema):
external_tax_amount = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxAmountDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxAmount",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetShippingMethodTaxAmountAction(**data)
class CartSetShippingMethodTaxRateActionSchema(CartUpdateActionSchema):
external_tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ExternalTaxRateDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="externalTaxRate",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetShippingMethodTaxRateAction(**data)
class CartSetShippingRateInputActionSchema(CartUpdateActionSchema):
shipping_rate_input = helpers.Discriminator(
allow_none=True,
discriminator_field=("type", "type"),
discriminator_schemas={
"Classification": helpers.absmod(
__name__, ".ClassificationShippingRateInputDraftSchema"
),
"Score": helpers.absmod(__name__, ".ScoreShippingRateInputDraftSchema"),
},
metadata={"omit_empty": True},
missing=None,
data_key="shippingRateInput",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartSetShippingRateInputAction(**data)
class CartUpdateItemShippingAddressActionSchema(CartUpdateActionSchema):
address = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.AddressSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
del data["action"]
return models.CartUpdateItemShippingAddressAction(**data)
class CustomLineItemImportDraftSchema(helpers.BaseSchema):
name = LocalizedStringField(allow_none=True, missing=None)
quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
money = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".common.MoneySchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
missing=None,
)
slug = marshmallow.fields.String(allow_none=True, missing=None)
state = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".order.ItemStateSchema"),
allow_none=True,
many=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
tax_rate = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".tax_category.TaxRateSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxRate",
)
tax_category = helpers.LazyNestedField(
nested=helpers.absmod(
__name__, ".tax_category.TaxCategoryResourceIdentifierSchema"
),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="taxCategory",
)
custom = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".type.CustomFieldsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
)
shipping_details = helpers.LazyNestedField(
nested=helpers.absmod(__name__, ".ItemShippingDetailsDraftSchema"),
allow_none=True,
unknown=marshmallow.EXCLUDE,
metadata={"omit_empty": True},
missing=None,
data_key="shippingDetails",
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data, **kwargs):
return models.CustomLineItemImportDraft(**data)
|
py | 1a4ff65a599d30cee48af1d8552998f462a11fdb | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class IdentityClient(VssClient):
"""Identity
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(IdentityClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '8a3d49b8-91f0-46ef-b33d-dda338c25db3'
def create_or_bind_with_claims(self, source_identity):
"""CreateOrBindWithClaims.
[Preview API]
:param :class:`<Identity> <identity.v4_0.models.Identity>` source_identity:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
content = self._serialize.body(source_identity, 'Identity')
response = self._send(http_method='PUT',
location_id='90ddfe71-171c-446c-bf3b-b597cd562afd',
version='4.0-preview.1',
content=content)
return self._deserialize('Identity', response)
def get_descriptor_by_id(self, id, is_master_id=None):
"""GetDescriptorById.
[Preview API]
:param str id:
:param bool is_master_id:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if is_master_id is not None:
query_parameters['isMasterId'] = self._serialize.query('is_master_id', is_master_id, 'bool')
response = self._send(http_method='GET',
location_id='a230389a-94f2-496c-839f-c929787496dd',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def create_groups(self, container):
"""CreateGroups.
:param :class:`<object> <identity.v4_0.models.object>` container:
:rtype: [Identity]
"""
content = self._serialize.body(container, 'object')
response = self._send(http_method='POST',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
content=content,
returns_collection=True)
return self._deserialize('[Identity]', response)
def delete_group(self, group_id):
"""DeleteGroup.
:param str group_id:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
route_values=route_values)
def list_groups(self, scope_ids=None, recurse=None, deleted=None, properties=None):
"""ListGroups.
:param str scope_ids:
:param bool recurse:
:param bool deleted:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_ids is not None:
query_parameters['scopeIds'] = self._serialize.query('scope_ids', scope_ids, 'str')
if recurse is not None:
query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool')
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def get_identity_changes(self, identity_sequence_id, group_sequence_id, scope_id=None):
"""GetIdentityChanges.
:param int identity_sequence_id:
:param int group_sequence_id:
:param str scope_id:
:rtype: :class:`<ChangedIdentities> <identity.v4_0.models.ChangedIdentities>`
"""
query_parameters = {}
if identity_sequence_id is not None:
query_parameters['identitySequenceId'] = self._serialize.query('identity_sequence_id', identity_sequence_id, 'int')
if group_sequence_id is not None:
query_parameters['groupSequenceId'] = self._serialize.query('group_sequence_id', group_sequence_id, 'int')
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters)
return self._deserialize('ChangedIdentities', response)
def get_user_identity_ids_by_domain_id(self, domain_id):
"""GetUserIdentityIdsByDomainId.
:param str domain_id:
:rtype: [str]
"""
query_parameters = {}
if domain_id is not None:
query_parameters['domainId'] = self._serialize.query('domain_id', domain_id, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def read_identities(self, descriptors=None, identity_ids=None, search_filter=None, filter_value=None, query_membership=None, properties=None, include_restricted_visibility=None, options=None):
"""ReadIdentities.
:param str descriptors:
:param str identity_ids:
:param str search_filter:
:param str filter_value:
:param str query_membership:
:param str properties:
:param bool include_restricted_visibility:
:param str options:
:rtype: [Identity]
"""
query_parameters = {}
if descriptors is not None:
query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str')
if identity_ids is not None:
query_parameters['identityIds'] = self._serialize.query('identity_ids', identity_ids, 'str')
if search_filter is not None:
query_parameters['searchFilter'] = self._serialize.query('search_filter', search_filter, 'str')
if filter_value is not None:
query_parameters['filterValue'] = self._serialize.query('filter_value', filter_value, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if include_restricted_visibility is not None:
query_parameters['includeRestrictedVisibility'] = self._serialize.query('include_restricted_visibility', include_restricted_visibility, 'bool')
if options is not None:
query_parameters['options'] = self._serialize.query('options', options, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def read_identities_by_scope(self, scope_id, query_membership=None, properties=None):
"""ReadIdentitiesByScope.
:param str scope_id:
:param str query_membership:
:param str properties:
:rtype: [Identity]
"""
query_parameters = {}
if scope_id is not None:
query_parameters['scopeId'] = self._serialize.query('scope_id', scope_id, 'str')
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[Identity]', response)
def read_identity(self, identity_id, query_membership=None, properties=None):
"""ReadIdentity.
:param str identity_id:
:param str query_membership:
:param str properties:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
if properties is not None:
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
response = self._send(http_method='GET',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Identity', response)
def update_identities(self, identities):
"""UpdateIdentities.
:param :class:`<VssJsonCollectionWrapper> <identity.v4_0.models.VssJsonCollectionWrapper>` identities:
:rtype: [IdentityUpdateData]
"""
content = self._serialize.body(identities, 'VssJsonCollectionWrapper')
response = self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
content=content,
returns_collection=True)
return self._deserialize('[IdentityUpdateData]', response)
def update_identity(self, identity, identity_id):
"""UpdateIdentity.
:param :class:`<Identity> <identity.v4_0.models.Identity>` identity:
:param str identity_id:
"""
route_values = {}
if identity_id is not None:
route_values['identityId'] = self._serialize.url('identity_id', identity_id, 'str')
content = self._serialize.body(identity, 'Identity')
self._send(http_method='PUT',
location_id='28010c54-d0c0-4c89-a5b0-1c9e188b9fb7',
version='4.0',
route_values=route_values,
content=content)
def create_identity(self, framework_identity_info):
"""CreateIdentity.
:param :class:`<FrameworkIdentityInfo> <identity.v4_0.models.FrameworkIdentityInfo>` framework_identity_info:
:rtype: :class:`<Identity> <identity.v4_0.models.Identity>`
"""
content = self._serialize.body(framework_identity_info, 'FrameworkIdentityInfo')
response = self._send(http_method='PUT',
location_id='dd55f0eb-6ea2-4fe4-9ebe-919e7dd1dfb4',
version='4.0',
content=content)
return self._deserialize('Identity', response)
def read_identity_batch(self, batch_info):
"""ReadIdentityBatch.
[Preview API]
:param :class:`<IdentityBatchInfo> <identity.v4_0.models.IdentityBatchInfo>` batch_info:
:rtype: [Identity]
"""
content = self._serialize.body(batch_info, 'IdentityBatchInfo')
response = self._send(http_method='POST',
location_id='299e50df-fe45-4d3a-8b5b-a5836fac74dc',
version='4.0-preview.1',
content=content,
returns_collection=True)
return self._deserialize('[Identity]', response)
def get_identity_snapshot(self, scope_id):
"""GetIdentitySnapshot.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentitySnapshot> <identity.v4_0.models.IdentitySnapshot>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='d56223df-8ccd-45c9-89b4-eddf692400d7',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('IdentitySnapshot', response)
def get_max_sequence_id(self):
"""GetMaxSequenceId.
Read the max sequence id of all the identities.
:rtype: long
"""
response = self._send(http_method='GET',
location_id='e4a70778-cb2c-4e85-b7cc-3f3c7ae2d408',
version='4.0')
return self._deserialize('long', response)
def get_self(self):
"""GetSelf.
Read identity of the home tenant request user.
:rtype: :class:`<IdentitySelf> <identity.v4_0.models.IdentitySelf>`
"""
response = self._send(http_method='GET',
location_id='4bb02b5b-c120-4be2-b68e-21f7c50a4b82',
version='4.0')
return self._deserialize('IdentitySelf', response)
def add_member(self, container_id, member_id):
"""AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='PUT',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def read_member(self, container_id, member_id, query_membership=None):
"""ReadMember.
[Preview API]
:param str container_id:
:param str member_id:
:param str query_membership:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members(self, container_id, query_membership=None):
"""ReadMembers.
[Preview API]
:param str container_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def remove_member(self, container_id, member_id):
"""RemoveMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='DELETE',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response)
def read_member_of(self, member_id, container_id, query_membership=None):
"""ReadMemberOf.
[Preview API]
:param str member_id:
:param str container_id:
:param str query_membership:
:rtype: :class:`<str> <identity.v4_0.models.str>`
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('str', response)
def read_members_of(self, member_id, query_membership=None):
"""ReadMembersOf.
[Preview API]
:param str member_id:
:param str query_membership:
:rtype: [str]
"""
route_values = {}
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
query_parameters = {}
if query_membership is not None:
query_parameters['queryMembership'] = self._serialize.query('query_membership', query_membership, 'str')
response = self._send(http_method='GET',
location_id='22865b02-9e4a-479e-9e18-e35b8803b8a0',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
returns_collection=True)
return self._deserialize('[str]', response)
def create_scope(self, info, scope_id):
"""CreateScope.
[Preview API]
:param :class:`<CreateScopeInfo> <identity.v4_0.models.CreateScopeInfo>` info:
:param str scope_id:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(info, 'CreateScopeInfo')
response = self._send(http_method='PUT',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('IdentityScope', response)
def delete_scope(self, scope_id):
"""DeleteScope.
[Preview API]
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
self._send(http_method='DELETE',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values)
def get_scope_by_id(self, scope_id):
"""GetScopeById.
[Preview API]
:param str scope_id:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('IdentityScope', response)
def get_scope_by_name(self, scope_name):
"""GetScopeByName.
[Preview API]
:param str scope_name:
:rtype: :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>`
"""
query_parameters = {}
if scope_name is not None:
query_parameters['scopeName'] = self._serialize.query('scope_name', scope_name, 'str')
response = self._send(http_method='GET',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('IdentityScope', response)
def rename_scope(self, rename_scope, scope_id):
"""RenameScope.
[Preview API]
:param :class:`<IdentityScope> <identity.v4_0.models.IdentityScope>` rename_scope:
:param str scope_id:
"""
route_values = {}
if scope_id is not None:
route_values['scopeId'] = self._serialize.url('scope_id', scope_id, 'str')
content = self._serialize.body(rename_scope, 'IdentityScope')
self._send(http_method='PATCH',
location_id='4e11e2bf-1e79-4eb5-8f34-a6337bd0de38',
version='4.0-preview.1',
route_values=route_values,
content=content)
def get_signed_in_token(self):
"""GetSignedInToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <identity.v4_0.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='6074ff18-aaad-4abb-a41e-5c75f6178057',
version='4.0-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_signout_token(self):
"""GetSignoutToken.
[Preview API]
:rtype: :class:`<AccessTokenResult> <identity.v4_0.models.AccessTokenResult>`
"""
response = self._send(http_method='GET',
location_id='be39e83c-7529-45e9-9c67-0410885880da',
version='4.0-preview.1')
return self._deserialize('AccessTokenResult', response)
def get_tenant(self, tenant_id):
"""GetTenant.
[Preview API]
:param str tenant_id:
:rtype: :class:`<TenantInfo> <identity.v4_0.models.TenantInfo>`
"""
route_values = {}
if tenant_id is not None:
route_values['tenantId'] = self._serialize.url('tenant_id', tenant_id, 'str')
response = self._send(http_method='GET',
location_id='5f0a1723-2e2c-4c31-8cae-002d01bdd592',
version='4.0-preview.1',
route_values=route_values)
return self._deserialize('TenantInfo', response)
|
py | 1a4ff7c3f0e8533ab0ecf565be630b09c69456c4 | #!/Users/huseyinhacihabiboglu/PycharmProjects/higrid/venv/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
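# Typical invocation (assumed, following the standard Docutils command-line front ends):
#   rstpep2html.py pep-9999.txt pep-9999.html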
|
py | 1a4ff84fab2fe919d178a11aab54a9744c70f5f1 |
import os
import zipfile
from datetime import datetime
import numpy as np
import spacy
import twokenize
from nlplingo.common.utils import IntPair
from nlplingo.sandbox.misc.train_test import generate_argument_data_feature
from nlplingo.sandbox.misc.train_test import generate_trigger_data_feature
from nlplingo.sandbox.misc.train_test import get_predicted_positive_triggers
from nlplingo.text.text_span import EntityMention
from nlplingo.text.text_theory import Document
# from pyspark import SparkContext, SparkConf
# from ctypes import *
global spacy_en
global tagger_blog
global tagger_tweet
global tagger_news
global tagger_dw
#sys.path.append('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite')
#sc.addPyFile('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/crfsuite.py')
#cdll.LoadLibrary('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so')
#import crfsuite
class Token(object):
"""An individual word token.
"""
# idx : starting char offset
def __init__(self, text, idx, pos_tag=None):
self.text = text
self.idx = idx
self.tag_ = pos_tag
class Decoder(object):
#sys.path.append('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite')
#cdll.LoadLibrary('/nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so')
#import crfsuite
# python_path: /nfs/mercury-04/u40/ychan/spark/ner/crfsuite
# libcrfsuite_so: /nfs/mercury-04/u40/ychan/spark/ner/crfsuite/libcrfsuite-0.12.so
# model_file: /nfs/mercury-04/u40/ychan/ner/model/twitter.cv1.model
def __init__(self, params):
#sys.path.append(python_path)
#for library in libcrfsuite_so_libs:
# cdll.LoadLibrary(library)
#import crfsuite as crfsuite
#self.crfsuite = crfsuite
import pycrfsuite as pycrfsuite
self.pycrfsuite = pycrfsuite
self.model_blog = params['crf_models']['blog']
self.model_tweet = params['crf_models']['tweet']
self.model_news = params['crf_models']['news']
self.model_dw = params['crf_models']['dw']
if 'resources.zip' in params:
if os.path.isfile(params['resources.zip']) and not os.path.isdir(params['crf_models']['dir']):
zip_ref = zipfile.ZipFile(params['resources.zip'], 'r')
zip_ref.extractall()
zip_ref.close()
def instances(self, fi):
xseq = []
for line in fi:
fields = line.split('\t')
item = {}
for field in fields[1:]:
sfield = field.encode('ascii', 'replace')
p = sfield.rfind(':')
if p == -1:
# Unweighted (weight=1) attribute.
item[sfield] = 1.0
elif (p+1) >= len(sfield):
item[sfield] = 1.0
else:
try:
weight = float(sfield[p+1:])
item[sfield[:p]] = weight
except ValueError:
item[sfield] = 1.0
xseq.append(item)
return self.pycrfsuite.ItemSequence(xseq)
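    # Input format expected by instances(), inferred from the parsing above (shown as an assumption):
    #   "<label>\t<feature>\t<feature>:<weight>\t..."
    # A trailing ':<float>' assigns that weight to the feature; all other features get weight 1.0,
    # and the leading label field is skipped when decoding.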
#def instances(self, fi):
# xseq = self.crfsuite.ItemSequence()
#
# for line in fi:
# # Split the line with TAB characters.
# fields = line.split('\t')
# item = self.crfsuite.Item()
# for field in fields[1:]:
# #print('field %s' % (field))
# sfield = field.encode('ascii','replace')
# #print('sfield %s' % (sfield))
# p = sfield.rfind(':')
# if p == -1:
# # Unweighted (weight=1) attribute.
# #print('field:{} type(field):{}'.format(field, type(field)))
# #print(type(field))
# #field_string = field.encode('ascii','replace')
# #item.append(self.crfsuite.Attribute(field_string))
# item.append(self.crfsuite.Attribute(sfield))
# elif (p+1) >= len(sfield):
# item.append(self.crfsuite.Attribute(sfield))
# else:
# try:
# weight = float(sfield[p+1:])
# item.append(self.crfsuite.Attribute(sfield[:p], weight))
# except ValueError:
# item.append(self.crfsuite.Attribute(sfield))
# #print field
# # Weighted attribute
# #item.append(self.crfsuite.Attribute(sfield[:p], float(sfield[p+1:])))
# # Append the item to the item sequence.
# xseq.append(item)
#
# return xseq
    # Content types handled below: Blog, SocialMediaPosting, NewsArticle, Post
def get_content_tagger(self, xseq, content_type):
global tagger_blog
global tagger_tweet
global tagger_news
global tagger_dw
if content_type == 'Blog':
try:
tagger_blog.set(xseq)
except:
tagger_blog = self.pycrfsuite.Tagger()
tagger_blog.open(self.model_blog)
print('**** Loaded blog NER model %s' % (self.model_blog))
tagger_blog.set(xseq)
return tagger_blog
elif content_type == 'SocialMediaPosting':
try:
tagger_tweet.set(xseq)
except:
tagger_tweet = self.pycrfsuite.Tagger()
tagger_tweet.open(self.model_tweet)
print('**** Loaded tweet NER model %s' % (self.model_tweet))
tagger_tweet.set(xseq)
return tagger_tweet
elif content_type == 'NewsArticle':
try:
tagger_news.set(xseq)
except:
tagger_news = self.pycrfsuite.Tagger()
tagger_news.open(self.model_news)
print('**** Loaded news NER model %s' % (self.model_news))
tagger_news.set(xseq)
return tagger_news
elif content_type == 'Post':
try:
tagger_dw.set(xseq)
except:
tagger_dw = self.pycrfsuite.Tagger()
tagger_dw.open(self.model_dw)
print('**** Loaded dw NER model %s' % (self.model_dw))
tagger_dw.set(xseq)
return tagger_dw
def tag_seq(self, xseq, content_type):
tagger = self.get_content_tagger(xseq, content_type)
return tagger.tag()
def collect_predictions(content, predictions, char_offsets):
ret = []
i = 0
while i < len(predictions):
p = predictions[i]
if p.startswith('B-'):
label = p[2:]
(start, end) = char_offsets[i]
while (i+1) < len(predictions) and predictions[i+1] == 'I-'+label:
i += 1
end = char_offsets[i][1]
# these are when we mix in ACE and Blog annotations. ACE tags 'ORG', Blog tags 'ORGANIZATION'
if label == 'ORG':
label = 'ORGANIZATION'
if label == 'PER':
label = 'PERSON'
d = {}
d['start'] = start
d['end'] = end
d['label'] = label
d['text'] = content[start:end]
d['extractor'] = 'nlplingo.ner'
ret.append(d)
i += 1
return ret
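# Illustrative behaviour of collect_predictions (an assumed example, not from the original file):
#   predictions=['B-PER', 'I-PER', 'O'] with char_offsets=[(0, 4), (5, 10), (11, 14)] over "John Smith ran"
#   yields [{'start': 0, 'end': 10, 'label': 'PERSON', 'text': 'John Smith', 'extractor': 'nlplingo.ner'}]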
# A line could be a paragraph consisting of multiple sentences.
# We will get the correct definition of sentences according to whether this is blog, tweet, etc.
def get_sentences(line, content_type):
global spacy_en
if content_type == 'SocialMediaPosting':
sentences = []
start_offset = 0
sent = []
for token in twokenize.tokenize(line[:-1]):
idx = line.index(token, start_offset)
sent.append(Token(token, idx))
start_offset = idx + len(token)
sentences.append(sent)
return sentences
elif content_type == 'Blog' or content_type == 'NewsArticle' or content_type == 'Post':
try:
spacy_doc = spacy_en(line)
except:
spacy_en = spacy.load('en')
print('**** Loaded spacy en')
spacy_doc = spacy_en(line)
return spacy_doc.sents
def decode_sentence(ner_fea, dec, content, sent, offset, content_type):
"""
:type ner_fea: ner.ner_feature.NerFeature
:type dec: ner.decoder.Decoder
:type content: str
:type offset: int
:type content_type: str
sent: spacy sentence
Returns:
list[dict()]
content_type: 'Blog' , 'SocialMediaPosting' , 'NewsArticle' (will decide which NER feature set to use)
"""
tokens = [t for t in sent if len(t.text) > 0]
    # a list with 1 element per word in the line;
    # each element is a string of tab-separated features whose 1st field is a dummy label
word_feas = line_to_features(ner_fea, tokens, content_type) # content_type decides which NER feature set to use
word_seq = dec.instances(word_feas) # of type pycrfsuite.ItemSequence
predictions = dec.tag_seq(word_seq, content_type) # content_type decides which NER model to load
char_offsets = []
for token in tokens:
start = token.idx + offset
end = start + len(token.text)
char_offsets.append((start, end))
assert (len(char_offsets) == len(predictions)), 'len(char_offsets) should match len(predictions)'
    # returns a list of dicts with keys: start, end, label, text, extractor
return collect_predictions(content, predictions, char_offsets)
def find(element, json):
x = reduce(lambda d, key: d.get(key, {}), element.split("."), json)
if any(x) is True:
return x
return None
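# Illustrative examples (assumptions, not from the original file):
#   find('extracted_text.content', {'extracted_text': {'content': 'hello'}}) returns 'hello'
#   find('missing.path', {}) returns None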
# line : a json string
def line_to_predictions(ner_fea, dec, json_eg, attr, content_type, word_embeddings, trigger_generator, trigger_model, arg_generator, argument_model, event_domain):
"""
:type word_embeddings: embeddings.word_embeddings.WordEmbedding
:type trigger_generator: tasks.event_trigger.EventTriggerExampleGenerator
:type trigger_model: model.event_cnn.ExtractionModel
:type arg_generator: tasks.event_argument.EventArgumentExampleGenerator
:type trigger_model: model.event_cnn.ExtractionModel
"""
global spacy_en
content = find(attr, json_eg) # json_eg.get(attr)
    print(content_type.encode('ascii', 'ignore'))
    if content is not None and not isinstance(content, list):
        print(content.encode('ascii', 'ignore'))
offset = 0
all_predictions = []
if content is not None:
if type(content) is list:
content = '\n'.join(content)
for line in content.split('\n'):
#print(offset)
#print('[' + content_type.encode('ascii', 'ignore') + ']')
#print('[' + line.encode('ascii', 'ignore') + ']')
            d = {}  # record the raw input line alongside the predictions
            d['line'] = line
            all_predictions.append(d)
doc_ner_predictions = []
sentences = get_sentences(line, content_type)
if sentences is not None:
for sent in sentences:
sent_predictions = decode_sentence(ner_fea, dec, content, sent, offset, content_type)
doc_ner_predictions.extend(sent_predictions)
all_predictions.extend(sent_predictions)
if content_type == 'Blog':
print('*** content_type == Blog ***')
print(line.encode('ascii', 'ignore'))
doc = Document('dummy', line)
for i, p in enumerate(doc_ner_predictions):
id = 'em-{}'.format(i)
                    # we need to subtract 'offset' because the original 'content' is split into lines and each
                    # 'line' is used to build a Document object, while p['start'] and p['end'] are relative to
                    # 'content'; subtracting 'offset' makes the two sets of offsets match
doc.add_entity_mention(EntityMention(id, IntPair(int(p['start'])-offset, int(p['end'])-offset), p['text'], p['label']))
doc.annotate_sentences(word_embeddings, spacy_en)
print('added {} NER'.format(len(doc_ner_predictions)))
(trigger_examples, trigger_data, trigger_data_list, trigger_label) = generate_trigger_data_feature(trigger_generator, [doc])
print('Generated {} trigger_examples, at {}'.format(len(trigger_examples), datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
if len(trigger_examples) > 0:
trigger_predictions = trigger_model.predict(trigger_data_list)
predicted_positive_triggers_map = get_predicted_positive_triggers(trigger_predictions, trigger_examples, event_domain.get_event_type_index('None'), event_domain)
# the above is organized by docid, let's now expand to get the actual eventtrigger examples
predicted_positive_triggers = []
""":type list[nlplingo.tasks.event_trigger.EventTriggerExample]"""
for docid in predicted_positive_triggers_map.keys():
predicted_positive_triggers.extend(predicted_positive_triggers_map[docid])
print('Predicted {} positive triggers, at {}'.format(len(predicted_positive_triggers), datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
for trigger_eg in predicted_positive_triggers:
print('trigger_eg %s (%s,%s) %s' % (trigger_eg.token.text, str(trigger_eg.token.start_char_offset()), str(trigger_eg.token.end_char_offset()), trigger_eg.event_type))
if len(predicted_positive_triggers) > 0:
if argument_model is None:
for eg in predicted_positive_triggers:
d = {}
d['docid'] = eg.sentence.docid
d['start'] = eg.anchor.start_char_offset()
d['end'] = eg.anchor.end_char_offset()
d['text'] = eg.anchor.text
all_predictions.append(d)
else:
# generate arguments with predicted triggers
(arg_examples_pt, arg_data_pt, arg_data_list_pt, arg_label_pt) = generate_argument_data_feature(arg_generator, [doc], params=None, predicted_triggers=predicted_positive_triggers_map)
#print('formed {} tasks eventargument examples'.format(len(arg_examples_pt)))
if len(arg_examples_pt) > 0:
# decode arguments with predicted triggers
argument_predictions_pt = argument_model.predict(arg_data_list_pt)
pred_arg_max = np.argmax(argument_predictions_pt, axis=1)
#predicted_events = defaultdict(list) # to collate by anchor
for i, predicted_label in enumerate(pred_arg_max):
if predicted_label != event_domain.get_event_role_index('None'):
eg = arg_examples_pt[i]
""":type: tasks.event_argument.EventArgumentExample"""
predicted_role = event_domain.get_event_role_from_index(predicted_label)
# print('{} || {} || {}'.format(predicted_role, eg.anchor.to_string(), eg.eventargument.to_string()))
#predicted_events[eg.anchor].append(EventArgument('dummy', eg.eventargument, predicted_role))
#print('argument_eg %s (%s,%s) %s' % (eg.eventargument.text, str(eg.eventargument.start_char_offset()), str(eg.eventargument.end_char_offset()), '{}.{}'.format(eg.anchor.label, predicted_role)))
d = {}
d['start'] = eg.argument.start_char_offset() + offset
d['end'] = eg.argument.end_char_offset() + offset
d['label'] = '{}.{}'.format(eg.anchor.label, predicted_role)
d['text'] = eg.argument.text
d['extractor'] = 'nlplingo.network'
all_predictions.append(d)
offset += len(line) + 1 # +1 to account for newline
# a list of dict, one for each predicted NE mention
if len(all_predictions) > 0:
if not "extractions" in json_eg:
json_eg["extractions"] = {}
json_eg['extractions'][attr] = all_predictions
return json_eg
# for each word in sent, return: label \t (tab-separated list of features); a weighted feature looks like (.*):weight
def line_to_features(ner_fea, sent, content_type):
d = ('', '', '')
seq = [d, d]
for token in sent:
#start = token.idx
#print(token.text.encode('ascii', 'ignore'))
pos_tag = 'NN' if token.tag_ is None else token.tag_
seq.append((ner_fea.encode(token.text), pos_tag, 'DUMMY-tag'))
seq.append(d)
seq.append(d)
return ner_fea.extract_features(seq, content_type)
|
py | 1a4ff95f81c5d04476b15094d5c1b33dc7c41760 | from typing import List, Optional
import attr
from casexml.apps.case.xform import extract_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.motech.value_source import CaseTriggerInfo
@attr.s
class RepeaterResponse:
"""
Ducktypes an HTTP response for Repeater.handle_response(),
RepeatRecord.handle_success() and RepeatRecord.handle_failure()
"""
status_code = attr.ib()
reason = attr.ib()
text = attr.ib(default="")
retry = attr.ib(default=True)
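# Illustrative usage sketch (not part of the original module): since the class
# only needs to duck-type the attributes above, a repeater can hand back e.g.
#   RepeaterResponse(status_code=400, reason="Bad Request", retry=False)
# wherever Repeater.handle_response() would otherwise receive a requests.Response.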
def get_relevant_case_updates_from_form_json(
domain: str,
form_json: dict,
case_types: list,
extra_fields: list,
form_question_values: Optional[dict] = None,
) -> List[CaseTriggerInfo]:
result = []
case_blocks = extract_case_blocks(form_json)
case_ids = [case_block['@case_id'] for case_block in case_blocks]
cases = CaseAccessors(domain).get_cases(case_ids, ordered=True)
db_case_dict = {case.case_id: case for case in cases}
for case_block in case_blocks:
case = db_case_dict[case_block['@case_id']]
if case_types and case.type not in case_types:
continue
case_create = case_block.get('create') or {}
case_update = case_block.get('update') or {}
result.append(CaseTriggerInfo(
domain=domain,
case_id=case_block['@case_id'],
type=case.type,
name=case.name,
owner_id=case.owner_id,
modified_by=case.modified_by,
updates={**case_create, **case_update},
created='create' in case_block,
closed='close' in case_block,
extra_fields={f: case.get_case_property(f) for f in extra_fields},
form_question_values=form_question_values or {},
))
return result
|
py | 1a4ff9731662ecd1d3690f6fc5acc1594bc7be22 | from django.shortcuts import render, redirect
from django.views.generic import TemplateView, ListView, CreateView
from django.core.files.storage import FileSystemStorage
from django.urls import reverse_lazy
from django.contrib.auth.decorators import login_required
@login_required
def upload(request):
context = {}
if request.method == 'POST':
uploaded_file = request.FILES['document']
fs = FileSystemStorage()
name = fs.save(uploaded_file.name, uploaded_file)
context['url'] = fs.url(name)
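        # FileSystemStorage.save() writes the upload under MEDIA_ROOT (suffixing
        # the name if it already exists) and url() returns the MEDIA_URL-based
        # link that the template can render.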
return render(request, 'upload.html', context) |
py | 1a4ff9c6fdf99ecd992301d5ef45fb174c31c774 | # -*- coding: utf-8 -*-
"""
Created on Thu May 5, 2016
@author: jonki
python3
"""
import numpy as np
import random
import sys
class QLearning(object):
def __init__(self):
# Reward matrix
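        # Values below (inferred from the graph, not stated in the original):
        # -1 marks a forbidden transition, 0 an allowed move, and 100 a move
        # into the goal state (state 5).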
self.R = np.array([
[-1, -1, -1, -1, 0, -1],
[-1, -1, -1, 0, -1, 100],
[-1, -1, -1, 0, -1, -1],
[-1, 0, 0, -1, 0, -1],
[ 0, -1, -1, 0, -1, 100],
[-1, 0, -1, -1, 0, 100]
])
# Initial Q-value
self.Q = np.zeros((6,6))
self.LEARNING_COUNT = 1000
self.GAMMA = 0.8
self.GOAL_STATE = 5
return
def learn(self):
# set a start state randomly
state = self._getRandomState()
for i in range(self.LEARNING_COUNT):
# extract possible actions in state
possible_actions = self._getPossibleActionsFromState(state)
# choise an action from possible actions randomly
action = random.choice(possible_actions)
# Update Q-value
# Q(s,a) = r(s,a) + Gamma * max[Q(next_s, possible_actions)]
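            # Worked example with illustrative numbers: if R[1, 5] = 100,
            # GAMMA = 0.8 and the best Q-value reachable from state 5 is 80,
            # this update stores Q[1, 5] = 100 + 0.8 * 80 = 164.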
next_state = action # in this example, action value is same as next state
next_possible_actions = self._getPossibleActionsFromState(next_state)
max_Q_next_s_a = self._getMaxQvalueFromStateAndPossibleActions(next_state, next_possible_actions)
self.Q[state, action] = self.R[state, action] + self.GAMMA * max_Q_next_s_a
state = next_state
# If an agent reached a goal state, restart an episode from a random start state
if state == self.GOAL_STATE:
state = self._getRandomState()
def _getRandomState(self):
return random.randint(0, self.R.shape[0] - 1)
def _getPossibleActionsFromState(self, state):
        if state < 0 or state >= self.R.shape[0]:
            sys.exit("invalid state: %d" % state)
        return np.where(self.R[state] != -1)[0]
def _getMaxQvalueFromStateAndPossibleActions(self, state, possible_actions):
return max([self.Q[state][i] for i in (possible_actions)])
    def dumpQvalue(self):
        print(self.Q.astype(int))  # convert float to int for readability
    def runGreedy(self, start_state=0):
        print("===== START =====")
        state = start_state
        while state != self.GOAL_STATE:
            print("current state: %d" % state)
possible_actions = self._getPossibleActionsFromState(state)
            # get best action which maximizes Q-value(s, a)
max_Q = 0
best_action_candidates = []
for a in possible_actions:
if self.Q[state][a] > max_Q:
best_action_candidates = [a,]
max_Q = self.Q[state][a]
elif self.Q[state][a] == max_Q:
best_action_candidates.append(a)
print("best_action_candidates:", best_action_candidates)
# get a best action from candidates randomly
best_action = random.choice(best_action_candidates)
print "-> choose action: %d" % best_action
state = best_action # in this example, action value is same as next state
print "state is %d, GOAL!!" % state
if __name__ == "__main__":
QL = QLearning()
QL.learn()
QL.dumpQvalue()
for s in range(QL.R.shape[0]-1):
print("s ",s)
print("env is ", QL.R.shape)
QL.runGreedy(s)
|
py | 1a4ffa2175c3d3be366ee9d1763cc059cd8d10d9 | #!/usr/bin/env python
import unittest
import warnings
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import PointGroup, SpaceGroup, _get_symm_data
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "4/10/14"
class PointGroupTest(unittest.TestCase):
def test_order(self):
order = {"mmm": 8, "432": 24, "-6m2": 12}
for k, v in order.items():
pg = PointGroup(k)
self.assertEqual(order[k], len(pg.symmetry_ops))
def test_get_orbit(self):
pg = PointGroup("mmm")
self.assertEqual(len(pg.get_orbit([0.1, 0.1, 0.1])), 8)
self.assertEqual(len(pg.get_orbit([0, 0, 0.1])), 2)
self.assertEqual(len(pg.get_orbit([1.2, 1.2, 1])), 8)
def test_is_sub_super_group(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
pgmmm = PointGroup("mmm")
pgmm2 = PointGroup("mm2")
pg222 = PointGroup("222")
pg4 = PointGroup("4")
self.assertTrue(pgmmm.is_supergroup(pgmm2))
self.assertTrue(pgmm2.is_subgroup(pgmmm))
self.assertTrue(pgmmm.is_supergroup(pg222))
self.assertFalse(pgmmm.is_supergroup(pg4))
pgm3m = PointGroup("m-3m")
pg6mmm = PointGroup("6/mmm")
pg3m = PointGroup("-3m")
# TODO: Fix the test below.
# self.assertTrue(pg3m.is_subgroup(pgm3m))
self.assertTrue(pg3m.is_subgroup(pg6mmm))
self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
def test_renamed_e_symbols(self):
sg = SpaceGroup.from_int_number(64)
assert sg.symbol == "Cmce"
for sym, num in (
("Aem2", 39),
("Aea2", 41),
("Cmce", 64),
("Cmme", 67),
("Ccce", 68),
):
assert SpaceGroup(sym).int_number == num
def test_abbrev_symbols(self):
sg = SpaceGroup("P2/c")
self.assertEqual(sg.int_number, 13)
sg = SpaceGroup("R-3mH")
self.assertEqual(sg.int_number, 166)
def test_attr(self):
sg = SpaceGroup("Fm-3m")
self.assertEqual(sg.full_symbol, "F4/m-32/m")
self.assertEqual(sg.point_group, "m-3m")
def test_point_group_is_set(self):
for i in range(1, 231):
sg = SpaceGroup.from_int_number(i)
self.assertTrue(hasattr(sg, "point_group"))
for symbol in _get_symm_data("space_group_encoding"):
sg = SpaceGroup(symbol)
self.assertTrue(hasattr(sg, "point_group"))
def test_full_symbols(self):
sg = SpaceGroup("P2/m2/m2/m")
self.assertEqual(sg.symbol, "Pmmm")
def test_order_symm_ops(self):
for name in SpaceGroup.SG_SYMBOLS:
sg = SpaceGroup(name)
self.assertEqual(len(sg.symmetry_ops), sg.order)
def test_get_settings(self):
self.assertEqual({"Fm-3m(a-1/4,b-1/4,c-1/4)", "Fm-3m"}, SpaceGroup.get_settings("Fm-3m"))
self.assertEqual(
{
"Pmmn",
"Pmnm:1",
"Pnmm:2",
"Pmnm:2",
"Pnmm",
"Pnmm:1",
"Pmmn:1",
"Pmnm",
"Pmmn:2",
},
SpaceGroup.get_settings("Pmmn"),
)
self.assertEqual(
{"Pnmb", "Pman", "Pncm", "Pmna", "Pcnm", "Pbmn"},
SpaceGroup.get_settings("Pmna"),
)
def test_crystal_system(self):
sg = SpaceGroup("R-3c")
self.assertEqual(sg.crystal_system, "trigonal")
sg = SpaceGroup("R-3cH")
self.assertEqual(sg.crystal_system, "trigonal")
def test_get_orbit(self):
sg = SpaceGroup("Fm-3m")
p = np.random.randint(0, 100 + 1, size=(3,)) / 100
self.assertLessEqual(len(sg.get_orbit(p)), sg.order)
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
sg = SpaceGroup("Fm-3m")
self.assertTrue(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:H")
self.assertFalse(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:R")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pnma")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P12/c1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P-1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertTrue(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pmmn:2")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup.from_int_number(165)
self.assertFalse(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(tet))
self.assertFalse(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
def test_symmops(self):
sg = SpaceGroup("Pnma")
op = SymmOp.from_rotation_and_translation([[1, 0, 0], [0, -1, 0], [0, 0, -1]], [0.5, 0.5, 0.5])
self.assertIn(op, sg.symmetry_ops)
def test_other_settings(self):
sg = SpaceGroup("Pbnm")
self.assertEqual(sg.int_number, 62)
self.assertEqual(sg.order, 8)
self.assertRaises(ValueError, SpaceGroup, "hello")
def test_subgroup_supergroup(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
self.assertTrue(SpaceGroup("Pma2").is_subgroup(SpaceGroup("Pccm")))
self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(SpaceGroup.from_int_number(230)))
def test_hexagonal(self):
sgs = [146, 148, 155, 160, 161, 166, 167]
for sg in sgs:
s = SpaceGroup.from_int_number(sg, hexagonal=False)
self.assertTrue(not s.symbol.endswith("H"))
def test_string(self):
sg = SpaceGroup("R-3c")
        self.assertEqual(sg.to_latex_string(), r"R$\overline{3}$cH")
sg = SpaceGroup("P6/mmm")
self.assertEqual(sg.to_latex_string(), "P6/mmm")
sg = SpaceGroup("P4_1")
self.assertEqual(sg.to_unicode_string(), "P4₁")
if __name__ == "__main__":
unittest.main()
|
py | 1a4ffb7979a2a3db9af7e2d2f6271139e24b466d | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import pkg_resources
import threading
import argparse
import logging
import signal
import errno
import json
import stat
import os
import shutil
from contextlib import contextmanager
from uuid import uuid4
from yaml import safe_load
from ansible_runner import run
from ansible_runner import output
from ansible_runner.utils import dump_artifact, Bunch
from ansible_runner.runner import Runner
from ansible_runner.exceptions import AnsibleRunnerException
VERSION = pkg_resources.require("ansible_runner")[0].version
DEFAULT_ROLES_PATH = os.getenv('ANSIBLE_ROLES_PATH', None)
DEFAULT_RUNNER_BINARY = os.getenv('RUNNER_BINARY', None)
DEFAULT_RUNNER_PLAYBOOK = os.getenv('RUNNER_PLAYBOOK', None)
DEFAULT_RUNNER_ROLE = os.getenv('RUNNER_ROLE', None)
DEFAULT_RUNNER_MODULE = os.getenv('RUNNER_MODULE', None)
logger = logging.getLogger('ansible-runner')
@contextmanager
def role_manager(args):
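    """Context manager that prepares kwargs for a single runner invocation.

    Sketch of the behaviour implemented below: when --role is given it
    synthesises a throwaway one-play playbook wrapping that role, points
    ANSIBLE_ROLES_PATH at the roles directory, yields the adjusted kwargs, and
    on exit removes any playbook/env artifacts it generated so the private
    data dir is left as it was found.
    """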
if args.role:
role = {'name': args.role}
if args.role_vars:
role_vars = {}
for item in args.role_vars.split():
key, value = item.split('=')
try:
role_vars[key] = ast.literal_eval(value)
except Exception:
role_vars[key] = value
role['vars'] = role_vars
kwargs = Bunch(**args.__dict__)
kwargs.update(private_data_dir=args.private_data_dir,
json_mode=args.json,
ignore_logging=False,
rotate_artifacts=args.rotate_artifacts)
if args.artifact_dir:
kwargs.artifact_dir = args.artifact_dir
project_path = os.path.join(args.private_data_dir, 'project')
project_exists = os.path.exists(project_path)
env_path = os.path.join(args.private_data_dir, 'env')
env_exists = os.path.exists(env_path)
envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
envvars_exists = os.path.exists(envvars_path)
if args.cmdline:
kwargs.cmdline = args.cmdline
playbook = None
tmpvars = None
play = [{'hosts': args.hosts if args.hosts is not None else "all",
'gather_facts': not args.role_skip_facts,
'roles': [role]}]
filename = str(uuid4().hex)
playbook = dump_artifact(json.dumps(play), project_path, filename)
kwargs.playbook = playbook
output.debug('using playbook file %s' % playbook)
if args.inventory:
inventory_file = os.path.join(args.private_data_dir, 'inventory', args.inventory)
if not os.path.exists(inventory_file):
raise AnsibleRunnerException('location specified by --inventory does not exist')
kwargs.inventory = inventory_file
output.debug('using inventory file %s' % inventory_file)
roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
roles_path = os.path.abspath(roles_path)
output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)
envvars = {}
if envvars_exists:
with open(envvars_path, 'rb') as f:
tmpvars = f.read()
new_envvars = safe_load(tmpvars)
if new_envvars:
envvars = new_envvars
envvars['ANSIBLE_ROLES_PATH'] = roles_path
kwargs.envvars = envvars
else:
kwargs = args
yield kwargs
if args.role:
if not project_exists and os.path.exists(project_path):
logger.debug('removing dynamically generated project folder')
shutil.rmtree(project_path)
elif playbook and os.path.isfile(playbook):
logger.debug('removing dynamically generated playbook')
os.remove(playbook)
# if a previous envvars existed in the private_data_dir,
# restore the original file contents
if tmpvars:
with open(envvars_path, 'wb') as f:
f.write(tmpvars)
elif not envvars_exists and os.path.exists(envvars_path):
logger.debug('removing dynamically generated envvars folder')
os.remove(envvars_path)
# since ansible-runner created the env folder, remove it
if not env_exists and os.path.exists(env_path):
logger.debug('removing dynamically generated env folder')
shutil.rmtree(env_path)
def main(sys_args=None):
parser = argparse.ArgumentParser(description='manage ansible execution')
parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])
parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')
group = parser.add_mutually_exclusive_group()
group.add_argument("-m", "--module", default=DEFAULT_RUNNER_MODULE,
help="Invoke an Ansible module directly without a playbook")
group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
help="The name of the playbook to execute")
group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
help="Invoke an Ansible role directly without a playbook")
parser.add_argument("-b", "--binary", default=DEFAULT_RUNNER_BINARY,
help="The full path to ansible[-playbook] binary")
parser.add_argument("--hosts",
help="Define the set of hosts to execute against")
parser.add_argument("-i", "--ident",
default=uuid4(),
help="An identifier that will be used when generating the"
"artifacts directory and can be used to uniquely identify a playbook run")
parser.add_argument("--rotate-artifacts",
default=0,
type=int,
help="Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation")
parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
help="Path to the Ansible roles directory")
parser.add_argument("--role-vars",
help="Variables to pass to the role at runtime")
parser.add_argument("--role-skip-facts", action="store_true", default=False,
help="Disable fact collection when executing a role directly")
parser.add_argument("--artifact-dir",
help="Optional Path for the artifact root directory, by default it is located inside the private data dir")
parser.add_argument("--inventory",
help="Override the default inventory location in private_data_dir")
parser.add_argument("-j", "--json", action="store_true",
help="Output the json event structure to stdout instead of Ansible output")
parser.add_argument("-v", action="count",
help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output")
parser.add_argument("--cmdline",
help="Command line options to pass to ansible-playbook at execution time")
parser.add_argument("--debug", action="store_true",
help="Enable Runner debug output logging")
parser.add_argument("--logfile",
help="Log output messages to a file")
parser.add_argument("-a", "--args", dest='module_args',
help="Module arguments")
parser.add_argument("--process-isolation", dest='process_isolation', action="store_true",
help="Limits what directories on the filesystem the playbook run has access to, defaults to /tmp")
parser.add_argument("--process-isolation-executable", dest='process_isolation_executable', default="bwrap",
help="Process isolation executable that will be used. Defaults to bwrap")
parser.add_argument("--process-isolation-path", dest='process_isolation_path', default="/tmp",
help="Path that an isolated playbook run will use for staging. Defaults to /tmp")
parser.add_argument("--process-isolation-hide-paths", dest='process_isolation_hide_paths',
help="List of paths on the system that should be hidden from the playbook run")
parser.add_argument("--process-isolation-show-paths", dest='process_isolation_show_paths',
help="List of paths on the system that should be exposed to the playbook run")
parser.add_argument("--process-isolation-ro-paths", dest='process_isolation_ro_paths',
help="List of paths on the system that should be exposed to the playbook run as read-only")
args = parser.parse_args(sys_args)
output.configure()
# enable or disable debug mode
output.set_debug('enable' if args.debug else 'disable')
# set the output logfile
if args.logfile:
output.set_logfile(args.logfile)
output.debug('starting debug logging')
# get the absolute path for start since it is a daemon
args.private_data_dir = os.path.abspath(args.private_data_dir)
pidfile = os.path.join(args.private_data_dir, 'pid')
try:
os.makedirs(args.private_data_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
pass
else:
raise
if args.command != 'run':
stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
if not os.path.exists(stderr_path):
os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
stderr = open(stderr_path, 'w+')
if args.command in ('start', 'run'):
if args.command == 'start':
import daemon
from daemon.pidfile import TimeoutPIDLockFile
context = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pidfile),
stderr=stderr
)
else:
context = threading.Lock()
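            # For a plain 'run' the same `with` block is reused, so an ordinary
            # lock stands in for the daemon context instead of forking.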
with context:
with role_manager(args) as args:
if args.inventory:
with open(args.inventory) as f:
inventory_data = f.read()
else:
inventory_data = None
run_options = dict(private_data_dir=args.private_data_dir,
ident=args.ident,
binary=args.binary,
playbook=args.playbook,
module=args.module,
module_args=args.module_args,
host_pattern=args.hosts,
verbosity=args.v,
quiet=args.quiet,
rotate_artifacts=args.rotate_artifacts,
ignore_logging=False,
json_mode=args.json,
inventory=inventory_data,
roles_path=[args.roles_path] if args.roles_path else None,
process_isolation=args.process_isolation,
process_isolation_executable=args.process_isolation_executable,
process_isolation_path=args.process_isolation_path,
process_isolation_hide_paths=args.process_isolation_hide_paths,
process_isolation_show_paths=args.process_isolation_show_paths,
process_isolation_ro_paths=args.process_isolation_ro_paths)
if args.cmdline:
run_options['cmdline'] = args.cmdline
res = run(**run_options)
return(res.rc)
try:
with open(pidfile, 'r') as f:
pid = int(f.readline())
except IOError:
return(1)
if args.command == 'stop':
Runner.handle_termination(pid)
return (0)
elif args.command == 'is-alive':
try:
os.kill(pid, signal.SIG_DFL)
return(0)
except OSError:
return(1)
|
py | 1a4ffbc90beb0242dbd5063f1be2342fe16c891a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import decimal
import json
import logging
import os
import pickle
import time
from datetime import date, datetime
from typing import TYPE_CHECKING, List, NamedTuple
import mock
import pytest
import pytz
import snowflake.connector
from snowflake.connector import (
DictCursor,
InterfaceError,
NotSupportedError,
ProgrammingError,
constants,
errorcode,
errors,
)
from snowflake.connector.compat import BASE_EXCEPTION_CLASS, IS_WINDOWS
from snowflake.connector.cursor import SnowflakeCursor
try:
from snowflake.connector.cursor import ResultMetadata
except ImportError:
class ResultMetadata(NamedTuple):
name: str
type_code: int
display_size: int
internal_size: int
precision: int
scale: int
is_nullable: bool
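    # Fallback shim mirroring the connector's ResultMetadata so the schema
    # assertions later in this module still work on older connector versions.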
from snowflake.connector.errorcode import (
ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT,
ER_INVALID_VALUE,
ER_NOT_POSITIVE_SIZE,
)
from snowflake.connector.sqlstate import SQLSTATE_FEATURE_NOT_SUPPORTED
from snowflake.connector.telemetry import TelemetryField
from ..randomize import random_string
try:
from snowflake.connector.constants import (
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT,
)
from snowflake.connector.errorcode import (
ER_NO_ARROW_RESULT,
ER_NO_PYARROW,
ER_NO_PYARROW_SNOWSQL,
)
from snowflake.connector.result_batch import ArrowResultBatch, JSONResultBatch
except ImportError:
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT = None
ER_NO_ARROW_RESULT = None
ER_NO_PYARROW = None
ER_NO_PYARROW_SNOWSQL = None
ArrowResultBatch = JSONResultBatch = None
if TYPE_CHECKING: # pragma: no cover
from snowflake.connector.result_batch import ResultBatch
def _drop_warehouse(conn, db_parameters):
conn.cursor().execute(
"drop warehouse if exists {}".format(db_parameters["name_wh"])
)
@pytest.fixture()
def conn(request, conn_cnx, db_parameters):
def fin():
with conn_cnx() as cnx:
cnx.cursor().execute(
"use {db}.{schema}".format(
db=db_parameters["database"], schema=db_parameters["schema"]
)
)
cnx.cursor().execute("drop table {name}".format(name=db_parameters["name"]))
request.addfinalizer(fin)
with conn_cnx() as cnx:
cnx.cursor().execute(
"""
create table {name} (
aa int,
dt date,
tm time,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(5,2),
b binary)
""".format(
name=db_parameters["name"]
)
)
return conn_cnx
def _check_results(cursor, results):
assert cursor.sfqid, "Snowflake query id is None"
assert cursor.rowcount == 3, "the number of records"
assert results[0] == 65432, "the first result was wrong"
assert results[1] == 98765, "the second result was wrong"
assert results[2] == 123456, "the third result was wrong"
def test_insert_select(conn, db_parameters):
"""Inserts and selects integer data."""
with conn() as cnx:
c = cnx.cursor()
try:
c.execute(
"insert into {name}(aa) values(123456),"
"(98765),(65432)".format(name=db_parameters["name"])
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 3, "wrong number of records were inserted"
assert c.rowcount == 3, "wrong number of records were inserted"
finally:
c.close()
try:
c = cnx.cursor()
c.execute(
"select aa from {name} order by aa".format(name=db_parameters["name"])
)
results = []
for rec in c:
results.append(rec[0])
_check_results(c, results)
finally:
c.close()
with cnx.cursor(snowflake.connector.DictCursor) as c:
c.execute(
"select aa from {name} order by aa".format(name=db_parameters["name"])
)
results = []
for rec in c:
results.append(rec["AA"])
_check_results(c, results)
def test_insert_and_select_by_separate_connection(conn, db_parameters):
"""Inserts a record and select it by a separate connection."""
with conn() as cnx:
result = cnx.cursor().execute(
"insert into {name}(aa) values({value})".format(
name=db_parameters["name"], value="1234"
)
)
cnt = 0
for rec in result:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert result.rowcount == 1, "wrong number of records were inserted"
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
timezone="UTC",
)
try:
c = cnx2.cursor()
c.execute("select aa from {name}".format(name=db_parameters["name"]))
results = []
for rec in c:
results.append(rec[0])
c.close()
assert results[0] == 1234, "the first result was wrong"
assert result.rowcount == 1, "wrong number of records were selected"
finally:
cnx2.close()
def _total_milliseconds_from_timedelta(td):
"""Returns the total number of milliseconds contained in the duration object."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) // 10 ** 3
def _total_seconds_from_timedelta(td):
"""Returns the total number of seconds contained in the duration object."""
return _total_milliseconds_from_timedelta(td) // 10 ** 3
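# Example of the arithmetic above (illustrative values): timedelta(days=1,
# seconds=1, microseconds=500000) -> (500000 + 86401 * 10**6) // 10**3
# = 86401500 ms, and 86401500 // 10**3 = 86401 whole seconds.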
def test_insert_timestamp_select(conn, db_parameters):
"""Inserts and gets timestamp, timestamp with tz, date, and time.
Notes:
Currently the session parameter TIMEZONE is ignored.
"""
PST_TZ = "America/Los_Angeles"
JST_TZ = "Asia/Tokyo"
current_timestamp = datetime.utcnow()
current_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(PST_TZ))
current_date = current_timestamp.date()
current_time = current_timestamp.time()
other_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(JST_TZ))
with conn() as cnx:
cnx.cursor().execute("alter session set TIMEZONE=%s", (PST_TZ,))
c = cnx.cursor()
try:
fmt = (
"insert into {name}(aa, tsltz, tstz, tsntz, dt, tm) "
"values(%(value)s,%(tsltz)s, %(tstz)s, %(tsntz)s, "
"%(dt)s, %(tm)s)"
)
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 1234,
"tsltz": current_timestamp,
"tstz": other_timestamp,
"tsntz": current_timestamp,
"dt": current_date,
"tm": current_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
timezone="UTC",
)
try:
c = cnx2.cursor()
c.execute(
"select aa, tsltz, tstz, tsntz, dt, tm from {name}".format(
name=db_parameters["name"]
)
)
result_numeric_value = []
result_timestamp_value = []
result_other_timestamp_value = []
result_ntz_timestamp_value = []
result_date_value = []
result_time_value = []
for (aa, ts, tstz, tsntz, dt, tm) in c:
result_numeric_value.append(aa)
result_timestamp_value.append(ts)
result_other_timestamp_value.append(tstz)
result_ntz_timestamp_value.append(tsntz)
result_date_value.append(dt)
result_time_value.append(tm)
c.close()
assert result_numeric_value[0] == 1234, "the integer result was wrong"
td_diff = _total_milliseconds_from_timedelta(
current_timestamp - result_timestamp_value[0]
)
assert td_diff == 0, "the timestamp result was wrong"
td_diff = _total_milliseconds_from_timedelta(
other_timestamp - result_other_timestamp_value[0]
)
assert td_diff == 0, "the other timestamp result was wrong"
td_diff = _total_milliseconds_from_timedelta(
current_timestamp.replace(tzinfo=None) - result_ntz_timestamp_value[0]
)
assert td_diff == 0, "the other timestamp result was wrong"
assert current_date == result_date_value[0], "the date result was wrong"
assert current_time == result_time_value[0], "the time result was wrong"
desc = c.description
assert len(desc) == 6, "invalid number of column meta data"
assert desc[0][0].upper() == "AA", "invalid column name"
assert desc[1][0].upper() == "TSLTZ", "invalid column name"
assert desc[2][0].upper() == "TSTZ", "invalid column name"
assert desc[3][0].upper() == "TSNTZ", "invalid column name"
assert desc[4][0].upper() == "DT", "invalid column name"
assert desc[5][0].upper() == "TM", "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[0][1]] == "FIXED"
), "invalid column name: {}".format(constants.FIELD_ID_TO_NAME[desc[0][1]])
assert (
constants.FIELD_ID_TO_NAME[desc[1][1]] == "TIMESTAMP_LTZ"
), "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[2][1]] == "TIMESTAMP_TZ"
), "invalid column name"
assert (
constants.FIELD_ID_TO_NAME[desc[3][1]] == "TIMESTAMP_NTZ"
), "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[4][1]] == "DATE", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[5][1]] == "TIME", "invalid column name"
finally:
cnx2.close()
def test_insert_timestamp_ltz(conn, db_parameters):
"""Inserts and retrieve timestamp ltz."""
tzstr = "America/New_York"
# sync with the session parameter
with conn() as cnx:
cnx.cursor().execute("alter session set timezone='{tzstr}'".format(tzstr=tzstr))
current_time = datetime.now()
current_time = current_time.replace(tzinfo=pytz.timezone(tzstr))
c = cnx.cursor()
try:
fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 8765,
"ts": current_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
finally:
c.close()
try:
c = cnx.cursor()
c.execute("select aa,tsltz from {name}".format(name=db_parameters["name"]))
result_numeric_value = []
result_timestamp_value = []
for (aa, ts) in c:
result_numeric_value.append(aa)
result_timestamp_value.append(ts)
td_diff = _total_milliseconds_from_timedelta(
current_time - result_timestamp_value[0]
)
assert td_diff == 0, "the first result was wrong"
finally:
c.close()
def test_struct_time(conn, db_parameters):
"""Binds struct_time object for updating timestamp."""
tzstr = "America/New_York"
os.environ["TZ"] = tzstr
if not IS_WINDOWS:
time.tzset()
test_time = time.strptime("30 Sep 01 11:20:30", "%d %b %y %H:%M:%S")
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
c.execute(
fmt.format(name=db_parameters["name"]),
{
"value": 87654,
"ts": test_time,
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
finally:
c.close()
os.environ["TZ"] = "UTC"
if not IS_WINDOWS:
time.tzset()
assert cnt == 1, "wrong number of records were inserted"
try:
result = cnx.cursor().execute(
"select aa, tsltz from {name}".format(name=db_parameters["name"])
)
for (_, _tsltz) in result:
pass
_tsltz -= _tsltz.tzinfo.utcoffset(_tsltz)
assert test_time.tm_year == _tsltz.year, "Year didn't match"
assert test_time.tm_mon == _tsltz.month, "Month didn't match"
assert test_time.tm_mday == _tsltz.day, "Day didn't match"
assert test_time.tm_hour == _tsltz.hour, "Hour didn't match"
assert test_time.tm_min == _tsltz.minute, "Minute didn't match"
assert test_time.tm_sec == _tsltz.second, "Second didn't match"
finally:
os.environ["TZ"] = "UTC"
if not IS_WINDOWS:
time.tzset()
def test_insert_binary_select(conn, db_parameters):
"""Inserts and get a binary value."""
value = b"\x00\xFF\xA1\xB2\xC3"
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(b) values(%(b)s)"
c.execute(fmt.format(name=db_parameters["name"]), {"b": value})
count = sum(int(rec[0]) for rec in c)
assert count == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
)
try:
c = cnx2.cursor()
c.execute("select b from {name}".format(name=db_parameters["name"]))
results = [b for (b,) in c]
assert value == results[0], "the binary result was wrong"
desc = c.description
assert len(desc) == 1, "invalid number of column meta data"
assert desc[0][0].upper() == "B", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[0][1]] == "BINARY", "invalid column name"
finally:
cnx2.close()
def test_insert_binary_select_with_bytearray(conn, db_parameters):
"""Inserts and get a binary value using the bytearray type."""
value = bytearray(b"\x00\xFF\xA1\xB2\xC3")
with conn() as cnx:
c = cnx.cursor()
try:
fmt = "insert into {name}(b) values(%(b)s)"
c.execute(fmt.format(name=db_parameters["name"]), {"b": value})
count = sum(int(rec[0]) for rec in c)
assert count == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were selected"
finally:
c.close()
cnx2 = snowflake.connector.connect(
user=db_parameters["user"],
password=db_parameters["password"],
host=db_parameters["host"],
port=db_parameters["port"],
account=db_parameters["account"],
database=db_parameters["database"],
schema=db_parameters["schema"],
protocol=db_parameters["protocol"],
)
try:
c = cnx2.cursor()
c.execute("select b from {name}".format(name=db_parameters["name"]))
results = [b for (b,) in c]
assert bytes(value) == results[0], "the binary result was wrong"
desc = c.description
assert len(desc) == 1, "invalid number of column meta data"
assert desc[0][0].upper() == "B", "invalid column name"
assert constants.FIELD_ID_TO_NAME[desc[0][1]] == "BINARY", "invalid column name"
finally:
cnx2.close()
def test_variant(conn, db_parameters):
"""Variant including JSON object."""
name_variant = db_parameters["name"] + "_variant"
with conn() as cnx:
cnx.cursor().execute(
"""
create table {name} (
created_at timestamp, data variant)
""".format(
name=name_variant
)
)
try:
with conn() as cnx:
current_time = datetime.now()
c = cnx.cursor()
try:
fmt = (
"insert into {name}(created_at, data) "
"select column1, parse_json(column2) "
"from values(%(created_at)s, %(data)s)"
)
c.execute(
fmt.format(name=name_variant),
{
"created_at": current_time,
"data": (
'{"SESSION-PARAMETERS":{'
'"TIMEZONE":"UTC", "SPECIAL_FLAG":true}}'
),
},
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 1, "wrong number of records were inserted"
assert c.rowcount == 1, "wrong number of records were inserted"
finally:
c.close()
result = cnx.cursor().execute(
"select created_at, data from {name}".format(name=name_variant)
)
_, data = result.fetchone()
data = json.loads(data)
assert data["SESSION-PARAMETERS"]["SPECIAL_FLAG"], (
"JSON data should be parsed properly. " "Invalid JSON data"
)
finally:
with conn() as cnx:
cnx.cursor().execute("drop table {name}".format(name=name_variant))
def test_callproc(conn_cnx):
"""Callproc test.
Notes:
It's a nop as of now.
"""
with conn_cnx() as cnx:
with pytest.raises(errors.NotSupportedError):
cnx.cursor().callproc("whatever the stored procedure")
def test_invalid_bind_data_type(conn_cnx):
"""Invalid bind data type."""
with conn_cnx() as cnx:
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute("select 1 from dual where 1=%s", ([1, 2, 3],))
def test_timeout_query(conn_cnx):
with conn_cnx() as cnx:
cnx.cursor().execute("select 1")
c = cnx.cursor()
try:
c.execute(
"select seq8() as c1 " "from table(generator(timeLimit => 60))",
timeout=5,
)
raise Exception("Must be canceled")
except BASE_EXCEPTION_CLASS as err:
assert isinstance(
err, errors.ProgrammingError
), "Programming Error Exception"
assert err.errno == 604, "Invalid error code"
finally:
c.close()
def test_executemany(conn, db_parameters):
"""Executes many statements. Client binding is supported by either dict, or list data types.
Notes:
The binding data type is dict and tuple, respectively.
"""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 4, "number of records"
assert c.rowcount == 4, "wrong number of records were inserted"
c.close()
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%s)".format(name=db_parameters["name"])
c.executemany(
fmt,
[
(12345,),
(1234,),
(234,),
(34,),
(4,),
],
)
rec = c.fetchone()
assert rec[0] == 5, "number of records"
assert c.rowcount == 5, "wrong number of records were inserted"
c.close()
@pytest.mark.skipolddriver
def test_executemany_qmark_types(conn, db_parameters):
table_name = random_string(5, "date_test_")
with conn(paramstyle="qmark") as cnx:
with cnx.cursor() as cur:
cur.execute(f"create table {table_name} (birth_date date)")
insert_qy = f"INSERT INTO {table_name} (birth_date) values (?)"
date_1, date_2 = date(1969, 2, 7), date(1969, 1, 1)
try:
# insert two dates, one in tuple format which specifies
# the snowflake type similar to how we support it in this
# example:
# https://docs.snowflake.com/en/user-guide/python-connector-example.html#using-qmark-or-numeric-binding-with-datetime-objects
cur.executemany(
insert_qy,
[[date_1], [("DATE", date_2)]],
)
cur.execute(f"select * from {table_name}")
inserted_dates = [row[0] for row in cur.fetchall()]
assert date_1 in inserted_dates
assert date_2 in inserted_dates
finally:
cur.execute(f"drop table if exists {table_name}")
def test_closed_cursor(conn, db_parameters):
"""Attempts to use the closed cursor. It should raise errors.
Notes:
The binding data type is scalar.
"""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%s)".format(name=db_parameters["name"])
c.executemany(
fmt,
[
12345,
1234,
234,
34,
4,
],
)
rec = c.fetchone()
assert rec[0] == 5, "number of records"
assert c.rowcount == 5, "number of records"
c.close()
fmt = "select aa from {name}".format(name=db_parameters["name"])
try:
c.execute(fmt)
raise Exception("should fail as the cursor was closed.")
except snowflake.connector.Error as err:
assert err.errno == errorcode.ER_CURSOR_IS_CLOSED
def test_fetchmany(conn, db_parameters):
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "3456789"},
{"value": "234567"},
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
assert cnt == 6, "number of records"
assert c.rowcount == 6, "number of records"
c.close()
c = cnx.cursor()
fmt = "select aa from {name} order by aa desc".format(
name=db_parameters["name"]
)
c.execute(fmt)
rows = c.fetchmany(2)
assert len(rows) == 2, "The number of records"
assert rows[1][0] == 234567, "The second record"
rows = c.fetchmany(1)
assert len(rows) == 1, "The number of records"
assert rows[0][0] == 1234, "The first record"
rows = c.fetchmany(5)
assert len(rows) == 3, "The number of records"
assert rows[-1][0] == 4, "The last record"
rows = c.fetchmany(15)
assert len(rows) == 0, "The number of records"
c.close()
def test_process_params(conn, db_parameters):
"""Binds variables for insert and other queries."""
with conn() as cnx:
c = cnx.cursor()
fmt = "insert into {name}(aa) values(%(value)s)".format(
name=db_parameters["name"]
)
c.executemany(
fmt,
[
{"value": "3456789"},
{"value": "234567"},
{"value": "1234"},
{"value": "234"},
{"value": "34"},
{"value": "4"},
],
)
cnt = 0
for rec in c:
cnt += int(rec[0])
c.close()
assert cnt == 6, "number of records"
fmt = "select count(aa) from {name} where aa > %(value)s".format(
name=db_parameters["name"]
)
c = cnx.cursor()
c.execute(fmt, {"value": 1233})
for (_cnt,) in c:
pass
assert _cnt == 3, "the number of records"
c.close()
fmt = "select count(aa) from {name} where aa > %s".format(
name=db_parameters["name"]
)
c = cnx.cursor()
c.execute(fmt, (1234,))
for (_cnt,) in c:
pass
assert _cnt == 2, "the number of records"
c.close()
def test_real_decimal(conn, db_parameters):
with conn() as cnx:
c = cnx.cursor()
fmt = ("insert into {name}(aa, pct, ratio) " "values(%s,%s,%s)").format(
name=db_parameters["name"]
)
c.execute(fmt, (9876, 12.3, decimal.Decimal("23.4")))
for (_cnt,) in c:
pass
assert _cnt == 1, "the number of records"
c.close()
c = cnx.cursor()
fmt = "select aa, pct, ratio from {name}".format(name=db_parameters["name"])
c.execute(fmt)
for (_aa, _pct, _ratio) in c:
pass
assert _aa == 9876, "the integer value"
assert _pct == 12.3, "the float value"
assert _ratio == decimal.Decimal("23.4"), "the decimal value"
c.close()
with cnx.cursor(snowflake.connector.DictCursor) as c:
fmt = "select aa, pct, ratio from {name}".format(name=db_parameters["name"])
c.execute(fmt)
rec = c.fetchone()
assert rec["AA"] == 9876, "the integer value"
assert rec["PCT"] == 12.3, "the float value"
assert rec["RATIO"] == decimal.Decimal("23.4"), "the decimal value"
def test_none_errorhandler(conn_testaccount):
c = conn_testaccount.cursor()
with pytest.raises(errors.ProgrammingError):
c.errorhandler = None
def test_nope_errorhandler(conn_testaccount):
def user_errorhandler(connection, cursor, errorclass, errorvalue):
pass
c = conn_testaccount.cursor()
c.errorhandler = user_errorhandler
c.execute("select * foooooo never_exists_table")
c.execute("select * barrrrr never_exists_table")
c.execute("select * daaaaaa never_exists_table")
assert c.messages[0][0] == errors.ProgrammingError, "One error was recorded"
assert len(c.messages) == 1, "should be one error"
@pytest.mark.internal
def test_binding_negative(negative_conn_cnx, db_parameters):
with negative_conn_cnx() as cnx:
with pytest.raises(TypeError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(1, 2, 3),
)
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(),
)
with pytest.raises(errors.ProgrammingError):
cnx.cursor().execute(
"INSERT INTO {name}(aa) VALUES(%s)".format(name=db_parameters["name"]),
(["a"],),
)
def test_execute_after_close(conn_testaccount):
"""SNOW-13588: Raises an error if executing after the connection is closed."""
cursor = conn_testaccount.cursor()
conn_testaccount.close()
with pytest.raises(errors.Error):
cursor.execute("show tables")
def test_multi_table_insert(conn, db_parameters):
try:
with conn() as cnx:
cur = cnx.cursor()
cur.execute(
"""
INSERT INTO {name}(aa) VALUES(1234),(9876),(2345)
""".format(
name=db_parameters["name"]
)
)
assert cur.rowcount == 3, "the number of records"
cur.execute(
"""
CREATE OR REPLACE TABLE {name}_foo (aa_foo int)
""".format(
name=db_parameters["name"]
)
)
cur.execute(
"""
CREATE OR REPLACE TABLE {name}_bar (aa_bar int)
""".format(
name=db_parameters["name"]
)
)
cur.execute(
"""
INSERT ALL
INTO {name}_foo(aa_foo) VALUES(aa)
INTO {name}_bar(aa_bar) VALUES(aa)
SELECT aa FROM {name}
""".format(
name=db_parameters["name"]
)
)
assert cur.rowcount == 6
finally:
with conn() as cnx:
cnx.cursor().execute(
"""
DROP TABLE IF EXISTS {name}_foo
""".format(
name=db_parameters["name"]
)
)
cnx.cursor().execute(
"""
DROP TABLE IF EXISTS {name}_bar
""".format(
name=db_parameters["name"]
)
)
@pytest.mark.skipif(
True,
reason="""
Negative test case.
""",
)
def test_fetch_before_execute(conn_testaccount):
"""SNOW-13574: Fetch before execute."""
cursor = conn_testaccount.cursor()
with pytest.raises(errors.DataError):
cursor.fetchone()
def test_close_twice(conn_testaccount):
conn_testaccount.close()
conn_testaccount.close()
@pytest.mark.parametrize("result_format", ("arrow", "json"))
def test_fetch_out_of_range_timestamp_value(conn, result_format):
with conn() as cnx:
cur = cnx.cursor()
cur.execute(
f"alter session set python_connector_query_result_format='{result_format}'"
)
cur.execute("select '12345-01-02'::timestamp_ntz")
with pytest.raises(errors.InterfaceError):
cur.fetchone()
@pytest.mark.parametrize("sql", (None, ""), ids=["None", "empty"])
def test_empty_execution(conn, sql):
"""Checks whether executing an empty string, or nothing behaves as expected."""
with conn() as cnx:
with cnx.cursor() as cur:
if sql is not None:
cur.execute(sql)
assert cur._result is None
with pytest.raises(
TypeError, match="'NoneType' object is not( an)? itera(tor|ble)"
):
cur.fetchone()
with pytest.raises(
TypeError, match="'NoneType' object is not( an)? itera(tor|ble)"
):
cur.fetchall()
@pytest.mark.parametrize(
"reuse_results", (False, pytest.param(True, marks=pytest.mark.skipolddriver))
)
def test_reset_fetch(conn, reuse_results):
"""Tests behavior after resetting the cursor."""
with conn(reuse_results=reuse_results) as cnx:
with cnx.cursor() as cur:
cur.execute("select 1")
cur.reset()
if reuse_results:
assert cur.fetchone() == (1,)
else:
assert cur.fetchone() is None
assert len(cur.fetchall()) == 0
def test_rownumber(conn):
"""Checks whether rownumber is returned as expected."""
with conn() as cnx:
with cnx.cursor() as cur:
assert cur.execute("select * from values (1), (2)")
assert cur.rownumber is None
assert cur.fetchone() == (1,)
assert cur.rownumber == 0
assert cur.fetchone() == (2,)
assert cur.rownumber == 1
def test_values_set(conn):
"""Checks whether a bunch of properties start as Nones, but get set to something else when a query was executed."""
properties = [
"timestamp_output_format",
"timestamp_ltz_output_format",
"timestamp_tz_output_format",
"timestamp_ntz_output_format",
"date_output_format",
"timezone",
"time_output_format",
"binary_output_format",
]
with conn() as cnx:
with cnx.cursor() as cur:
for property in properties:
assert getattr(cur, property) is None
assert cur.execute("select 1").fetchone() == (1,)
# The default values might change in future, so let's just check that they aren't None anymore
for property in properties:
assert getattr(cur, property) is not None
def test_execute_helper_params_error(conn_testaccount):
"""Tests whether calling _execute_helper with a non-dict statement params is handled correctly."""
with conn_testaccount.cursor() as cur:
with pytest.raises(
ProgrammingError,
match=r"The data type of statement params is invalid. It must be dict.$",
):
cur._execute_helper("select %()s", statement_params="1")
def test_desc_rewrite(conn, caplog):
"""Tests whether describe queries are rewritten as expected and this action is logged."""
with conn() as cnx:
with cnx.cursor() as cur:
table_name = random_string(5, "test_desc_rewrite_")
try:
cur.execute("create or replace table {} (a int)".format(table_name))
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.execute("desc {}".format(table_name))
assert (
"snowflake.connector.cursor",
20,
"query was rewritten: org=desc {table_name}, new=describe table {table_name}".format(
table_name=table_name
),
) in caplog.record_tuples
finally:
cur.execute("drop table {}".format(table_name))
@pytest.mark.skipolddriver
@pytest.mark.parametrize("result_format", [False, None, "json"])
def test_execute_helper_cannot_use_arrow(conn_cnx, caplog, result_format):
"""Tests whether cannot use arrow is handled correctly inside of _execute_helper."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
if result_format is False:
result_format = None
else:
result_format = {
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: result_format
}
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.execute("select 1", _statement_params=result_format)
assert (
"snowflake.connector.cursor",
logging.DEBUG,
"Cannot use arrow result format, fallback to json format",
) in caplog.record_tuples
assert cur.fetchone() == (1,)
@pytest.mark.skipolddriver
def test_execute_helper_cannot_use_arrow_exception(conn_cnx):
"""Like test_execute_helper_cannot_use_arrow but when we are trying to force arrow an Exception should be raised."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
with pytest.raises(
ProgrammingError,
match="The result set in Apache Arrow format is not supported for the platform.",
):
cur.execute(
"select 1",
_statement_params={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "arrow"
},
)
@pytest.mark.skipolddriver
def test_check_can_use_arrow_resultset(conn_cnx, caplog):
"""Tests check_can_use_arrow_resultset has no effect when we can use arrow."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", True
):
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur.check_can_use_arrow_resultset()
assert "Arrow" not in caplog.text
@pytest.mark.skipolddriver
@pytest.mark.parametrize("snowsql", [True, False])
def test_check_cannot_use_arrow_resultset(conn_cnx, caplog, snowsql):
"""Tests check_can_use_arrow_resultset expected outcomes."""
config = {}
if snowsql:
config["application"] = "SnowSQL"
with conn_cnx(**config) as cnx:
with cnx.cursor() as cur:
with mock.patch(
"snowflake.connector.cursor.CAN_USE_ARROW_RESULT_FORMAT", False
):
with pytest.raises(
ProgrammingError,
match="Currently SnowSQL doesn't support the result set in Apache Arrow format."
if snowsql
else "The result set in Apache Arrow format is not supported for the platform.",
) as pe:
cur.check_can_use_arrow_resultset()
assert pe.errno == (
ER_NO_PYARROW_SNOWSQL if snowsql else ER_NO_ARROW_RESULT
)
@pytest.mark.skipolddriver
def test_check_can_use_pandas(conn_cnx):
"""Tests check_can_use_arrow_resultset has no effect when we can import pandas."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch("snowflake.connector.cursor.installed_pandas", True):
cur.check_can_use_pandas()
@pytest.mark.skipolddriver
def test_check_cannot_use_pandas(conn_cnx):
"""Tests check_can_use_arrow_resultset has expected outcomes."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
with mock.patch("snowflake.connector.cursor.installed_pandas", False):
with pytest.raises(
ProgrammingError,
match=r"Optional dependency: 'pyarrow' is not installed, please see the "
"following link for install instructions: https:.*",
) as pe:
cur.check_can_use_pandas()
assert pe.errno == ER_NO_PYARROW
@pytest.mark.skipolddriver
def test_not_supported_pandas(conn_cnx):
"""Check that fetch_pandas functions return expected error when arrow results are not available."""
result_format = {PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "json"}
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute("select 1", _statement_params=result_format)
with mock.patch("snowflake.connector.cursor.installed_pandas", True):
with pytest.raises(NotSupportedError):
cur.fetch_pandas_all()
with pytest.raises(NotSupportedError):
list(cur.fetch_pandas_batches())
def test_query_cancellation(conn_cnx):
"""Tests whether query_cancellation works."""
with conn_cnx() as cnx:
with cnx.cursor() as cur:
cur.execute(
"select max(seq8()) from table(generator(timeLimit=>30));",
_no_results=True,
)
sf_qid = cur.sfqid
cur.abort_query(sf_qid)
def test_executemany_error(conn_cnx):
"""Tests calling executemany without many things."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError,
match="No parameters are specified for the command: select 1",
) as ie:
cur.executemany("select 1", [])
assert ie.errno == ER_INVALID_VALUE
def test_executemany_insert_rewrite(conn_cnx):
"""Tests calling executemany with a non rewritable pyformat insert query."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError, match="Failed to rewrite multi-row insert"
) as ie:
cur.executemany("insert into numbers (select 1)", [1, 2])
assert ie.errno == ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT
def test_executemany_bulk_insert_size_mismatch(conn_cnx):
"""Tests bulk insert error with variable length of arguments."""
with conn_cnx(paramstyle="qmark") as con:
with con.cursor() as cur:
with pytest.raises(
InterfaceError, match="Bulk data size don't match. expected: 1, got: 2"
) as ie:
cur.executemany("insert into numbers values (?,?)", [[1], [1, 2]])
assert ie.errno == ER_FAILED_TO_REWRITE_MULTI_ROW_INSERT
def test_fetchmany_size_error(conn_cnx):
"""Tests retrieving a negative number of results."""
with conn_cnx() as con:
with con.cursor() as cur:
cur.execute("select 1")
with pytest.raises(
ProgrammingError,
match="The number of rows is not zero or positive number: -1",
) as ie:
cur.fetchmany(-1)
assert ie.errno == ER_NOT_POSITIVE_SIZE
def test_nextset(conn_cnx, caplog):
"""Tests no op function nextset."""
caplog.set_level(logging.DEBUG, "snowflake.connector")
with conn_cnx() as con:
with con.cursor() as cur:
caplog.set_level(logging.DEBUG, "snowflake.connector")
assert cur.nextset() is None
assert ("snowflake.connector.cursor", logging.DEBUG, "nop") in caplog.record_tuples
def test_scroll(conn_cnx):
"""Tests if scroll returns a NotSupported exception."""
with conn_cnx() as con:
with con.cursor() as cur:
with pytest.raises(
NotSupportedError, match="scroll is not supported."
) as nse:
cur.scroll(2)
assert nse.errno == SQLSTATE_FEATURE_NOT_SUPPORTED
def test__log_telemetry_job_data(conn_cnx, caplog):
"""Tests whether we handle missing connection object correctly while logging a telemetry event."""
with conn_cnx() as con:
with con.cursor() as cur:
with mock.patch.object(cur, "_connection", None):
caplog.set_level(logging.DEBUG, "snowflake.connector")
cur._log_telemetry_job_data("test", True)
assert (
"snowflake.connector.cursor",
logging.WARNING,
"Cursor failed to log to telemetry. Connection object may be None.",
) in caplog.record_tuples
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize(
"result_format,expected_chunk_type",
(
("json", JSONResultBatch),
("arrow", ArrowResultBatch),
),
)
def test_resultbatch(
conn_cnx,
result_format,
expected_chunk_type,
capture_sf_telemetry,
):
"""This test checks the following things:
1. After executing a query can we pickle the result batches
2. When we get the batches, do we emit a telemetry log
3. Whether we can iterate through ResultBatches multiple times
4. Whether the results make sense
5. See whether getter functions are working
"""
rowcount = 100000
with conn_cnx(
session_parameters={
"python_connector_query_result_format": result_format,
}
) as con:
with capture_sf_telemetry.patch_connection(con) as telemetry_data:
with con.cursor() as cur:
cur.execute(
f"select seq4() from table(generator(rowcount => {rowcount}));"
)
assert cur._result_set.total_row_index() == rowcount
pre_pickle_partitions = cur.get_result_batches()
assert len(pre_pickle_partitions) > 1
assert pre_pickle_partitions is not None
assert all(
isinstance(p, expected_chunk_type) for p in pre_pickle_partitions
)
pickle_str = pickle.dumps(pre_pickle_partitions)
assert any(
t.message["type"] == TelemetryField.GET_PARTITIONS_USED
for t in telemetry_data.records
)
post_pickle_partitions: List["ResultBatch"] = pickle.loads(pickle_str)
total_rows = 0
# Make sure the batches can be iterated over individually
for i, partition in enumerate(post_pickle_partitions):
# Tests whether the getter functions are working
if i == 0:
assert partition.compressed_size is None
assert partition.uncompressed_size is None
else:
assert partition.compressed_size is not None
assert partition.uncompressed_size is not None
for row in partition:
col1 = row[0]
assert col1 == total_rows
total_rows += 1
assert total_rows == rowcount
total_rows = 0
# Make sure the batches can be iterated over again
for partition in post_pickle_partitions:
for row in partition:
col1 = row[0]
assert col1 == total_rows
total_rows += 1
assert total_rows == rowcount
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize(
"result_format,patch_path",
(
("json", "snowflake.connector.result_batch.JSONResultBatch.create_iter"),
("arrow", "snowflake.connector.result_batch.ArrowResultBatch.create_iter"),
),
)
def test_resultbatch_lazy_fetching_and_schemas(conn_cnx, result_format, patch_path):
"""Tests whether pre-fetching results chunks fetches the right amount of them."""
rowcount = 1000000 # We need at least 5 chunks for this test
with conn_cnx(
session_parameters={
"python_connector_query_result_format": result_format,
}
) as con:
with con.cursor() as cur:
# Dummy return values are necessary so that the first fetchone call does
# not iterate through every batch
downloads = [iter([(i,)]) for i in range(10)]
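# each patched create_iter call returns an iterator over a single dummy row,
# so the initial fetchone() is satisfied by the first (local) batch alone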
with mock.patch(
patch_path,
side_effect=downloads,
) as patched_download:
cur.execute(
f"select seq4() as c1, randstr(1,random()) as c2 "
f"from table(generator(rowcount => {rowcount}));"
)
result_batches = cur.get_result_batches()
batch_schemas = [batch.schema for batch in result_batches]
for schema in batch_schemas:
# all batches should have the same schema
assert schema == [
ResultMetadata("C1", 0, None, None, 10, 0, False),
ResultMetadata("C2", 2, None, 16777216, None, None, False),
]
assert patched_download.call_count == 0
assert len(result_batches) > 5
assert result_batches[0]._local # Sanity check first chunk being local
cur.fetchone() # Trigger pre-fetching
# While the first chunk is local we still call _download on it; it
# short-circuits, just parses the data (for JSON batches) and returns
# an iterator over that data, so we expect the call count to be 5:
# (0 local and 1, 2, 3, 4 pre-fetched) = 5 total
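# poll for up to one second; the while-loop's else clause runs only if the
# expected call count was never reached, in which case the assert below fails
# and reports the observed count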
start_time = time.time()
while time.time() < start_time + 1:
if patched_download.call_count == 5:
break
else:
assert patched_download.call_count == 5
@pytest.mark.skipolddriver(reason="new feature in v2.5.0")
@pytest.mark.parametrize("result_format", ["json", "arrow"])
def test_resultbatch_schema_exists_when_zero_rows(conn_cnx, result_format):
with conn_cnx(
session_parameters={"python_connector_query_result_format": result_format}
) as con:
with con.cursor() as cur:
cur.execute(
"select seq4() as c1, randstr(1,random()) as c2 from table(generator(rowcount => 1)) where 1=0"
)
result_batches = cur.get_result_batches()
# verify there is 1 batch and 0 rows in that batch
assert len(result_batches) == 1
assert result_batches[0].rowcount == 0
# verify that the schema is correct
schema = result_batches[0].schema
assert schema == [
ResultMetadata("C1", 0, None, None, 10, 0, False),
ResultMetadata("C2", 2, None, 16777216, None, None, False),
]
def test_optional_telemetry(conn_cnx, capture_sf_telemetry):
"""Make sure that we do not fail when _first_chunk_time is not present in cursor."""
with conn_cnx() as con:
with con.cursor() as cur:
with capture_sf_telemetry.patch_connection(con, False) as telemetry:
cur.execute("select 1;")
cur._first_chunk_time = None
assert cur.fetchall() == [
(1,),
]
assert not any(
r.message.get("type", "") == TelemetryField.TIME_CONSUME_LAST_RESULT
for r in telemetry.records
)
@pytest.mark.parametrize("result_format", ("json", "arrow"))
@pytest.mark.parametrize("cursor_type", (SnowflakeCursor, DictCursor))
@pytest.mark.parametrize("fetch_method", ("__next__", "fetchone"))
def test_out_of_range_year(conn_cnx, result_format, cursor_type, fetch_method):
"""Tests whether the year 10000 is out of range exception is raised as expected."""
with conn_cnx(
session_parameters={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: result_format
}
) as con:
with con.cursor(cursor_type) as cur:
cur.execute(
"select * from VALUES (1, TO_TIMESTAMP('9999-01-01 00:00:00')), (2, TO_TIMESTAMP('10000-01-01 00:00:00'))"
)
iterate_obj = cur if fetch_method == "fetchone" else iter(cur)
fetch_next_fn = getattr(iterate_obj, fetch_method)
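# fetch_next_fn is either cur.fetchone or iter(cur).__next__, depending on the parametrized fetch_method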
# first fetch doesn't raise error
fetch_next_fn()
with pytest.raises(
InterfaceError,
match="date value out of range"
if IS_WINDOWS
else "year 10000 is out of range",
):
fetch_next_fn()
@pytest.mark.skipolddriver
def test_describe(conn_cnx):
with conn_cnx() as con:
with con.cursor() as cur:
table_name = random_string(5, "test_describe_")
# test select
description = cur.describe(
"select * from VALUES(1, 3.1415926, 'snow', TO_TIMESTAMP('2021-01-01 00:00:00'))"
)
assert description is not None
column_types = [column[1] for column in description]
assert constants.FIELD_ID_TO_NAME[column_types[0]] == "FIXED"
assert constants.FIELD_ID_TO_NAME[column_types[1]] == "FIXED"
assert constants.FIELD_ID_TO_NAME[column_types[2]] == "TEXT"
assert "TIMESTAMP" in constants.FIELD_ID_TO_NAME[column_types[3]]
assert len(cur.fetchall()) == 0
# test insert
cur.execute(f"create table {table_name} (aa int)")
try:
description = cur.describe(
"insert into {name}(aa) values({value})".format(
name=table_name, value="1234"
)
)
assert description[0][0] == "number of rows inserted"
assert cur.rowcount is None
finally:
cur.execute(f"drop table if exists {table_name}")
@pytest.mark.skipolddriver
def test_fetch_batches_with_sessions(conn_cnx):
rowcount = 250_000
with conn_cnx() as con:
with con.cursor() as cur:
cur.execute(
f"select seq4() as foo from table(generator(rowcount=>{rowcount}))"
)
num_batches = len(cur.get_result_batches())
with mock.patch(
"snowflake.connector.network.SnowflakeRestful._use_requests_session",
side_effect=con._rest._use_requests_session,
) as get_session_mock:
result = cur.fetchall()
# all but one batch is downloaded using a session
assert get_session_mock.call_count == num_batches - 1
assert len(result) == rowcount
|
py | 1a4ffd71b2a0bf6a8d1c43c187ec96a2f5827ef1 | import unittest
from unittest import mock
import shutil
import tempfile
from typing import Sequence
import lib
from lib import storage, bitcoin, keystore, constants
from lib.transaction import Transaction
from lib.simple_config import SimpleConfig
from lib.wallet import TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT, sweep
from lib.util import bfh, bh2u
from plugins.trustedcoin import trustedcoin
from . import TestCaseForTestnet
from . import SequentialTestCase
from .test_bitcoin import needs_test_with_all_ecc_implementations
class WalletIntegrityHelper:
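"""Helpers for constructing throw-away test wallets and sanity-checking keystores."""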
gap_limit = 1 # make tests run faster
@classmethod
def check_seeded_keystore_sanity(cls, test_obj, ks):
test_obj.assertTrue(ks.is_deterministic())
test_obj.assertFalse(ks.is_watching_only())
test_obj.assertFalse(ks.can_import())
test_obj.assertTrue(ks.has_seed())
@classmethod
def check_xpub_keystore_sanity(cls, test_obj, ks):
test_obj.assertTrue(ks.is_deterministic())
test_obj.assertTrue(ks.is_watching_only())
test_obj.assertFalse(ks.can_import())
test_obj.assertFalse(ks.has_seed())
@classmethod
def create_standard_wallet(cls, ks, gap_limit=None):
store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
store.put('keystore', ks.dump())
store.put('gap_limit', gap_limit or cls.gap_limit)
w = lib.wallet.Standard_Wallet(store)
w.synchronize()
return w
@classmethod
def create_imported_wallet(cls, privkeys=False):
store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
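# with privkeys=True the wallet is backed by an Imported_KeyStore (imported private
# keys); otherwise no keystore is stored and the wallet is address-only (watching)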
if privkeys:
k = keystore.Imported_KeyStore({})
store.put('keystore', k.dump())
w = lib.wallet.Imported_Wallet(store)
return w
@classmethod
def create_multisig_wallet(cls, keystores: Sequence, multisig_type: str, gap_limit=None):
"""Creates a multisig wallet."""
store = storage.WalletStorage('if_this_exists_mocking_failed_648151893')
for i, ks in enumerate(keystores):
cosigner_index = i + 1
store.put('x%d/' % cosigner_index, ks.dump())
store.put('wallet_type', multisig_type)
store.put('gap_limit', gap_limit or cls.gap_limit)
w = lib.wallet.Multisig_Wallet(store)
w.synchronize()
return w
# TODO passphrase/seed_extension
class TestWalletKeystoreAddressIntegrityForMainnet(SequentialTestCase):
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_standard(self, mock_write):
seed_words = 'cycle rocket west magnet parrot shuffle foot correct salt library feed song'
self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
ks = keystore.from_seed(seed_words, '', False)
WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks)
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K32jECVM729vWgGq4mUDJCk1ozqAStTphzQtCTuoFmFafNoG1g55iCnBTXUzz3zWnDb5CVLGiFvmaZjuazHDL8a81cPQ8KL6')
self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52CwBdDWroaZf8U')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2pkh')
self.assertEqual(w.get_receiving_addresses()[0], '1NNkttn1YvVGdqBW4PR6zvc3Zx3H5owKRf')
self.assertEqual(w.get_change_addresses()[0], '1KSezYMhAJMWqFbVFB2JshYg69UpmEXR4D')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_segwit(self, mock_write):
seed_words = 'bitter grass shiver impose acquire brush forget axis eager alone wine silver'
self.assertEqual(bitcoin.seed_type(seed_words), 'segwit')
ks = keystore.from_seed(seed_words, '', False)
WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks)
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xprv, 'zprvAZswDvNeJeha8qZ8g7efN3FXYVJLaEUsE9TW6qXDEbVe74AZ75c2sZFZXPNFzxnhChDQ89oC8C5AjWwHmH1HeRKE1c4kKBQAmjUDdKDUZw2')
self.assertEqual(ks.xpub, 'zpub6nsHdRuY92FsMKdbn9BfjBCG6X8pyhCibNP6uDvpnw2cyrVhecvHRMa3Ne8kdJZxjxgwnpbHLkcR4bfnhHy6auHPJyDTQ3kianeuVLdkCYQ')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2wpkh')
self.assertEqual(w.get_receiving_addresses()[0], 'bc1q3g5tmkmlvxryhh843v4dz026avatc0zzr6h3af')
self.assertEqual(w.get_change_addresses()[0], 'bc1qdy94n2q5qcp0kg7v9yzwe6wvfkhnvyzje7nx2p')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_old(self, mock_write):
seed_words = 'powerful random nobody notice nothing important anyway look away hidden message over'
self.assertEqual(bitcoin.seed_type(seed_words), 'old')
ks = keystore.from_seed(seed_words, '', False)
WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks)
self.assertTrue(isinstance(ks, keystore.Old_KeyStore))
self.assertEqual(ks.mpk, 'e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2pkh')
self.assertEqual(w.get_receiving_addresses()[0], '1FJEEB8ihPMbzs2SkLmr37dHyRFzakqUmo')
self.assertEqual(w.get_change_addresses()[0], '1KRW8pH6HFHZh889VDq6fEKvmrsmApwNfe')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_seed_2fa(self, mock_write):
seed_words = 'kiss live scene rude gate step hip quarter bunker oxygen motor glove'
self.assertEqual(bitcoin.seed_type(seed_words), '2fa')
xprv1, xpub1, xprv2, xpub2 = trustedcoin.TrustedCoinPlugin.xkeys_from_seed(seed_words, '')
ks1 = keystore.from_xprv(xprv1)
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xprv, 'xprv9uraXy9F3HP7i8QDqwNTBiD8Jf4bPD4Epif8cS8qbUbgeidUesyZpKmzfcSeHutsGfFnjgih7kzwTB5UQVRNB5LoXaNc8pFusKYx3KVVvYR')
self.assertEqual(ks1.xpub, 'xpub68qvwUg8sewQvcUgwxuTYr9rrgu5nfn6BwajQpYT9p8fXWxdCRHpN86UWruWJAD1ede8Sv8ERrTa22Gyc4SBfm7zFpcyoVWVBKCVwnw6s1J')
self.assertEqual(ks1.xpub, xpub1)
ks2 = keystore.from_xprv(xprv2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
self.assertEqual(ks2.xprv, 'xprv9uraXy9F3HP7kKSiRAvLV7Nrjj7YzspDys7dvGLLu4tLZT49CEBxPWp88dHhVxvZ69SHrPQMUCWjj4Ka2z9kNvs1HAeEf3extGGeSWqEVqf')
self.assertEqual(ks2.xpub, 'xpub68qvwUg8sewQxoXBXCTLrFKbHkx3QLY5M63EiejxTQRKSFPHjmWCwK8byvZMM2wZNYA3SmxXoma3M1zxhGESHZwtB7SwrxRgKXAG8dCD2eS')
self.assertEqual(ks2.xpub, xpub2)
long_user_id, short_id = trustedcoin.get_user_id(
{'x1/': {'xpub': xpub1},
'x2/': {'xpub': xpub2}})
xpub3 = trustedcoin.make_xpub(trustedcoin.get_signing_xpub(), long_user_id)
ks3 = keystore.from_xpub(xpub3)
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks3)
self.assertTrue(isinstance(ks3, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2, ks3], '2of3')
self.assertEqual(w.txin_type, 'p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '35L8XmCDoEBKeaWRjvmZvoZvhp8BXMMMPV')
self.assertEqual(w.get_change_addresses()[0], '3PeZEcumRqHSPNN43hd4yskGEBdzXgY8Cy')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_seed_bip44_standard(self, mock_write):
seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks = keystore.from_bip39_seed(seed_words, '', "m/44'/0'/0'")
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xprv, 'xprv9zGLcNEb3cHUKizLVBz6RYeE9bEZAVPjH2pD1DEzCnPcsemWc3d3xTao8sfhfUmDLMq6e3RcEMEvJG1Et8dvfL8DV4h7mwm9J6AJsW9WXQD')
self.assertEqual(ks.xpub, 'xpub6DFh1smUsyqmYD4obDX6ngaxhd53Zx7aeFjoobebm7vbkT6f9awJWFuGzBT9FQJEWFBL7UyhMXtYzRcwDuVbcxtv9Ce2W9eMm4KXLdvdbjv')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2pkh')
self.assertEqual(w.get_receiving_addresses()[0], '16j7Dqk3Z9DdTdBtHcCVLaNQy9MTgywUUo')
self.assertEqual(w.get_change_addresses()[0], '1GG5bVeWgAp5XW7JLCphse14QaC4qiHyWn')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_seed_bip49_p2sh_segwit(self, mock_write):
seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks = keystore.from_bip39_seed(seed_words, '', "m/49'/0'/0'")
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xprv, 'yprvAJEYHeNEPcyBoQYM7sGCxDiNCTX65u4ANgZuSGTrKN5YCC9MP84SBayrgaMyZV7zvkHrr3HVPTK853s2SPk4EttPazBZBmz6QfDkXeE8Zr7')
self.assertEqual(ks.xpub, 'ypub6XDth9u8DzXV1tcpDtoDKMf6kVMaVMn1juVWEesTshcX4zUVvfNgjPJLXrD9N7AdTLnbHFL64KmBn3SNaTe69iZYbYCqLCCNPZKbLz9niQ4')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2wpkh-p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '35ohQTdNykjkF1Mn9nAVEFjupyAtsPAK1W')
self.assertEqual(w.get_change_addresses()[0], '3KaBTcviBLEJajTEMstsA2GWjYoPzPK7Y7')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_seed_bip84_native_segwit(self, mock_write):
# test case from bip84
seed_words = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks = keystore.from_bip39_seed(seed_words, '', "m/84'/0'/0'")
self.assertTrue(isinstance(ks, keystore.BIP32_KeyStore))
self.assertEqual(ks.xprv, 'zprvAdG4iTXWBoARxkkzNpNh8r6Qag3irQB8PzEMkAFeTRXxHpbF9z4QgEvBRmfvqWvGp42t42nvgGpNgYSJA9iefm1yYNZKEm7z6qUWCroSQnE')
self.assertEqual(ks.xpub, 'zpub6rFR7y4Q2AijBEqTUquhVz398htDFrtymD9xYYfG1m4wAcvPhXNfE3EfH1r1ADqtfSdVCToUG868RvUUkgDKf31mGDtKsAYz2oz2AGutZYs')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(w.txin_type, 'p2wpkh')
self.assertEqual(w.get_receiving_addresses()[0], 'bc1qcr8te4kr609gcawutmrza0j4xv80jy8z306fyu')
self.assertEqual(w.get_change_addresses()[0], 'bc1q8c6fshw2dlwun7ekn9qwf37cu2rn755upcp6el')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_multisig_seed_standard(self, mock_write):
seed_words = 'blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure'
self.assertEqual(bitcoin.seed_type(seed_words), 'standard')
ks1 = keystore.from_seed(seed_words, '', True)
WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks1)
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xprv, 'xprv9s21ZrQH143K3t9vo23J3hajRbzvkRLJ6Y1zFrUFAfU3t8oooMPfb7f87cn5KntgqZs5nipZkCiBFo5ZtaSD2eDo7j7CMuFV8Zu6GYLTpY6')
self.assertEqual(ks1.xpub, 'xpub661MyMwAqRbcGNEPu3aJQqXTydqR9t49Tkwb4Esrj112kw8xLthv8uybxvaki4Ygt9xiwZUQGeFTG7T2TUzR3eA4Zp3aq5RXsABHFBUrq4c')
# electrum seed: ghost into match ivory badge robot record tackle radar elbow traffic loud
ks2 = keystore.from_xpub('xpub661MyMwAqRbcGfCPEkkyo5WmcrhTq8mi3xuBS7VEZ3LYvsgY1cCFDbenT33bdD12axvrmXhuX3xkAbKci3yZY9ZEk8vhLic7KNhLjqdh5ec')
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2')
self.assertEqual(w.txin_type, 'p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '32ji3QkAgXNz6oFoRfakyD3ys1XXiERQYN')
self.assertEqual(w.get_change_addresses()[0], '36XWwEHrrVCLnhjK5MrVVGmUHghr9oWTN1')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_electrum_multisig_seed_segwit(self, mock_write):
seed_words = 'snow nest raise royal more walk demise rotate smooth spirit canyon gun'
self.assertEqual(bitcoin.seed_type(seed_words), 'segwit')
ks1 = keystore.from_seed(seed_words, '', True)
WalletIntegrityHelper.check_seeded_keystore_sanity(self, ks1)
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xprv, 'ZprvAjxLRqPiDfPDxXrm8JvcoCGRAW6xUtktucG6AMtdzaEbTEJN8qcECvujfhtDU3jLJ9g3Dr3Gz5m1ypfMs8iSUh62gWyHZ73bYLRWyeHf6y4')
self.assertEqual(ks1.xpub, 'Zpub6xwgqLvc42wXB1wEELTdALD9iXwStMUkGqBgxkJFYumaL2dWgNvUkjEDWyDFZD3fZuDWDzd1KQJ4NwVHS7hs6H6QkpNYSShfNiUZsgMdtNg')
# electrum seed: hedgehog sunset update estate number jungle amount piano friend donate upper wool
ks2 = keystore.from_xpub('Zpub6y4oYeETXAbzLNg45wcFDGwEG3vpgsyMJybiAfi2pJtNF3i3fJVxK2BeZJaw7VeKZm192QHvXP3uHDNpNmNDbQft9FiMzkKUhNXQafUMYUY')
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2')
self.assertEqual(w.txin_type, 'p2wsh')
self.assertEqual(w.get_receiving_addresses()[0], 'bc1qvzezdcv6vs5h45ugkavp896e0nde5c5lg5h0fwe2xyfhnpkxq6gq7pnwlc')
self.assertEqual(w.get_change_addresses()[0], 'bc1qxqf840dqswcmu7a8v82fj6ej0msx08flvuy6kngr7axstjcaq6us9hrehd')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_multisig_seed_bip45_standard(self, mock_write):
seed_words = 'treat dwarf wealth gasp brass outside high rent blood crowd make initial'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
ks1 = keystore.from_bip39_seed(seed_words, '', "m/45'/0")
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xprv, 'xprv9vyEFyXf7pYVv4eDU3hhuCEAHPHNGuxX73nwtYdpbLcqwJCPwFKknAK8pHWuHHBirCzAPDZ7UJHrYdhLfn1NkGp9rk3rVz2aEqrT93qKRD9')
self.assertEqual(ks1.xpub, 'xpub69xafV4YxC6o8Yiga5EiGLAtqR7rgNgNUGiYgw3S9g9pp6XYUne1KxdcfYtxwmA3eBrzMFuYcNQKfqsXCygCo4GxQFHfywxpUbKNfYvGJka')
# bip39 seed: tray machine cook badge night page project uncover ritual toward person enact
# der: m/45'/0
ks2 = keystore.from_xpub('xpub6B26nSWddbWv7J3qQn9FbwPPQktSBdPQfLfHhRK4375QoZq8fvM8rQey1koGSTxC5xVoMzNMaBETMUmCqmXzjc8HyAbN7LqrvE4ovGRwNGg')
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2')
self.assertEqual(w.txin_type, 'p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '3JPTQ2nitVxXBJ1yhMeDwH6q417UifE3bN')
self.assertEqual(w.get_change_addresses()[0], '3FGyDuxgUDn2pSZe5xAJH1yUwSdhzDMyEE')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_multisig_seed_p2sh_segwit(self, mock_write):
# bip39 seed: pulse mixture jazz invite dune enrich minor weapon mosquito flight fly vapor
# der: m/49'/0'/0'
# NOTE: there is currently no bip43 standard derivation path for p2wsh-p2sh
ks1 = keystore.from_xprv('YprvAUXFReVvDjrPerocC3FxVH748sJUTvYjkAhtKop5VnnzVzMEHr1CHrYQKZwfJn1As3X4LYMav6upxd5nDiLb6SCjRZrBH76EFvyQAG4cn79')
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xpub, 'Ypub6hWbqA2p47QgsLt5J4nxrR3ngu8xsPGb7PdV8CDh48KyNngNqPKSqertAqYhQ4umELu1UsZUCYfj9XPA6AdSMZWDZQobwF7EJ8uNrECaZg1')
# bip39 seed: slab mixture skin evoke harsh tattoo rare crew sphere extend balcony frost
# der: m/49'/0'/0'
ks2 = keystore.from_xpub('Ypub6iNDhL4WWq5kFZcdFqHHwX4YTH4rYGp8xbndpRrY7WNZFFRfogSrL7wRTajmVHgR46AT1cqUG1mrcRd7h1WXwBsgX2QvT3zFbBCDiSDLkau')
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2')
self.assertEqual(w.txin_type, 'p2wsh-p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '35LeC45QgCVeRor1tJD6LiDgPbybBXisns')
self.assertEqual(w.get_change_addresses()[0], '39RhtDchc6igmx5tyoimhojFL1ZbQBrXa6')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip32_extended_version_bytes(self, mock_write):
seed_words = 'crouch dumb relax small truck age shine pink invite spatial object tenant'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
bip32_seed = keystore.bip39_to_seed(seed_words, '')
self.assertEqual('0df68c16e522eea9c1d8e090cfb2139c3b3a2abed78cbcb3e20be2c29185d3b8df4e8ce4e52a1206a688aeb88bfee249585b41a7444673d1f16c0d45755fa8b9',
bh2u(bip32_seed))
def create_keystore_from_bip32seed(xtype):
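# derivation 'm/' uses the BIP32 master key directly for every xtype, so the
# keystores below share the same underlying keys and differ only in the
# extended-key version bytes (and resulting script type) -- the point of this test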
ks = keystore.BIP32_KeyStore({})
ks.add_xprv_from_seed(bip32_seed, xtype=xtype, derivation='m/')
return ks
ks = create_keystore_from_bip32seed(xtype='standard')
self.assertEqual('033a05ec7ae9a9833b0696eb285a762f17379fa208b3dc28df1c501cf84fe415d0', ks.derive_pubkey(0, 0))
self.assertEqual('02bf27f41683d84183e4e930e66d64fc8af5508b4b5bf3c473c505e4dbddaeed80', ks.derive_pubkey(1, 0))
ks = create_keystore_from_bip32seed(xtype='standard') # p2pkh
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K3nyWMZVjzGL4KKAE1zahmhTHuV5pdw4eK3o3igC5QywgQG7UTRe6TGBniPDpPFWzXMeMUFbBj8uYsfXGjyMmF54wdNt8QBm')
self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcGH3yTb2kMQGnsLziRTJZ8vNthsVSCGbdBr8CGDWKxnGAFYgyKTzBtwvPPmfVAWJuFmxRXjSbUTg87wDkWQ5GmzpfUcN9t8Z')
self.assertEqual(w.get_receiving_addresses()[0], '19fWEVaXqgJFFn7JYNr6ouxyjZy3uK7CdK')
self.assertEqual(w.get_change_addresses()[0], '1EEX7da31qndYyeKdbM665w1ze5gbkkAZZ')
ks = create_keystore_from_bip32seed(xtype='p2wpkh-p2sh')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'yprvABrGsX5C9janu6AdBvHNCMRZVHJfxcaCgoyWgsyi1wSXN9cGyLMe33bpRU54TLJ1ruJbTrpNqusYQeFvBx1CXNb9k1DhKtBFWo8b1sLbXhN')
self.assertEqual(ks.xpub, 'ypub6QqdH2c5z7967aF6HwpNZVNJ3K9AN5J442u7VGPKaGyWEwwRWsftaqvJGkeZKNe7Jb3C9FG3dAfT94ZzFRrcGhMizGvB6Jtm3itJsEFhxMC')
self.assertEqual(w.get_receiving_addresses()[0], '34SAT5gGF5UaBhhSZ8qEuuxYvZ2cm7Zi23')
self.assertEqual(w.get_change_addresses()[0], '38unULZaetSGSKvDx7Krukh8zm8NQnxGiA')
ks = create_keystore_from_bip32seed(xtype='p2wpkh')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'zprvAWgYBBk7JR8GkPMk2H4zQSX4fFT7uEZhbvVjUGsbPwpQRFRWDzXCf7FxSg2eTEwwGYRQDLQwJaE6HvsUueRDKcGkcLv7unzjnXCEQVWhrF9')
self.assertEqual(ks.xpub, 'zpub6jftahH18ngZxsSD8JbzmaToDHHcJhHYy9RLGfHCxHMPJ3kemXqTCuaSHxc9KHJ2iE9ztirc5q212MBYy8Gd4w3KrccbgDiFKSwxFpYKEH6')
self.assertEqual(w.get_receiving_addresses()[0], 'bc1qtuynwzd0d6wptvyqmc6ehkm70zcamxpshyzu5e')
self.assertEqual(w.get_change_addresses()[0], 'bc1qjy5zunxh6hjysele86qqywfa437z4xwmleq8wk')
ks = create_keystore_from_bip32seed(xtype='standard') # p2sh
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'xprv9s21ZrQH143K3nyWMZVjzGL4KKAE1zahmhTHuV5pdw4eK3o3igC5QywgQG7UTRe6TGBniPDpPFWzXMeMUFbBj8uYsfXGjyMmF54wdNt8QBm')
self.assertEqual(ks.xpub, 'xpub661MyMwAqRbcGH3yTb2kMQGnsLziRTJZ8vNthsVSCGbdBr8CGDWKxnGAFYgyKTzBtwvPPmfVAWJuFmxRXjSbUTg87wDkWQ5GmzpfUcN9t8Z')
self.assertEqual(w.get_receiving_addresses()[0], '3F4nm8Vunb7mxVvqhUP238PYge2hpU5qYv')
self.assertEqual(w.get_change_addresses()[0], '3N8jvKGmxzVHENn6B4zTdZt3N9bmRKjj96')
ks = create_keystore_from_bip32seed(xtype='p2wsh-p2sh')
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'YprvANkMzkodih9AKfL18akM2RmND5LwAyFo15dBc9FFPiGvzLBBjjjv8ATkEB2Y1mWv6NNaLSpVj8G3XosgVBA9frhpaUL6jHeFQXQTbqVPcv2')
self.assertEqual(ks.xpub, 'Ypub6bjiQGLXZ4hTY9QUEcHMPZi6m7BRaRyeNJYnQXerx3ous8WLHH4AfxnE5Tc2sos1Y47B1qGAWP3xGEBkYf1ZRBUPpk2aViMkwTABT6qoiBb')
self.assertEqual(w.get_receiving_addresses()[0], '3L1BxLLASGKE3DR1ruraWm3hZshGCKqcJx')
self.assertEqual(w.get_change_addresses()[0], '3NDGcbZVXTpaQWRhiuVPpXsNt4g2JiCX4E')
ks = create_keystore_from_bip32seed(xtype='p2wsh')
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'ZprvAhadJRUYsNgeAxX7xwXyEWrsP3VP7bFHvC9QPY98miep3RzQzPuUkE7tFNz81gAqW1VP5vR4BncbR6VFCsaAU6PRSp2XKCTjgFU6zRpk6Xp')
self.assertEqual(ks.xpub, 'Zpub6vZyhw1ShkEwPSbb4y4ybeobw5KsX3y9HR51BvYkL4BnvEKZXwDjJ2SN6fZcsiWvwhDymJriy3QW9WoKGMRaDR9zh5j15dBFDBDpqjK1ekQ')
self.assertEqual(w.get_receiving_addresses()[0], 'bc1q84x0yrztvcjg88qef4d6978zccxulcmc9y88xcg4ghjdau999x7q7zv2qe')
self.assertEqual(w.get_change_addresses()[0], 'bc1q0fj5mra96hhnum80kllklc52zqn6kppt3hyzr49yhr3ecr42z3tsrkg3gs')
class TestWalletKeystoreAddressIntegrityForTestnet(TestCaseForTestnet):
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip39_multisig_seed_p2sh_segwit_testnet(self, mock_write):
# bip39 seed: finish seminar arrange erosion sunny coil insane together pretty lunch lunch rose
# der: m/49'/1'/0'
# NOTE: there is currently no bip43 standard derivation path for p2wsh-p2sh
ks1 = keystore.from_xprv('Uprv9BEixD3As2LK5h6G2SNT3cTqbZpsWYPceKTSuVAm1yuSybxSvQz2MV1o8cHTtctQmj4HAenb3eh5YJv4YRZjv35i8fofVnNbs4Dd2B4i5je')
self.assertTrue(isinstance(ks1, keystore.BIP32_KeyStore))
self.assertEqual(ks1.xpub, 'Upub5QE5Mia4hPtcJBAj8TuTQkQa9bfMv17U1YP3hsaNaKSRrQHbTxJGuHLGyv3MbKZixuPyjfXGUdbTjE4KwyFcX8YD7PX5ybTDbP11UT8UpZR')
# bip39 seed: square page wood spy oil story rebel give milk screen slide shuffle
# der: m/49'/1'/0'
ks2 = keystore.from_xpub('Upub5QRzUGRJuWJe5MxGzwgQAeyJjzcdGTXkkq77w6EfBkCyf5iWppSaZ4caY2MgWcU9LP4a4uE5apUFN4wLoENoe9tpu26mrUxeGsH84dN3JFh')
WalletIntegrityHelper.check_xpub_keystore_sanity(self, ks2)
self.assertTrue(isinstance(ks2, keystore.BIP32_KeyStore))
w = WalletIntegrityHelper.create_multisig_wallet([ks1, ks2], '2of2')
self.assertEqual(w.txin_type, 'p2wsh-p2sh')
self.assertEqual(w.get_receiving_addresses()[0], '2MzsfTfTGomPRne6TkctMmoDj6LwmVkDrMt')
self.assertEqual(w.get_change_addresses()[0], '2NFp9w8tbYYP9Ze2xQpeYBJQjx3gbXymHX7')
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bip32_extended_version_bytes(self, mock_write):
seed_words = 'crouch dumb relax small truck age shine pink invite spatial object tenant'
self.assertEqual(keystore.bip39_is_checksum_valid(seed_words), (True, True))
bip32_seed = keystore.bip39_to_seed(seed_words, '')
self.assertEqual('0df68c16e522eea9c1d8e090cfb2139c3b3a2abed78cbcb3e20be2c29185d3b8df4e8ce4e52a1206a688aeb88bfee249585b41a7444673d1f16c0d45755fa8b9',
bh2u(bip32_seed))
def create_keystore_from_bip32seed(xtype):
ks = keystore.BIP32_KeyStore({})
ks.add_xprv_from_seed(bip32_seed, xtype=xtype, derivation='m/')
return ks
ks = create_keystore_from_bip32seed(xtype='standard')
self.assertEqual('033a05ec7ae9a9833b0696eb285a762f17379fa208b3dc28df1c501cf84fe415d0', ks.derive_pubkey(0, 0))
self.assertEqual('02bf27f41683d84183e4e930e66d64fc8af5508b4b5bf3c473c505e4dbddaeed80', ks.derive_pubkey(1, 0))
ks = create_keystore_from_bip32seed(xtype='standard') # p2pkh
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'tprv8ZgxMBicQKsPecD328MF9ux3dSaSFWci7FNQmuWH7uZ86eY8i3XpvjK8KSH8To2QphiZiUqaYc6nzDC6bTw8YCB9QJjaQL5pAApN4z7vh2B')
self.assertEqual(ks.xpub, 'tpubD6NzVbkrYhZ4Y5Epun1qZKcACU6NQqocgYyC4RYaYBMWw8nuLSMR7DvzVamkqxwRgrTJ1MBMhc8wwxT2vbHqMu8RBXy4BvjWMxR5EdZroxE')
self.assertEqual(w.get_receiving_addresses()[0], 'mpBTXYfWehjW2tavFwpUdqBJbZZkup13k2')
self.assertEqual(w.get_change_addresses()[0], 'mtkUQgf1psDtL67wMAKTv19LrdgPWy6GDQ')
ks = create_keystore_from_bip32seed(xtype='p2wpkh-p2sh')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'uprv8tXDerPXZ1QsVuQ9rV8sN13YoQitC8cD2MtdZJQAVuw19kMMxhhPYnyGLeEiThgLELqNTxS91GTLsVofKAM9LRrkGeRzzEuJRtt1Tcostr7')
self.assertEqual(ks.xpub, 'upub57Wa4MvRPNyAiPUcxWfsj8zHMSZNbbL4PapEMgon4FTz2YgWWF1e6bHkBvpDKk2Rg2Zy9LsonXFFbv7jNeCZ5kdKWv8UkfcoxpdjJrZuBX6')
self.assertEqual(w.get_receiving_addresses()[0], '2MuzNWpcHrXyvPVKzEGT7Xrwp8uEnXXjWnK')
self.assertEqual(w.get_change_addresses()[0], '2MzTzY5VcGLwce7YmdEwjXhgQD7LYEKLJTm')
ks = create_keystore_from_bip32seed(xtype='p2wpkh')
w = WalletIntegrityHelper.create_standard_wallet(ks)
self.assertEqual(ks.xprv, 'vprv9DMUxX4ShgxMMCbGgqvVa693yNsL8kbhwUQrLhJ3svJtCrAbDMrxArdQMrCJTcLFdyxBDS2hTvotknRE2rmA8fYM8z8Ra9inhcwerEsG6Ev')
self.assertEqual(ks.xpub, 'vpub5SLqN2bLY4WeZgfjnsTVwE5nXQhpYDKZJhLT95hfSFqs5eVjkuBCiewtD8moKegM5fgmtpUNFBboVCjJ6LcZszJvPFpuLaSJEYhNhUAnrCS')
self.assertEqual(w.get_receiving_addresses()[0], 'tb1qtuynwzd0d6wptvyqmc6ehkm70zcamxpsaze002')
self.assertEqual(w.get_change_addresses()[0], 'tb1qjy5zunxh6hjysele86qqywfa437z4xwm4lm549')
ks = create_keystore_from_bip32seed(xtype='standard') # p2sh
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'tprv8ZgxMBicQKsPecD328MF9ux3dSaSFWci7FNQmuWH7uZ86eY8i3XpvjK8KSH8To2QphiZiUqaYc6nzDC6bTw8YCB9QJjaQL5pAApN4z7vh2B')
self.assertEqual(ks.xpub, 'tpubD6NzVbkrYhZ4Y5Epun1qZKcACU6NQqocgYyC4RYaYBMWw8nuLSMR7DvzVamkqxwRgrTJ1MBMhc8wwxT2vbHqMu8RBXy4BvjWMxR5EdZroxE')
self.assertEqual(w.get_receiving_addresses()[0], '2N6czpsRwQ3d8AHZPNbztf5NotzEsaZmVQ8')
self.assertEqual(w.get_change_addresses()[0], '2NDgwz4CoaSzdSAQdrCcLFWsJaVowCNgiPA')
ks = create_keystore_from_bip32seed(xtype='p2wsh-p2sh')
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'Uprv95RJn67y7xyEvUZXo9brC5PMXCm9QVHoLdYJUZfhsgmQmvvGj75fduqC9MCC28uETouMLYSFtUqqzfRRcPW6UuyR77YQPeNJKd9t3XutF8b')
self.assertEqual(ks.xpub, 'Upub5JQfBberxLXY8xdzuB8rZDL65Ebdox1ehrTuGx5KS2JPejFRGePvBi9fzdmgtBFKuVdx1vsvfjdkj5jVfsMWEEjzMPEtA55orYubtrCZmRr')
self.assertEqual(w.get_receiving_addresses()[0], '2NBZQ25GC3ipaF13ZY3UT8i2xnDuS17pJqx')
self.assertEqual(w.get_change_addresses()[0], '2NDmUgLVX8vKvcJ4FQ37GSUre6QtBzKkb6k')
ks = create_keystore_from_bip32seed(xtype='p2wsh')
w = WalletIntegrityHelper.create_multisig_wallet([ks], '1of1')
self.assertEqual(ks.xprv, 'Vprv16YtLrHXxePM6noKqtFtMtmUgBE9bEpF3fPLmpvuPksssLostujtdHBwqhEeVuzESz22UY8hyPx9ed684SQpCmUKSVhpxPFbvVNY7qnviNR')
self.assertEqual(ks.xpub, 'Vpub5dEvVGKn7251zFq7jXvUmJRbFCk5ka19cxz84LyCp2gGhq4eXJZUomop1qjGt5uFK8kkmQUV8PzJcNM4PZmX2URbDiwJjyuJ8GyFHRrEmmG')
self.assertEqual(w.get_receiving_addresses()[0], 'tb1q84x0yrztvcjg88qef4d6978zccxulcmc9y88xcg4ghjdau999x7qf2696k')
self.assertEqual(w.get_change_addresses()[0], 'tb1q0fj5mra96hhnum80kllklc52zqn6kppt3hyzr49yhr3ecr42z3ts5777jl')
class TestWalletSending(TestCaseForTestnet):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.electrum_path = tempfile.mkdtemp()
cls.config = SimpleConfig({'electrum_path': cls.electrum_path})
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls.electrum_path)
def create_standard_wallet_from_seed(self, seed_words):
ks = keystore.from_seed(seed_words, '', False)
return WalletIntegrityHelper.create_standard_wallet(ks, gap_limit=2)
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_between_p2wpkh_and_compressed_p2pkh(self, mock_write):
wallet1 = self.create_standard_wallet_from_seed('bitter grass shiver impose acquire brush forget axis eager alone wine silver')
wallet2 = self.create_standard_wallet_from_seed('cycle rocket west magnet parrot shuffle foot correct salt library feed song')
# bootstrap wallet1
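# (receive_tx_callback simulates the network delivering the funding tx,
#  giving wallet1 an unconfirmed coin to spend in the steps below)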
funding_tx = Transaction('01000000014576dacce264c24d81887642b726f5d64aa7825b21b350c7b75a57f337da6845010000006b483045022100a3f8b6155c71a98ad9986edd6161b20d24fad99b6463c23b463856c0ee54826d02200f606017fd987696ebbe5200daedde922eee264325a184d5bbda965ba5160821012102e5c473c051dae31043c335266d0ef89c1daab2f34d885cc7706b267f3269c609ffffffff0240420f00000000001600148a28bddb7f61864bdcf58b2ad13d5aeb3abc3c42a2ddb90e000000001976a914c384950342cb6f8df55175b48586838b03130fad88ac00000000')
funding_txid = funding_tx.txid()
funding_output_value = 1000000
self.assertEqual('add2535aedcbb5ba79cc2260868bb9e57f328738ca192937f2c92e0e94c19203', funding_txid)
wallet1.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# wallet1 -> wallet2
outputs = [(bitcoin.TYPE_ADDRESS, wallet2.get_receiving_address(), 250000)]
tx = wallet1.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet1.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet1.is_mine(wallet1.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('010000000001010392c1940e2ec9f2372919ca3887327fe5b98b866022cc79bab5cbed5a53d2ad0000000000feffffff0290d00300000000001976a914ea7804a2c266063572cc009a63dc25dcc0e9d9b588ac285e0b0000000000160014690b59a8140602fb23cc2904ece9cc4daf361052024730440220608a5339ca894592da82119e1e4a1d09335d70a552c683687223b8ed724465e902201b3f0feccf391b1b6257e4b18970ae57d7ca060af2dae519b3690baad2b2a34e0121030faee9b4a25b7db82023ca989192712cdd4cb53d3d9338591c7909e581ae1c0c00000000',
str(tx_copy))
self.assertEqual('3c06ae4d9be8226a472b3e7f7c127c7e3016f525d658d26106b80b4c7e3228e2', tx_copy.txid())
self.assertEqual('d8d930ae91dce73118c3fffabbdfcfb87f5d91673fb4c7dfd0fbe7cf03bf426b', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)  # strictly TX_HEIGHT_UNCONF_PARENT, but it makes no difference here
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet2 -> wallet1
outputs = [(bitcoin.TYPE_ADDRESS, wallet1.get_receiving_address(), 100000)]
tx = wallet2.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet2.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet2.is_mine(wallet2.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('0100000001e228327e4c0bb80661d258d625f516307e7c127c7f3e2b476a22e89b4dae063c000000006b483045022100d3895b31e7c9766987c6f53794c7394f534f4acecefda5479d963236f9703d0b022026dd4e40700ceb788f136faf54bf85b966648dc7c2a608d8110604f2d22d59070121030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cffeffffff02a0860100000000001600148a28bddb7f61864bdcf58b2ad13d5aeb3abc3c4268360200000000001976a914ca4c60999c46c2108326590b125aefd476dcb11888ac00000000',
str(tx_copy))
self.assertEqual('5f25707571eb776bdf14142f9966bf2a681906e0a79501edbb99a972c2ceb972', tx_copy.txid())
self.assertEqual('5f25707571eb776bdf14142f9966bf2a681906e0a79501edbb99a972c2ceb972', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet level checks
self.assertEqual((0, funding_output_value - 250000 - 5000 + 100000, 0), wallet1.get_balance())
self.assertEqual((0, 250000 - 5000 - 100000, 0), wallet2.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_between_p2sh_2of3_and_uncompressed_p2pkh(self, mock_write):
wallet1a = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4XJzYkhsCbDCcZRmDAKSD7bXi9mdCni7acVt45fxbTVZyU6jRGh29ULKTjoapkfFsSJvQHitcVKbQgzgkkYsAmaovcro7Mhf')
],
'2of3', gap_limit=2
)
wallet1b = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('cycle rocket west magnet parrot shuffle foot correct salt library feed song', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4YARFMEZPckrqJkw59GZD1PXtQnw14ukvWDofR7Z1HMeSCxfYEZVvg4VdZ8zGok5VxHwdrLqew5cMdQntWc5mT7mh1CSgrnX')
],
'2of3', gap_limit=2
)
# ^ third seed: ghost into match ivory badge robot record tackle radar elbow traffic loud
wallet2 = self.create_standard_wallet_from_seed('powerful random nobody notice nothing important anyway look away hidden message over')
# bootstrap wallet1
funding_tx = Transaction('010000000001014121f99dc02f0364d2dab3d08905ff4c36fc76c55437fd90b769c35cc18618280100000000fdffffff02d4c22d00000000001600143fd1bc5d32245850c8cb5be5b09c73ccbb9a0f75001bb7000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887024830450221008781c78df0c9d4b5ea057333195d5d76bc29494d773f14fa80e27d2f288b2c360220762531614799b6f0fb8d539b18cb5232ab4253dd4385435157b28a44ff63810d0121033de77d21926e09efd04047ae2d39dbd3fb9db446e8b7ed53e0f70f9c9478f735dac11300')
funding_txid = funding_tx.txid()
funding_output_value = 12000000
self.assertEqual('b25cd55687c9e528c2cfd546054f35fb6741f7cf32d600f07dfecdf2e1d42071', funding_txid)
wallet1a.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# wallet1 -> wallet2
outputs = [(bitcoin.TYPE_ADDRESS, wallet2.get_receiving_address(), 370000)]
tx = wallet1a.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx = Transaction(tx.serialize()) # simulates moving partial txn between cosigners
self.assertFalse(tx.is_complete())
wallet1b.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet1a.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet1a.is_mine(wallet1a.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('01000000017120d4e1f2cdfe7df000d632cff74167fb354f0546d5cfc228e5c98756d55cb201000000fdfe0000483045022100f9ce5616683e613ae14b98d56436454b003348a8172e2ed598018e3d206e57d7022030c65c6551e839f9e9409812be624dbb4e36bd4152c9ed9b0988c10fd8201d1401483045022100d5cb94d4d1dcf01bb9e9280e8178a7e9ada3ad14378ca543afcc9f5667b27cb2022018e76b74800a21934e73b226b34cbbe45c877fba64693da8a20d3cb330f2eafd014c69522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53aefeffffff0250a50500000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac2862b1000000000017a9142e517854aa54668128c0e9a3fdd4dec13ad571368700000000',
str(tx_copy))
self.assertEqual('26f3bdd0402e1cff19126244ebe3d32722cef0db507c7229ca8754f5e06ef25d', tx_copy.txid())
self.assertEqual('26f3bdd0402e1cff19126244ebe3d32722cef0db507c7229ca8754f5e06ef25d', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet2 -> wallet1
outputs = [(bitcoin.TYPE_ADDRESS, wallet1a.get_receiving_address(), 100000)]
tx = wallet2.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet2.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet2.is_mine(wallet2.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('01000000015df26ee0f55487ca29727c50dbf0ce2227d3e3eb44621219ff1c2e40d0bdf326000000008b483045022100bd9f61ba82507d3a28922fb8be129e14699dfa54ddd03cc9494f696d38ac4121022071afca6fad5bc5c09b0a675e6444be3e97dbbdbc283764ee5f4e27a032d933d80141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfeffffff02a08601000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887280b0400000000001976a914ca14915184a2662b5d1505ce7142c8ca066c70e288ac00000000',
str(tx_copy))
self.assertEqual('c573b3f8464a4ed40dfc79d0889a780f44e917beef7a75883b2427c2987f3e95', tx_copy.txid())
self.assertEqual('c573b3f8464a4ed40dfc79d0889a780f44e917beef7a75883b2427c2987f3e95', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet level checks
self.assertEqual((0, funding_output_value - 370000 - 5000 + 100000, 0), wallet1a.get_balance())
self.assertEqual((0, 370000 - 5000 - 100000, 0), wallet2.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_between_p2wsh_2of3_and_p2wsh_p2sh_2of2(self, mock_write):
wallet1a = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('bitter grass shiver impose acquire brush forget axis eager alone wine silver', '', True),
keystore.from_xpub('Vpub5fcdcgEwTJmbmqAktuK8Kyq92fMf7sWkcP6oqAii2tG47dNbfkGEGUbfS9NuZaRywLkHE6EmUksrqo32ZL3ouLN1HTar6oRiHpDzKMAF1tf'),
keystore.from_xpub('Vpub5fjkKyYnvSS4wBuakWTkNvZDaBM2vQ1MeXWq368VJHNr2eT8efqhpmZ6UUkb7s2dwCXv2Vuggjdhk4vZVyiAQTwUftvff73XcUGq2NQmWra')
],
'2of3', gap_limit=2
)
wallet1b = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('snow nest raise royal more walk demise rotate smooth spirit canyon gun', '', True),
keystore.from_xpub('Vpub5fjkKyYnvSS4wBuakWTkNvZDaBM2vQ1MeXWq368VJHNr2eT8efqhpmZ6UUkb7s2dwCXv2Vuggjdhk4vZVyiAQTwUftvff73XcUGq2NQmWra'),
keystore.from_xpub('Vpub5gSKXzxK7FeKQedu2q1z9oJWxqvX72AArW3HSWpEhc8othDH8xMDu28gr7gf17sp492BuJod8Tn7anjvJrKpETwqnQqX7CS8fcYyUtedEMk')
],
'2of3', gap_limit=2
)
# ^ third seed: hedgehog sunset update estate number jungle amount piano friend donate upper wool
wallet2a = WalletIntegrityHelper.create_multisig_wallet(
[
# bip39: finish seminar arrange erosion sunny coil insane together pretty lunch lunch rose, der: m/1234'/1'/0', p2wsh-p2sh multisig
keystore.from_xprv('Uprv9CvELvByqm8k2dpecJVjgLMX1z5DufEjY4fBC5YvdGF5WjGCa7GVJJ2fYni1tyuF7Hw83E6W2ZBjAhaFLZv2ri3rEsubkCd5avg4EHKoDBN'),
keystore.from_xpub('Upub5Qb8ik4Cnu8g97KLXKgVXHqY6tH8emQvqtBncjSKsyfTZuorPtTZgX7ovKKZHuuVGBVd1MTTBkWez1XXt2weN1sWBz6SfgRPQYEkNgz81QF')
],
'2of2', gap_limit=2
)
wallet2b = WalletIntegrityHelper.create_multisig_wallet(
[
# bip39: square page wood spy oil story rebel give milk screen slide shuffle, der: m/1234'/1'/0', p2wsh-p2sh multisig
keystore.from_xprv('Uprv9BbnKEXJxXaNvdEsRJ9VA9toYrSeFJh5UfGBpM2iKe8Uh7UhrM9K8ioL53s8gvCoGfirHHaqpABDAE7VUNw8LNU1DMJKVoWyeNKu9XcDC19'),
keystore.from_xpub('Upub5RuakRisg8h3F7u7iL2k3UJFa1uiK7xauHamzTxYBbn4PXbM7eajr6M9Q2VCr6cVGhfhqWQqxnABvtSATuVM1xzxk4nA189jJwzaMn1QX7V')
],
'2of2', gap_limit=2
)
# bootstrap wallet1
funding_tx = Transaction('01000000000101a41aae475d026c9255200082c7fad26dc47771275b0afba238dccda98a597bd20000000000fdffffff02400d0300000000002200203c43ac80d6e3015cf378bf6bac0c22456723d6050bef324ec641e7762440c63c9dcd410000000000160014824626055515f3ed1d2cfc9152d2e70685c71e8f02483045022100b9f39fad57d07ce1e18251424034f21f10f20e59931041b5167ae343ce973cf602200fefb727fa0ffd25b353f1bcdae2395898fe407b692c62f5885afbf52fa06f5701210301a28f68511ace43114b674371257bb599fd2c686c4b19544870b1799c954b40e9c11300')
funding_txid = funding_tx.txid()
funding_output_value = 200000
self.assertEqual('d2bd6c9d332db8e2c50aa521cd50f963fba214645aab2f7556e061a412103e21', funding_txid)
wallet1a.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# wallet1 -> wallet2
outputs = [(bitcoin.TYPE_ADDRESS, wallet2a.get_receiving_address(), 165000)]
tx = wallet1a.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
txid = tx.txid()
tx = Transaction(tx.serialize()) # simulates moving partial txn between cosigners
self.assertEqual(txid, tx.txid())
self.assertFalse(tx.is_complete())
wallet1b.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet1a.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet1a.is_mine(wallet1a.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('01000000000101213e1012a461e056752fab5a6414a2fb63f950cd21a50ac5e2b82d339d6cbdd20000000000feffffff023075000000000000220020cc5e4cc05a76d0648cd0742768556317e9f8cc729aed077134287909035dba88888402000000000017a914187842cea9c15989a51ce7ca889a08b824bf8743870400483045022100ea2fbd3d8681cfafdcae1bdaaa64f92fb9872fb8f6bf03a2b7effcf7390b66c8022021a79eff7975479934f958f3766d6ac61d708c79b785e398b3bcd84b1039e9b501483045022100dbc4f1ec18f0e0deb4ff88d7d5b3d3b7b500a80d0c0f33efbd3262f0c8689095022074fd226c0b52e3716ad907d14cba9c79aca482a8f4a51662ca83a5b9db49e15b016952210223f815ab09f6bfc8519165c5232947ae89d9d43d678fb3486f3b28382a2371fa210273c529c2c9a99592f2066cebc2172a48991af2b471cb726b9df78c6497ce984e2102aa8fc578b445a1e4257be6b978fcece92980def98dce0e1eb89e7364635ae94153ae00000000',
str(tx_copy))
self.assertEqual('6e9c3cd8788bdb970a124ea06136d52bc01cec4f9b1e217627d5e90ebe77d049', tx_copy.txid())
self.assertEqual('c58650fb77d04577fccb3e201deecbf691ab52ffb61cd2e57996c4d51f7e980b', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual(txid, tx_copy.txid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet2 -> wallet1
outputs = [(bitcoin.TYPE_ADDRESS, wallet1a.get_receiving_address(), 100000)]
tx = wallet2a.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
txid = tx.txid()
tx = Transaction(tx.serialize()) # simulates moving partial txn between cosigners
self.assertEqual(txid, tx.txid())
self.assertFalse(tx.is_complete())
wallet2b.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet2a.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet2a.is_mine(wallet2a.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('0100000000010149d077be0ee9d52776211e9b4fec1cc02bd53661a04e120a97db8b78d83c9c6e01000000232200204311edae835c7a5aa712c8ca644180f13a3b2f3b420fa879b181474724d6163cfeffffff0260ea00000000000017a9143025051b6b5ccd4baf30dfe2de8aa84f0dd567ed87a0860100000000002200203c43ac80d6e3015cf378bf6bac0c22456723d6050bef324ec641e7762440c63c0400483045022100c254468bbe6b8bd1c8c01b6a223e46cc5c6b56fbba87d59575385ad249133b0e02207139688f8d6ae8076c92a266d98454d25c040d04c8e513a37bf7c32dad3e48210147304402204af5edbab2d674f6a9edef8c97b2f7fdf8ababedc7b287710cc7a64d4699358b022064e2d07f4bb32373be31b2003dc56b7b831a7c01419326efb3011c64b898b3f00147522102119f899075a131d4d519d4cdcf5de5907dc2df3b93d54b53ded852211d2b6cb12102fdb0f6775d4b6619257c43343ba5e7807b0164f1eb3f00f2b594ab9e53ab812652ae00000000',
str(tx_copy))
self.assertEqual('84b0dcb43022385f7a10e2710e5625a2be3cd6e390387b6100b55500d5eea8f6', tx_copy.txid())
self.assertEqual('7e561e25da843326e61fd20a40b72fcaeb8690176fc7c3fcbadb3a0146c8396c', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual(txid, tx_copy.txid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet level checks
self.assertEqual((0, funding_output_value - 165000 - 5000 + 100000, 0), wallet1a.get_balance())
self.assertEqual((0, 165000 - 5000 - 100000, 0), wallet2a.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_between_p2sh_1of2_and_p2wpkh_p2sh(self, mock_write):
wallet1a = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('phone guilt ancient scan defy gasp off rotate approve ill word exchange', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YPZ3ntVjqSCxiUUv2jikrUBU73Q3iJ7Y8iR41oYf991L5fanv7ciHjbjokdK2bjYqg1BzEUDxucU9qM5WRdBiY738wmgLP4')
],
'1of2', gap_limit=2
)
# ^ second seed: kingdom now gift initial age right velvet exotic harbor enforce kingdom kick
wallet2 = WalletIntegrityHelper.create_standard_wallet(
# bip39: uniform tank success logic lesson awesome stove elegant regular desert drip device, der: m/49'/1'/0'
keystore.from_xprv('uprv91HGbrNZTK4x8u22nbdYGzEuWPxjaHMREUi7CNhY64KsG5ZGnVM99uCa16EMSfrnaPTFxjbRdBZ2WiBkokoM8anzAy3Vpc52o88WPkitnxi'),
gap_limit=2
)
# bootstrap wallet1
funding_tx = Transaction('010000000001027e20990282eb29588375ad04936e1e991af3bc5b9c6f1ab62eca8c25becaef6a01000000171600140e6a17fadc8bafba830f3467a889f6b211d69a00fdffffff51847fd6bcbdfd1d1ea2c2d95c2d8de1e34c5f2bd9493e88a96a4e229f564e800100000017160014ecdf9fa06856f9643b1a73144bc76c24c67774a6fdffffff021e8501000000000017a91451991bfa68fbcb1e28aa0b1e060b7d24003352e38700093d000000000017a914b0b9f31bace76cdfae2c14abc03e223403d7dc4b870247304402205e19721b92c6afd70cd932acb50815a36ee32ab46a934147d62f02c13aeacf4702207289c4a4131ef86e27058ff70b6cb6bf0e8e81c6cbab6dddd7b0a9bc732960e4012103fe504411c21f7663caa0bbf28931f03fae7e0def7bc54851e0194dfb1e2c85ef02483045022100e969b65096fba4f8b24eb5bc622d2282076241621f3efe922cc2067f7a8a6be702203ec4047dd2a71b9c83eb6a0875a6d66b4d65864637576c06ed029d3d1a8654b0012102bbc8100dca67ba0297aba51296a4184d714204a5fc2eda34708360f37019a3dccfcc1300')
funding_txid = funding_tx.txid()
funding_output_value = 4000000
self.assertEqual('1137c12de4ce0f5b08de8846ba14c0814351a7f0f31457c8ea51a5d4b3c891a3', funding_txid)
wallet1a.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# wallet1 -> wallet2
outputs = [(bitcoin.TYPE_ADDRESS, wallet2.get_receiving_address(), 1000000)]
tx = wallet1a.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet1a.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet1a.is_mine(wallet1a.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('0100000001a391c8b3d4a551eac85714f3f0a7514381c014ba4688de085b0fcee42dc13711010000009200483045022100fcf03aeb97b66791372c18aa0dd651817cf458d941dd628c966f0305a023360f022016c534530e267b6a52f90e62aa9fb50ace609ffb21e472d3ba7b29db9b30050e014751210245c90e040d4f9d1fc136b3d4d6b7535bbb5df2bd27666c21977042cc1e05b5b02103c9a6bebfce6294488315e58137a279b2efe09f1f528ecf93b40675ded3cf0e5f52aefeffffff0240420f000000000017a9149573eb50f3136dff141ac304190f41c8becc92ce8738b32d000000000017a914b815d1b430ae9b632e3834ed537f7956325ee2a98700000000',
str(tx_copy))
self.assertEqual('1b7e94860b9681d4e371928d40fdbd4641e991aa74f1a211f239c887047e4a2a', tx_copy.txid())
self.assertEqual('1b7e94860b9681d4e371928d40fdbd4641e991aa74f1a211f239c887047e4a2a', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet2 -> wallet1
outputs = [(bitcoin.TYPE_ADDRESS, wallet1a.get_receiving_address(), 300000)]
tx = wallet2.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
self.assertEqual(wallet2.txin_type, tx.inputs()[0]['type'])
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet2.is_mine(wallet2.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('010000000001012a4a7e0487c839f211a2f174aa91e94146bdfd408d9271e3d481960b86947e1b00000000171600149fad840ed174584ee054bd26f3e411817338c5edfeffffff02e09304000000000017a914b0b9f31bace76cdfae2c14abc03e223403d7dc4b87d89a0a000000000017a9148ccd0efb2be5b412c4033715f560ed8f446c8ceb87024830450221009c816c3e0c40b37085244f0976f65635b8d711952bad9843c5f51e386fd37cc402202c34a4a7227182742d9f93e9f28c4bd30ded6514550f39614cb5ad00e46690070121038362bbf0b4918b37e9d7c75930ed3a78e3d445724cb5c37ade4a59b6e411fe4e00000000',
str(tx_copy))
self.assertEqual('f65edb0843ff44436dc5964fb6b298e157502b9b4a83dac6b82dd2d2a3247d0a', tx_copy.txid())
self.assertEqual('63efc09db4c7445eaaca9a5e7732202f42aec81a53b05d819f1918ce0cf3b84d', tx_copy.wtxid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
wallet1a.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
wallet2.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
# wallet level checks
self.assertEqual((0, funding_output_value - 1000000 - 5000 + 300000, 0), wallet1a.get_balance())
self.assertEqual((0, 1000000 - 5000 - 300000, 0), wallet2.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bump_fee_p2pkh(self, mock_write):
wallet = self.create_standard_wallet_from_seed('fold object utility erase deputy output stadium feed stereo usage modify bean')
# bootstrap wallet
funding_tx = Transaction('010000000001011f4db0ecd81f4388db316bc16efb4e9daf874cf4950d54ecb4c0fb372433d68500000000171600143d57fd9e88ef0e70cddb0d8b75ef86698cab0d44fdffffff0280969800000000001976a91472e34cebab371967b038ce41d0e8fa1fb983795e88ac86a0ae020000000017a9149188bc82bdcae077060ebb4f02201b73c806edc887024830450221008e0725d531bd7dee4d8d38a0f921d7b1213e5b16c05312a80464ecc2b649598d0220596d309cf66d5f47cb3df558dbb43c5023a7796a80f5a88b023287e45a4db6b9012102c34d61ceafa8c216f01e05707672354f8119334610f7933a3f80dd7fb6290296bd391400')
funding_txid = funding_tx.txid()
funding_output_value = 10000000
self.assertEqual('03052739fcfa2ead5f8e57e26021b0c2c546bcd3d74c6e708d5046dc58d90762', funding_txid)
wallet.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create tx
outputs = [(bitcoin.TYPE_ADDRESS, '2N1VTMMFb91SH9SNRAkT7z8otP5eZEct4KL', 2500000)]
coins = wallet.get_spendable_coins(domain=None, config=self.config)
tx = wallet.make_unsigned_transaction(coins, outputs, config=self.config, fixed_fee=5000)
tx.set_rbf(True)
tx.locktime = 1325501
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet.is_mine(wallet.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual('01000000016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc39270503000000006b483045022100df74e6a88085be1ff3a3fd96cf2ef03b5e33fa06788f56aa71649f0177d1bfc402206e36a7e6124863ac746d5288d6d47c1d1eac5d4ac3818e561a7a0f2c0a269429012102a807c07bd7975211078e916bdda061d97e98d59a3631a804aada2f9a3f5b587afdffffff02a02526000000000017a9145a71fc1a7a98ddd67be935ade1600981c0d066f987585d7200000000001976a914aab9af3fbee0ab4e5c00d53e92f66d4bcb44f1bd88acbd391400',
str(tx_copy))
self.assertEqual('44e6dd9529a253181112fc40cadd8ebb4c4359aacb91aa24c45556a1d00839b0', tx_copy.txid())
self.assertEqual('44e6dd9529a253181112fc40cadd8ebb4c4359aacb91aa24c45556a1d00839b0', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 2500000 - 5000, 0), wallet.get_balance())
# bump tx
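# (bump_fee rebuilds the transaction with the fee increased by `delta`, i.e.
#  +5000 sat for a 10000 sat total, as the balance check below confirms)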
tx = wallet.bump_fee(tx=Transaction(tx.serialize()), delta=5000)
tx.locktime = 1325501
self.assertFalse(tx.is_complete())
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
tx_copy = Transaction(tx.serialize())
self.assertEqual('01000000016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc39270503000000006a473044022055b7e6b7e89a55740f7aa2ad1ffcd4b5c913f0de63cf512438921534bc9c3a8d022043b3b27bdc2da4cc6265e4cc9673a3780ccd5cd6f0ee2eaedb51720c15b7a00a012102a807c07bd7975211078e916bdda061d97e98d59a3631a804aada2f9a3f5b587afdffffff02a02526000000000017a9145a71fc1a7a98ddd67be935ade1600981c0d066f987d0497200000000001976a914aab9af3fbee0ab4e5c00d53e92f66d4bcb44f1bd88acbd391400',
str(tx_copy))
self.assertEqual('f26edcf20991dccedf16058adbee923db7057c9b102db660156b8142b6a59bc7', tx_copy.txid())
self.assertEqual('f26edcf20991dccedf16058adbee923db7057c9b102db660156b8142b6a59bc7', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 2500000 - 10000, 0), wallet.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_cpfp_p2pkh(self, mock_write):
wallet = self.create_standard_wallet_from_seed('fold object utility erase deputy output stadium feed stereo usage modify bean')
# bootstrap wallet
funding_tx = Transaction('010000000001010f40064d66d766144e17bb3276d96042fd5aee2196bcce7e415f839e55a83de800000000171600147b6d7c7763b9185b95f367cf28e4dc6d09441e73fdffffff02404b4c00000000001976a9141df43441a3a3ee563e560d3ddc7e07cc9f9c3cdb88ac009871000000000017a9143873281796131b1996d2f94ab265327ee5e9d6e28702473044022029c124e5a1e2c6fa12e45ccdbdddb45fec53f33b982389455b110fdb3fe4173102203b3b7656bca07e4eae3554900aa66200f46fec0af10e83daaa51d9e4e62a26f4012103c8f0460c245c954ef563df3b1743ea23b965f98b120497ac53bd6b8e8e9e0f9bbe391400')
funding_txid = funding_tx.txid()
funding_output_value = 5000000
self.assertEqual('9973bf8918afa349b63934432386f585613b51034db6c8628b61ba2feb8a3668', funding_txid)
wallet.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# cpfp tx
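# cpfp() spends the wallet's own unconfirmed output of funding_tx back to the wallet in a
# single-output child tx, attaching a 50000 sat fee so the child pays for the parent's
# confirmation (balance drops by exactly that fee).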
tx = wallet.cpfp(funding_tx, fee=50000)
tx.set_rbf(True)
tx.locktime = 1325502
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertEqual(tx.txid(), tx_copy.txid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual('010000000168368aeb2fba618b62c8b64d03513b6185f58623433439b649a3af1889bf7399000000006a47304402203a0b369e46c5fbacb83044b7ab9d69ff7998774041d6870993504915bc495d210220272833b870d8abca516adb7dc4cb27892b1b6e4b52fbfeb592a72c3e795eb213012102a7536f0bfbc60c5a8e86e2b9df26431fc062f9f454016dbc26f2467e0bc98b3ffdffffff01f0874b00000000001976a9141df43441a3a3ee563e560d3ddc7e07cc9f9c3cdb88acbe391400',
str(tx_copy))
self.assertEqual('47500a425518b5542d94db1157f473b8cf322d31ea97a1a642fec19386cdb761', tx_copy.txid())
self.assertEqual('47500a425518b5542d94db1157f473b8cf322d31ea97a1a642fec19386cdb761', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 50000, 0), wallet.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_bump_fee_p2wpkh(self, mock_write):
wallet = self.create_standard_wallet_from_seed('frost repair depend effort salon ring foam oak cancel receive save usage')
# bootstrap wallet
funding_tx = Transaction('01000000000102acd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba020000001716001455c7f5e0631d8e6f5f05dddb9f676cec48845532fdffffffd146691ef6a207b682b13da5f2388b1f0d2a2022c8cfb8dc27b65434ec9ec8f701000000171600147b3be8a7ceaf15f57d7df2a3d216bc3c259e3225fdffffff02a9875b000000000017a914ea5a99f83e71d1c1dfc5d0370e9755567fe4a141878096980000000000160014d4ca56fcbad98fb4dcafdc573a75d6a6fffb09b702483045022100dde1ba0c9a2862a65791b8d91295a6603207fb79635935a67890506c214dd96d022046c6616642ef5971103c1db07ac014e63fa3b0e15c5729eacdd3e77fcb7d2086012103a72410f185401bb5b10aaa30989c272b554dc6d53bda6da85a76f662723421af024730440220033d0be8f74e782fbcec2b396647c7715d2356076b442423f23552b617062312022063c95cafdc6d52ccf55c8ee0f9ceb0f57afb41ea9076eb74fe633f59c50c6377012103b96a4954d834fbcfb2bbf8cf7de7dc2b28bc3d661c1557d1fd1db1bfc123a94abb391400')
funding_txid = funding_tx.txid()
funding_output_value = 10000000
self.assertEqual('52e669a20a26c8b3df5b41e5e6309b18bcde8e1ad7ea17a18f63b6dc6c8becc0', funding_txid)
wallet.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create tx
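# the wallet's coin here is a native p2wpkh output, so the spend is segwit: the signature
# lives in the witness and wtxid differs from txid (see the assertions below).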
outputs = [(bitcoin.TYPE_ADDRESS, '2N1VTMMFb91SH9SNRAkT7z8otP5eZEct4KL', 2500000)]
coins = wallet.get_spendable_coins(domain=None, config=self.config)
tx = wallet.make_unsigned_transaction(coins, outputs, config=self.config, fixed_fee=5000)
tx.set_rbf(True)
tx.locktime = 1325499
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet.is_mine(wallet.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual('01000000000101c0ec8b6cdcb6638fa117ead71a8edebc189b30e6e5415bdfb3c8260aa269e6520100000000fdffffff02a02526000000000017a9145a71fc1a7a98ddd67be935ade1600981c0d066f987585d720000000000160014f0fe5c1867a174a12e70165e728a072619455ed50247304402205442705e988abe74bf391b293bb1b886674284a92ed0788c33024f9336d60aef022013a93049d3bed693254cd31a704d70bb988a36750f0b74d0a5b4d9e29c54ca9d0121028d4c44ca36d2c4bff3813df8d5d3c0278357521ecb892cd694c473c03970e4c5bb391400',
str(tx_copy))
self.assertEqual('b019bbad45a46ed25365e46e4cae6428fb12ae425977eb93011ffb294cb4977e', tx_copy.txid())
self.assertEqual('ba87313e2b3b42f1cc478843d4d53c72d6e06f6c66ac8cfbe2a59cdac2fd532d', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 2500000 - 5000, 0), wallet.get_balance())
# bump tx
tx = wallet.bump_fee(tx=Transaction(tx.serialize()), delta=5000)
tx.locktime = 1325500
self.assertFalse(tx.is_complete())
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
tx_copy = Transaction(tx.serialize())
self.assertEqual('01000000000101c0ec8b6cdcb6638fa117ead71a8edebc189b30e6e5415bdfb3c8260aa269e6520100000000fdffffff02a02526000000000017a9145a71fc1a7a98ddd67be935ade1600981c0d066f987d049720000000000160014f0fe5c1867a174a12e70165e728a072619455ed5024730440220517fed3a902b5b41fa718ffd5f229b835b8ed26f23433c4ea437d24eff66d15b0220526854a6ebcd351ab2373d0e7c4e20f17c420520b5d570c2df7ca1d773d6a55d0121028d4c44ca36d2c4bff3813df8d5d3c0278357521ecb892cd694c473c03970e4c5bc391400',
str(tx_copy))
self.assertEqual('9a1c0ef7e871798b86074c7f8dd1e81b6d9a758ff07e0059eee31dc6fbf4f438', tx_copy.txid())
self.assertEqual('59144d30c911ac33359b0a32d5a3fdd2ca806982c85838e193eb95f5d315e813', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 2500000 - 10000, 0), wallet.get_balance())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_cpfp_p2wpkh(self, mock_write):
wallet = self.create_standard_wallet_from_seed('frost repair depend effort salon ring foam oak cancel receive save usage')
# bootstrap wallet
funding_tx = Transaction('01000000000101c0ec8b6cdcb6638fa117ead71a8edebc189b30e6e5415bdfb3c8260aa269e6520000000017160014ba9ca815474a674ff1efb3fc82cf0f3460de8c57fdffffff0230390f000000000017a9148b59abaca8215c0d4b18cbbf715550aa2b50c85b87404b4c000000000016001483c3bc7234f17a209cc5dcce14903b54ee4dab9002473044022038a05f7d38bcf810dfebb39f1feda5cc187da4cf5d6e56986957ddcccedc75d302203ab67ccf15431b4e2aeeab1582b9a5a7821e7ac4be8ebf512505dbfdc7e094fd0121032168234e0ba465b8cedc10173ea9391725c0f6d9fa517641af87926626a5144abd391400')
funding_txid = funding_tx.txid()
funding_output_value = 5000000
self.assertEqual('c36a6e1cd54df108e69574f70bc9b88dc13beddc70cfad9feb7f8f6593255d4a', funding_txid)
wallet.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# cpfp tx
tx = wallet.cpfp(funding_tx, fee=50000)
tx.set_rbf(True)
tx.locktime = 1325501
wallet.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertEqual(tx.txid(), tx_copy.txid())
self.assertEqual(tx.wtxid(), tx_copy.wtxid())
self.assertEqual('010000000001014a5d2593658f7feb9fadcf70dced3bc18db8c90bf77495e608f14dd51c6e6ac30100000000fdffffff01f0874b000000000016001483c3bc7234f17a209cc5dcce14903b54ee4dab900248304502210098fbe458a9f1c595d6bf63962fad00300a7b60c6dd8b2e7625f3804a3bf1086602204bc8a46fb162be8f85a23644eccf9f4223fa092f5c861144676a34dc83a7c39d012102a6ff1ffc189b4776b78e20edca969cc45da3e610cc0cc79925604be43fee469fbd391400',
str(tx_copy))
self.assertEqual('38a21c67336232c88ae15311f329197c69ee70e872f8acb5bc9c2b6417c35ad8', tx_copy.txid())
self.assertEqual('b5b8264ed5f3e03d48ef82fa2a25278cd9c0563fa78e557f370b7e0558293172', tx_copy.wtxid())
wallet.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual((0, funding_output_value - 50000, 0), wallet.get_balance())
@needs_test_with_all_ecc_implementations
def test_sweep_p2pk(self):
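# Sweep a p2pk output using only its WIF-encoded private key: NetworkMock stands in for the
# network layer, returning the single 1_000_000 sat UTXO for the key's scripthash, and
# sweep() sends the full amount minus the 5000 sat fee to dest_addr.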
class NetworkMock:
relay_fee = 1000
def get_local_height(self): return 1325785
def listunspent_for_scripthash(self, scripthash):
if scripthash == '460e4fb540b657d775d84ff4955c9b13bd954c2adc26a6b998331343f85b6a45':
return [{'tx_hash': 'ac24de8b58e826f60bd7b9ba31670bdfc3e8aedb2f28d0e91599d741569e3429', 'tx_pos': 1, 'height': 1325785, 'value': 1000000}]
else:
return []
privkeys = ['93NQ7CFbwTPyKDJLXe97jczw33fiLijam2SCZL3Uinz1NSbHrTu', ]
network = NetworkMock()
dest_addr = 'tb1q3ws2p0qjk5vrravv065xqlnkckvzcpclk79eu2'
tx = sweep(privkeys, network, config=None, recipient=dest_addr, fee=5000)
tx_copy = Transaction(tx.serialize())
self.assertEqual('010000000129349e5641d79915e9d0282fdbaee8c3df0b6731bab9d70bf626e8588bde24ac010000004847304402206bf0d0a93abae0d5873a62ebf277a5dd2f33837821e8b93e74d04e19d71b578002201a6d729bc159941ef5c4c9e5fe13ece9fc544351ba531b00f68ba549c8b38a9a01fdffffff01b82e0f00000000001600148ba0a0bc12b51831f58c7ea8607e76c5982c071fd93a1400',
str(tx_copy))
self.assertEqual('7f827fc5256c274fd1094eb7e020c8ded0baf820356f61aa4f14a9093b0ea0ee', tx_copy.txid())
self.assertEqual('7f827fc5256c274fd1094eb7e020c8ded0baf820356f61aa4f14a9093b0ea0ee', tx_copy.wtxid())
class TestWalletOfflineSigning(TestCaseForTestnet):
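# Each test below builds an unsigned transaction with a watch-only "online" wallet (xpub or
# imported address), round-trips it through serialization, and has the key-holding "offline"
# wallet sign the deserialized copy.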
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.electrum_path = tempfile.mkdtemp()
cls.config = SimpleConfig({'electrum_path': cls.electrum_path})
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls.electrum_path)
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_xpub_p2pkh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/44'/1'/0'
keystore.from_xprv('tprv8gfKwjuAaqtHgqxMh1tosAQ28XvBMkcY5NeFRA3pZMpz6MR4H4YZ3MJM4fvNPnRKeXR1Td2vQGgjorNXfo94WvT5CYDsPAqjHxSn436G1Eu'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_standard_wallet(
keystore.from_xpub('tpubDDMN69wQjDZxaJz9afZQGa48hZS7X5oSegF2hg67yddNvqfpuTN9DqvDEp7YyVf7AzXnqBqHdLhzTAStHvsoMDDb8WoJQzNrcHgDJHVYgQF'),
gap_limit=4
)
# bootstrap wallet_online
funding_tx = Transaction('01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24
302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e10558717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400')
funding_txid = funding_tx.txid()
self.assertEqual('98574bc5f6e75769eb0c93d41453cc1dfbd15c14e63cc3c42f37cdbd08858762', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1qp0mv2sxsyxxfj5gl0332f9uyez93su9cf26757', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual('d9c21696eca80321933e7444ca928aaf25eeda81aaa2f4e5c085d4d0a9cf7aa7', tx.txid())
self.assertEqual('d9c21696eca80321933e7444ca928aaf25eeda81aaa2f4e5c085d4d0a9cf7aa7', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_xpub_p2wpkh_p2sh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/49'/1'/0'
keystore.from_xprv('uprv8zHHrMQMQ26utWwNJ5MK2SXpB9hbmy7pbPaneii69xT8cZTyFpxQFxkknGWKP8dxBTZhzy7yP6cCnLrRCQjzJDk3G61SjZpxhFQuB2NR8a5'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_standard_wallet(
keystore.from_xpub('upub5DGeFrwFEPfD711qQ6tKPaUYjBY6BRqfxcWPT77hiHz7VMo7oNGeom5EdXoKXEazePyoN3ueJMqHBfp3MwmsaD8k9dFHoa8KGeVXev7Pbg2'),
gap_limit=4
)
# bootstrap wallet_online
funding_tx = Transaction('01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24
302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e10558717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400')
funding_txid = funding_tx.txid()
self.assertEqual('98574bc5f6e75769eb0c93d41453cc1dfbd15c14e63cc3c42f37cdbd08858762', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1qp0mv2sxsyxxfj5gl0332f9uyez93su9cf26757', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325341
self.assertFalse(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('3f0d188519237478258ad2bf881643618635d11c2bb95512e830fcf2eda3c522', tx_copy.txid())
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('3f0d188519237478258ad2bf881643618635d11c2bb95512e830fcf2eda3c522', tx.txid())
self.assertEqual('27b78ec072a403b0545258e7a1a8d494e4b6fd48bf77f4251a12160c92207cbc', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_xpub_p2wpkh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/84'/1'/0'
keystore.from_xprv('vprv9K9hbuA23Bidgj1KRSHUZMa59jJLeZBpXPVn4RP7sBLArNhZxJjw4AX7aQmVTErDt4YFC11ptMLjbwxgrsH8GLQ1cx77KggWeVPeDBjr9xM'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_standard_wallet(
keystore.from_xpub('vpub5Y941QgusZGvuD5nXTpUvVWohm8q41uftcRNronjRWs9jB2iVr4BbxqbRfAoQjWHgJtDCQEXChgfsPbEuBnidtkFztZSD3zDKTrtwXa2LCa'),
gap_limit=4
)
# bootstrap wallet_online
funding_tx = Transaction('01000000000116e9c9dac2651672316aab3b9553257b6942c5f762c5d795776d9cfa504f183c000000000000fdffffff8085019852fada9da84b58dcf753d292dde314a19f5a5527f6588fa2566142130000000000fdffffffa4154a48db20ce538b28722a89c6b578bd5b5d60d6d7b52323976339e39405230000000000fdffffff0b5ef43f843a96364aebd708e25ea1bdcf2c7df7d0d995560b8b1be5f357b64f0100000000fdffffffd41dfe1199c76fdb3f20e9947ea31136d032d9da48c5e45d85c8f440e2351a510100000000fdffffff5bd015d17e4a1837b01c24ebb4a6b394e3da96a85442bd7dc6abddfbf16f20510000000000fdffffff13a3e7f80b1bd46e38f2abc9e2f335c18a4b0af1778133c7f1c3caae9504345c0200000000fdffffffdf4fc1ab21bca69d18544ddb10a913cd952dbc730ab3d236dd9471445ff405680100000000fdffffffe0424d78a30d5e60ac6b26e2274d7d6e7c6b78fe0b49bdc3ac4dd2147c9535750100000000fdffffff7ab6dd6b3c0d44b0fef0fdc9ab0ad6eee23eef799eee29c005d52bc4461998760000000000fdffffff48a77e5053a21acdf4f235ce00c82c9bc1704700f54d217f6a30704711b9737d0000000000fdffffff86918b39c1d9bb6f34d9b082182f73cedd15504331164dc2b186e95c568ccb870000000000fdffffff15a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff9a2875297f81dfd3b77426d63f621db350c270cc28c634ad86b9969ee33ac6960000000000fdffffffd6eeb1d1833e00967083d1ab86fa5a2e44355bd613d9277135240fe6f60148a20100000000fdffffffd8a6e5a9b68a65ff88220ca33e36faf6f826ae8c5c8a13fe818a5e63828b68a40100000000fdffffff73aab8471f82092e45ed1b1afeffdb49ea1ec74ce4853f971812f6a72a7e85aa0000000000fdffffffacd6459dec7c3c51048eb112630da756f5d4cb4752b8d39aa325407ae0885cba0000000000fdffffff1eddd5e13bef1aba1ff151762b5860837daa9b39db1eae8ea8227c81a5a1c8ba0000000000fdffffff67a096ff7c343d39e96929798097f6d7a61156bbdb905fbe534ba36f273271d40100000000fdffffff109a671eb7daf6dcd07c0ceff99f2de65864ab36d64fb3a890bab951569adeee0100000000fdffffff4f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0200000000fdffffff042f280000000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600146c540c1c9f546004539f45318b8d9f4d7b4857ef80969800000000001976a91422a6daa4a7b695c8a2dd104d47c5dc73d655c96f88ac809698000000000017a914a6885437e0762013facbda93894202a0fe86e35f8702473044022075ef5f04d7a63347064938e15a0c74277a79e5c9d32a26e39e8a517a44d565cc022015246790fb5b29c9bf3eded1b95699b1635bcfc6d521886fddf1135ba1b988ec012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe02473044022061aa9b0d9649ffd7259bc54b35f678565dbbe11507d348dd8885522eaf1fa70c02202cc79de09e8e63e8d57fde6ef66c079ddac4d9828e1936a9db833d4c142615c3012103a8f58fc1f5625f18293403104874f2d38c9279f777e512570e4199c7d292b81b0247304402207744dc1ab0bf77c081b58540c4321d090c0a24a32742a361aa55ad86f0c7c24e02201a9b0dd78b63b495ab5a0b5b161c54cb085d70683c90e188bb4dc2e41e142f6601210361fb354f8259abfcbfbdda36b7cb4c3b05a3ca3d68dd391fd8376e920d93870d0247304402204803e423c321acc6c12cb0ebf196d2906842fdfed6de977cc78277052ee5f15002200634670c1dc25e6b1787a65d3e09c8e6bb0340238d90b9d98887e8fd53944e080121031104c60d027123bf8676bcaefaa66c001a0d3d379dc4a9492a567a9e1004452d02473044022050e4b5348d30011a22b6ae8b43921d29249d88ea71b1fbaa2d9c22dfdef58b7002201c5d5e143aa8835454f61b0742226ebf8cd466bcc2cdcb1f77b92e473d3b13190121030496b9d49aa8efece4f619876c60a77d2c0dc846390ecdc5d9acbfa1bb3128760247304402204d6a9b986e1a0e3473e8aef84b3eb7052442a76dfd7631e35377f141496a55490220131ab342853c01e31f111436f8461e28bc95883b871ca0e01b5f57146e79d7bb012103262ffbc88e25296056a3c65c880e3686297e07f360e6b80f1219d65b0900e84e02483045022100c8ffacf92efa1dddef7e858a241af7a80adcc2489bcc325195970733b1f35fac022076f40c26023a228041a9665c5290b9918d06f03b716e4d8f6d47e79121c7eb37012102d9ba7e02d7cd7dd24
302f823b3114c99da21549c663f72440dc87e8ba412120902483045022100b55545d84e43d001bbc10a981f184e7d3b98a7ed6689863716cab053b3655a2f0220537eb76a695fbe86bf020b4b6f7ae93b506d778bbd0885f0a61067616a2c8bce0121034a57f2fa2c32c9246691f6a922fb1ebdf1468792bae7eff253a99fc9f2a5023902483045022100f1d4408463dbfe257f9f778d5e9c8cdb97c8b1d395dbd2e180bc08cad306492c022002a024e19e1a406eaa24467f033659de09ab58822987281e28bb6359288337bd012103e91daa18d924eea62011ce596e15b6d683975cf724ea5bf69a8e2022c26fc12f0247304402204f1e12b923872f396e5e1a3aa94b0b2e86b4ce448f4349a017631db26d7dff8a022069899a05de2ad2bbd8e0202c56ab1025a7db9a4998eea70744e3c367d2a7eb71012103b0eee86792dbef1d4a49bc4ea32d197c8c15d27e6e0c5c33e58e409e26d4a39a0247304402201787dacdb92e0df6ad90226649f0e8321287d0bd8fddc536a297dd19b5fc103e022001fe89300a76e5b46d0e3f7e39e0ee26cc83b71d59a2a5da1dd7b13350cd0c07012103afb1e43d7ec6b7999ef0f1093069e68fe1dfe5d73fc6cfb4f7a5022f7098758c02483045022100acc1212bba0fe4fcc6c3ae5cf8e25f221f140c8444d3c08dfc53a93630ac25da02203f12982847244bd9421ef340293f3a38d2ab5d028af60769e46fcc7d81312e7e012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024830450221009c04934102402949484b21899271c3991c007b783b8efc85a3c3d24641ac7c24022006fb1895ce969d08a2cb29413e1a85427c7e85426f7a185108ca44b5a0328cb301210360248db4c7d7f76fe231998d2967104fee04df8d8da34f10101cc5523e82648c02483045022100b11fe61b393fa5dbe18ab98f65c249345b429b13f69ee2d1b1335725b24a0e73022010960cdc5565cbc81885c8ed95142435d3c202dfa5a3dc5f50f3914c106335ce0121029c878610c34c21381cda12f6f36ab88bf60f5f496c1b82c357b8ac448713e7b50247304402200ca080db069c15bbf98e1d4dff68d0aea51227ff5d17a8cf67ceae464c22bbb0022051e7331c0918cbb71bb2cef29ca62411454508a16180b0fb5df94248890840df0121028f0be0cde43ff047edbda42c91c37152449d69789eb812bb2e148e4f22472c0f0247304402201fefe258938a2c481d5a745ef3aa8d9f8124bbe7f1f8c693e2ddce4ddc9a927c02204049e0060889ede8fda975edf896c03782d71ba53feb51b04f5ae5897d7431dc012103946730b480f52a43218a9edce240e8b234790e21df5e96482703d81c3c19d3f1024730440220126a6a56dbe69af78d156626fc9cf41d6aac0c07b8b5f0f8491f68db5e89cb5002207ee6ed6f2f41da256f3c1e79679a3de6cf34cc08b940b82be14aefe7da031a6b012102801bc7170efb82c490e243204d86970f15966aa3bce6a06bef5c09a83a5bfffe024730440220363204a1586d7f13c148295122cbf9ec7939685e3cadab81d6d9e921436d21b7022044626b8c2bd4aa7c167d74bc4e9eb9d0744e29ce0ad906d78e10d6d854f23d170121037fb9c51716739bb4c146857fab5a783372f72a65987d61f3b58c74360f4328dd0247304402207925a4c2a3a6b76e10558717ee28fcb8c6fde161b9dc6382239af9f372ace99902204a58e31ce0b4a4804a42d2224331289311ded2748062c92c8aca769e81417a4c012102e18a8c235b48e41ef98265a8e07fa005d2602b96d585a61ad67168d74e7391cb02483045022100bbfe060479174a8d846b5a897526003eb2220ba307a5fee6e1e8de3e4e8b38fd02206723857301d447f67ac98a5a5c2b80ef6820e98fae213db1720f93d91161803b01210386728e2ac3ecee15f58d0505ee26f86a68f08c702941ffaf2fb7213e5026aea10247304402203a2613ae68f697eb02b5b7d18e3c4236966dac2b3a760e3021197d76e9ad4239022046f9067d3df650fcabbdfd250308c64f90757dec86f0b08813c979a42d06a6ec012102a1d7ee1cb4dc502f899aaafae0a2eb6cbf80d9a1073ae60ddcaabc3b1d1f15df02483045022100ab1bea2cc5388428fd126c7801550208701e21564bd4bd00cfd4407cfafc1acd0220508ee587f080f3c80a5c0b2175b58edd84b755e659e2135b3152044d75ebc4b501210236dd1b7f27a296447d0eb3750e1bdb2d53af50b31a72a45511dc1ec3fe7a684a19391400')
funding_txid = funding_tx.txid()
self.assertEqual('98574bc5f6e75769eb0c93d41453cc1dfbd15c14e63cc3c42f37cdbd08858762', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1qp0mv2sxsyxxfj5gl0332f9uyez93su9cf26757', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325341
self.assertFalse(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual('ee76c0c6da87f0eb5ab4d1ae05d3942512dcd3c4c42518f9d3619e74400cfc1f', tx_copy.txid())
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('ee76c0c6da87f0eb5ab4d1ae05d3942512dcd3c4c42518f9d3619e74400cfc1f', tx.txid())
self.assertEqual('729c2e40a2fccd6b731407c01ed304119c1ac329bdf9baae5b642d916c5f3272', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_wif_online_addr_p2pkh(self, mock_write): # compressed pubkey
wallet_offline = WalletIntegrityHelper.create_imported_wallet(privkeys=True)
wallet_offline.import_private_key('p2pkh:cQDxbmQfwRV3vP1mdnVHq37nJekHLsuD3wdSQseBRA2ct4MFk5Pq', pw=None)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('mg2jk6S5WGDhUPA8mLSxDLWpUoQnX1zzoG')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.txid())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_wif_online_addr_p2wpkh_p2sh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_imported_wallet(privkeys=True)
wallet_offline.import_private_key('p2wpkh-p2sh:cU9hVzhpvfn91u2zTVn8uqF2ymS7ucYH8V5TmsTDmuyMHgRk9WsJ', pw=None)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('2NA2JbUVK7HGWUCK5RXSVNHrkgUYF8d9zV8')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('7642816d051aa3b333b6564bb6e44fe3a5885bfe7db9860dfbc9973a5c9a6562', tx.txid())
self.assertEqual('9bb9949974954613945756c48ca5525cd5cba1b667ccb10c7a53e1ed076a1117', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_wif_online_addr_p2wpkh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_imported_wallet(privkeys=True)
wallet_offline.import_private_key('p2wpkh:cPuQzcNEgbeYZ5at9VdGkCwkPA9r34gvEVJjuoz384rTfYpahfe7', pw=None)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('tb1qm2eh4787lwanrzr6pf0ekf5c7jnmghm2y9k529')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('f8039bd85279f2b5698f15d47f2e338d067d09af391bd8a19467aa94d03f280c', tx.txid())
self.assertEqual('3b7cc3c3352bbb43ddc086487ac696e09f2863c3d9e8636721851b8008a83ffa', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_addr_p2pkh(self, mock_write): # compressed pubkey
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/44'/1'/0'
keystore.from_xprv('tprv8gfKwjuAaqtHgqxMh1tosAQ28XvBMkcY5NeFRA3pZMpz6MR4H4YZ3MJM4fvNPnRKeXR1Td2vQGgjorNXfo94WvT5CYDsPAqjHxSn436G1Eu'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('mg2jk6S5WGDhUPA8mLSxDLWpUoQnX1zzoG')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertFalse(tx.is_segwit())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.txid())
self.assertEqual('e56da664631b8c666c6df38ec80c954c4ac3c4f56f040faf0070e4681e937fc4', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_addr_p2wpkh_p2sh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/49'/1'/0'
keystore.from_xprv('uprv8zHHrMQMQ26utWwNJ5MK2SXpB9hbmy7pbPaneii69xT8cZTyFpxQFxkknGWKP8dxBTZhzy7yP6cCnLrRCQjzJDk3G61SjZpxhFQuB2NR8a5'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('2NA2JbUVK7HGWUCK5RXSVNHrkgUYF8d9zV8')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('7642816d051aa3b333b6564bb6e44fe3a5885bfe7db9860dfbc9973a5c9a6562', tx.txid())
self.assertEqual('9bb9949974954613945756c48ca5525cd5cba1b667ccb10c7a53e1ed076a1117', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_xprv_online_addr_p2wpkh(self, mock_write):
wallet_offline = WalletIntegrityHelper.create_standard_wallet(
# bip39: "qwe", der: m/84'/1'/0'
keystore.from_xprv('vprv9K9hbuA23Bidgj1KRSHUZMa59jJLeZBpXPVn4RP7sBLArNhZxJjw4AX7aQmVTErDt4YFC11ptMLjbwxgrsH8GLQ1cx77KggWeVPeDBjr9xM'),
gap_limit=4
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('tb1qm2eh4787lwanrzr6pf0ekf5c7jnmghm2y9k529')
# bootstrap wallet_online
funding_tx = Transaction('01000000000101197a89cff51096b9dd4214cdee0eb90cb27a25477e739521d728a679724042730100000000fdffffff048096980000000000160014dab37af8fefbbb31887a0a5f9b2698f4a7b45f6a80969800000000001976a91405a20074ef7eb42c7c6fcd4f499faa699742783288ac809698000000000017a914b808938a8007bc54509cd946944c479c0fa6554f87131b2c0400000000160014a04dfdb9a9aeac3b3fada6f43c2a66886186e2440247304402204f5dbb9dda65eab26179f1ca7c37c8baf028153815085dd1bbb2b826296e3b870220379fcd825742d6e2bdff772f347b629047824f289a5499a501033f6c3495594901210363c9c98740fe0455c646215cea9b13807b758791c8af7b74e62968bef57ff8ae1e391400')
funding_txid = funding_tx.txid()
self.assertEqual('0a08ea26a49e2b80f253796d605b69e2d0403fac64bdf6f7db82ada4b7bb6b62', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, 'tb1quk7ahlhr3qmjndy0uvu9y9hxfesrtahtta9ghm', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325340
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx
tx = wallet_offline.sign_transaction(tx_copy, password=None)
self.assertTrue(tx.is_complete())
self.assertTrue(tx.is_segwit())
self.assertEqual('f8039bd85279f2b5698f15d47f2e338d067d09af391bd8a19467aa94d03f280c', tx.txid())
self.assertEqual('3b7cc3c3352bbb43ddc086487ac696e09f2863c3d9e8636721851b8008a83ffa', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_hd_multisig_online_addr_p2sh(self, mock_write):
# 2-of-3 legacy p2sh multisig
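# each offline cosigner wallet combines its own seed with the other cosigners' xpubs, so all
# derive the same 2-of-3 addresses; the online wallet only imports the multisig address to watch.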
wallet_offline1 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('blast uniform dragon fiscal ensure vast young utility dinosaur abandon rookie sure', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4XJzYkhsCbDCcZRmDAKSD7bXi9mdCni7acVt45fxbTVZyU6jRGh29ULKTjoapkfFsSJvQHitcVKbQgzgkkYsAmaovcro7Mhf')
],
'2of3', gap_limit=2
)
wallet_offline2 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('cycle rocket west magnet parrot shuffle foot correct salt library feed song', '', True),
keystore.from_xpub('tpubD6NzVbkrYhZ4YTPEgwk4zzr8wyo7pXGmbbVUnfYNtx6SgAMF5q3LN3Kch58P9hxGNsTmP7Dn49nnrmpE6upoRb1Xojg12FGLuLHkVpVtS44'),
keystore.from_xpub('tpubD6NzVbkrYhZ4YARFMEZPckrqJkw59GZD1PXtQnw14ukvWDofR7Z1HMeSCxfYEZVvg4VdZ8zGok5VxHwdrLqew5cMdQntWc5mT7mh1CSgrnX')
],
'2of3', gap_limit=2
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('2N4z38eTKcWTZnfugCCfRyXtXWMLnn8HDfw')
# bootstrap wallet_online
funding_tx = Transaction('010000000001016207d958dc46508d706e4cd7d3bc46c5c2b02160e2578e5fad2efafc3927050301000000171600147a4fc8cdc1c2cf7abbcd88ef6d880e59269797acfdffffff02809698000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e48870d0916020000000017a914703f83ef20f3a52d908475dcad00c5144164d5a2870247304402203b1a5cb48cadeee14fa6c7bbf2bc581ca63104762ec5c37c703df778884cc5b702203233fa53a2a0bfbd85617c636e415da72214e359282cce409019319d031766c50121021112c01a48cc7ea13cba70493c6bffebb3e805df10ff4611d2bf559d26e25c04bf391400')
funding_txid = funding_tx.txid()
self.assertEqual('c59913a1fa9b1ef1f6928f0db490be67eeb9d7cb05aa565ee647e859642f3532', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, '2MuCQQHJNnrXzQzuqfUCfAwAjPqpyEHbgue', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325503
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx - first
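# after the first cosigner only one of the two required signatures is attached, so the tx
# stays incomplete and is re-serialized before being handed to the second cosigner.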
tx = wallet_offline1.sign_transaction(tx_copy, password=None)
self.assertFalse(tx.is_complete())
tx = Transaction(tx.serialize())
# sign tx - second
tx = wallet_offline2.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
tx = Transaction(tx.serialize())
self.assertEqual('010000000132352f6459e847e65e56aa05cbd7b9ee67be90b40d8f92f6f11e9bfaa11399c500000000fdfe0000483045022100cfe41e783629a2ad0b1f17cd2dbd69db05763fa7a22691131fa321ba3140d7cb02203fbda2ccc6212315464cd814d4e909b4f80a2361e3af0f9deda06478f91a0f3901483045022100b84fd63e957f2409558f63962fc91ba58334efde8b88ff53ca71da3d0fe7219702206001c6caeb30e18a7525fc72de0003e12646bf815b12fb132c1aadd6ffa1989c014c69522102afb4af9a91264e1c6dce3ebe5312801723270ac0ba8134b7b49129328fcb0f2821030b482838721a38d94847699fed8818b5c5f56500ef72f13489e365b65e5749cf2103e5db7969ae2f2576e6a061bf3bb2db16571e77ffb41e0b27170734359235cbce53aefdffffff02a02526000000000017a9141567b2578f300fa618ef0033611fd67087aff6d187585d72000000000017a91480c2353f6a7bc3c71e99e062655b19adb3dd2e4887bf391400',
str(tx))
self.assertEqual('bb4c28af28b970522c56ff0482cd98c2b78a90bec578bcede8a9e5cbec6ef5e7', tx.txid())
self.assertEqual('bb4c28af28b970522c56ff0482cd98c2b78a90bec578bcede8a9e5cbec6ef5e7', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_hd_multisig_online_addr_p2wsh_p2sh(self, mock_write):
# 2-of-2 p2sh-embedded segwit multisig
wallet_offline1 = WalletIntegrityHelper.create_multisig_wallet(
[
# bip39: finish seminar arrange erosion sunny coil insane together pretty lunch lunch rose, der: m/1234'/1'/0', p2wsh-p2sh multisig
keystore.from_xprv('Uprv9CvELvByqm8k2dpecJVjgLMX1z5DufEjY4fBC5YvdGF5WjGCa7GVJJ2fYni1tyuF7Hw83E6W2ZBjAhaFLZv2ri3rEsubkCd5avg4EHKoDBN'),
keystore.from_xpub('Upub5Qb8ik4Cnu8g97KLXKgVXHqY6tH8emQvqtBncjSKsyfTZuorPtTZgX7ovKKZHuuVGBVd1MTTBkWez1XXt2weN1sWBz6SfgRPQYEkNgz81QF')
],
'2of2', gap_limit=2
)
wallet_offline2 = WalletIntegrityHelper.create_multisig_wallet(
[
# bip39: square page wood spy oil story rebel give milk screen slide shuffle, der: m/1234'/1'/0', p2wsh-p2sh multisig
keystore.from_xprv('Uprv9BbnKEXJxXaNvdEsRJ9VA9toYrSeFJh5UfGBpM2iKe8Uh7UhrM9K8ioL53s8gvCoGfirHHaqpABDAE7VUNw8LNU1DMJKVoWyeNKu9XcDC19'),
keystore.from_xpub('Upub5RuakRisg8h3F7u7iL2k3UJFa1uiK7xauHamzTxYBbn4PXbM7eajr6M9Q2VCr6cVGhfhqWQqxnABvtSATuVM1xzxk4nA189jJwzaMn1QX7V')
],
'2of2', gap_limit=2
)
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('2MsHQRm1pNi6VsmXYRxYMcCTdPu7Xa1RyFe')
# bootstrap wallet_online
funding_tx = Transaction('0100000000010118d494d28e5c3bf61566ca0313e22c3b561b888a317d689cc8b47b947adebd440000000017160014aec84704ea8508ddb94a3c6e53f0992d33a2a529fdffffff020f0925000000000017a91409f7aae0265787a02de22839d41e9c927768230287809698000000000017a91400698bd11c38f887f17c99846d9be96321fbf989870247304402206b906369f4075ebcfc149f7429dcfc34e11e1b7bbfc85d1185d5e9c324be0d3702203ce7fc12fd3131920fbcbb733250f05dbf7d03e18a4656232ee69d5c54dd46bd0121028a4b697a37f3f57f6e53f90db077fa9696095b277454fda839c211d640d48649c0391400')
funding_txid = funding_tx.txid()
self.assertEqual('54356de9e156b85c8516fd4d51bdb68b5513f58b4a6147483978ae254627ee3e', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, '2N8CtJRwxb2GCaiWWdSHLZHHLoZy53CCyxf', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325504
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx - first
tx = wallet_offline1.sign_transaction(tx_copy, password=None)
self.assertFalse(tx.is_complete())
self.assertEqual('6a58a51591142429203b62b6ddf6b799a6926882efac229998c51bee6c3573eb', tx.txid())
tx = Transaction(tx.serialize())
# sign tx - second
tx = wallet_offline2.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
tx = Transaction(tx.serialize())
self.assertEqual('010000000001013eee274625ae78394847614a8bf513558bb6bd514dfd16855cb856e1e96d355401000000232200206ee8d4bb1277b7dbe1d4e49b880993aa993f417a9101cb23865c7c7258732704fdffffff02a02526000000000017a914a4189ef02c95cfe36f8e880c6cb54dff0837b22687585d72000000000017a91400698bd11c38f887f17c99846d9be96321fbf98987040047304402205a9dd9eb5676196893fb08f60079a2e9f567ee39614075d8c5d9fab0f11cbbc7022039640855188ebb7bccd9e3f00b397a888766d42d00d006f1ca7457c15449285f014730440220234f6648c5741eb195f0f4cd645298a10ce02f6ef557d05df93331e21c4f58cb022058ce2af0de1c238c4a8dd3b3c7a9a0da6e381ddad7593cddfc0480f9fe5baadf0147522102975c00f6af579f9a1d283f1e5a43032deadbab2308aef30fb307c0cfe54777462102d3f47041b424a84898e315cc8ef58190f6aec79c178c12de0790890ba7166e9c52aec0391400',
str(tx))
self.assertEqual('6a58a51591142429203b62b6ddf6b799a6926882efac229998c51bee6c3573eb', tx.txid())
self.assertEqual('96d0bca1001778c54e4c3a07929fab5562c5b5a23fd1ca3aa3870cc5df2bf97d', tx.wtxid())
@needs_test_with_all_ecc_implementations
@mock.patch.object(storage.WalletStorage, '_write')
def test_sending_offline_hd_multisig_online_addr_p2wsh(self, mock_write):
# 2-of-3 p2wsh multisig
wallet_offline1 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('bitter grass shiver impose acquire brush forget axis eager alone wine silver', '', True),
keystore.from_xpub('Vpub5fcdcgEwTJmbmqAktuK8Kyq92fMf7sWkcP6oqAii2tG47dNbfkGEGUbfS9NuZaRywLkHE6EmUksrqo32ZL3ouLN1HTar6oRiHpDzKMAF1tf'),
keystore.from_xpub('Vpub5fjkKyYnvSS4wBuakWTkNvZDaBM2vQ1MeXWq368VJHNr2eT8efqhpmZ6UUkb7s2dwCXv2Vuggjdhk4vZVyiAQTwUftvff73XcUGq2NQmWra')
],
'2of3', gap_limit=2
)
wallet_offline2 = WalletIntegrityHelper.create_multisig_wallet(
[
keystore.from_seed('snow nest raise royal more walk demise rotate smooth spirit canyon gun', '', True),
keystore.from_xpub('Vpub5fjkKyYnvSS4wBuakWTkNvZDaBM2vQ1MeXWq368VJHNr2eT8efqhpmZ6UUkb7s2dwCXv2Vuggjdhk4vZVyiAQTwUftvff73XcUGq2NQmWra'),
keystore.from_xpub('Vpub5gSKXzxK7FeKQedu2q1z9oJWxqvX72AArW3HSWpEhc8othDH8xMDu28gr7gf17sp492BuJod8Tn7anjvJrKpETwqnQqX7CS8fcYyUtedEMk')
],
'2of3', gap_limit=2
)
# ^ third seed: hedgehog sunset update estate number jungle amount piano friend donate upper wool
wallet_online = WalletIntegrityHelper.create_imported_wallet(privkeys=False)
wallet_online.import_address('tb1q83p6eqxkuvq4eumcha46crpzg4nj84s9p0hnynkxg8nhvfzqcc7q4erju6')
# bootstrap wallet_online
funding_tx = Transaction('0100000000010132352f6459e847e65e56aa05cbd7b9ee67be90b40d8f92f6f11e9bfaa11399c501000000171600142e5d579693b2a7679622935df94d9f3c84909b24fdffffff0280969800000000002200203c43ac80d6e3015cf378bf6bac0c22456723d6050bef324ec641e7762440c63c83717d010000000017a91441b772909ad301b41b76f4a3c5058888a7fe6f9a8702483045022100de54689f74b8efcce7fdc91e40761084686003bcd56c886ee97e75a7e803526102204dea51ae5e7d01bd56a8c336c64841f7fe02a8b101fa892e13f2d079bb14e6bf012102024e2f73d632c49f4b821ccd3b6da66b155427b1e5b1c4688cefd5a4b4bfa404c1391400')
funding_txid = funding_tx.txid()
self.assertEqual('643a7ab9083d0227dd9df314ce56b18d279e6018ff975079dfaab82cd7a66fa3', funding_txid)
wallet_online.receive_tx_callback(funding_txid, funding_tx, TX_HEIGHT_UNCONFIRMED)
# create unsigned tx
outputs = [(bitcoin.TYPE_ADDRESS, '2MyoZVy8T1t94yLmyKu8DP1SmbWvnxbkwRA', 2500000)]
tx = wallet_online.mktx(outputs=outputs, password=None, config=self.config, fee=5000)
tx.set_rbf(True)
tx.locktime = 1325505
self.assertFalse(tx.is_complete())
self.assertEqual(1, len(tx.inputs()))
tx_copy = Transaction(tx.serialize())
self.assertTrue(wallet_online.is_mine(wallet_online.get_txin_address(tx_copy.inputs()[0])))
self.assertEqual(tx.txid(), tx_copy.txid())
# sign tx - first
tx = wallet_offline1.sign_transaction(tx_copy, password=None)
self.assertFalse(tx.is_complete())
self.assertEqual('32e946761b4e718c1fa8d044db9e72d5831f6395eb284faf2fb5c4af0743e501', tx.txid())
tx = Transaction(tx.serialize())
# sign tx - second
tx = wallet_offline2.sign_transaction(tx, password=None)
self.assertTrue(tx.is_complete())
tx = Transaction(tx.serialize())
self.assertEqual('01000000000101a36fa6d72cb8aadf795097ff18609e278db156ce14f39ddd27023d08b97a3a640000000000fdffffff02a02526000000000017a91447ee5a659f6ffb53f7e3afc1681b6415f3c00fa187585d7200000000002200203c43ac80d6e3015cf378bf6bac0c22456723d6050bef324ec641e7762440c63c04004730440220629d89626585f563202e6b38ceddc26ccd00737e0b7ee4239b9266ef9174ea2f02200b74828399a2e35ed46c9b484af4817438d5fea890606ebb201b821944db1fdc0147304402205d1a59c84c419992069e9764a7992abca6a812cc5dfd4f0d6515d4283e660ce802202597a38899f31545aaf305629bd488f36bf54e4a05fe983932cafbb3906efb8f016952210223f815ab09f6bfc8519165c5232947ae89d9d43d678fb3486f3b28382a2371fa210273c529c2c9a99592f2066cebc2172a48991af2b471cb726b9df78c6497ce984e2102aa8fc578b445a1e4257be6b978fcece92980def98dce0e1eb89e7364635ae94153aec1391400',
str(tx))
self.assertEqual('32e946761b4e718c1fa8d044db9e72d5831f6395eb284faf2fb5c4af0743e501', tx.txid())
self.assertEqual('4376fa5f1f6cb37b1f3956175d3bd4ef6882169294802b250a3c672f3ff431c1', tx.wtxid())
class TestWalletHistory_SimpleRandomOrder(TestCaseForTestnet):
transactions = {
"0f4972c84974b908a58dda2614b68cf037e6c03e8291898c719766f213217b67": "01000000029d1bdbe67f0bd0d7bd700463f5c29302057c7b52d47de9e2ca5069761e139da2000000008b483045022100a146a2078a318c1266e42265a369a8eef8993750cb3faa8dd80754d8d541d5d202207a6ab8864986919fd1a7fd5854f1e18a8a0431df924d7a878ec3dc283e3d75340141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfeffffff9d1bdbe67f0bd0d7bd700463f5c29302057c7b52d47de9e2ca5069761e139da2010000008a47304402201c7fa37b74a915668b0244c01f14a9756bbbec1031fb69390bcba236148ab37e02206151581f9aa0e6758b503064c1e661a726d75c6be3364a5a121a8c12cf618f64014104dc28da82e141416aaf771eb78128d00a55fdcbd13622afcbb7a3b911e58baa6a99841bfb7b99bcb7e1d47904fda5d13fdf9675cdbbe73e44efcc08165f49bac6feffffff02b0183101000000001976a914ca14915184a2662b5d1505ce7142c8ca066c70e288ac005a6202000000001976a9145eb4eeaefcf9a709f8671444933243fbd05366a388ac54c51200",
"2791cdc98570cc2b6d9d5b197dc2d002221b074101e3becb19fab4b79150446d": "010000000132201ff125888a326635a2fc6e971cd774c4d0c1a757d742d0f6b5b020f7203a050000006a47304402201d20bb5629a35b84ff9dd54788b98e265623022894f12152ac0e6158042550fe02204e98969e1f7043261912dd0660d3da64e15acf5435577fc02a00eccfe76b323f012103a336ad86546ab66b6184238fe63bb2955314be118b32fa45dd6bd9c4c5875167fdffffff0254959800000000001976a9148d2db0eb25b691829a47503006370070bc67400588ac80969800000000001976a914f96669095e6df76cfdf5c7e49a1909f002e123d088ace8ca1200",
"2d216451b20b6501e927d85244bcc1c7c70598332717df91bb571359c358affd": "010000000001036cdf8d2226c57d7cc8485636d8e823c14790d5f24e6cf38ba9323babc7f6db2901000000171600143fc0dbdc2f939c322aed5a9c3544468ec17f5c3efdffffff507dce91b2a8731636e058ccf252f02b5599489b624e003435a29b9862ccc38c0200000017160014c50ff91aa2a790b99aa98af039ae1b156e053375fdffffff6254162cf8ace3ddfb3ec242b8eade155fa91412c5bde7f55decfac5793743c1010000008b483045022100de9599dcd7764ca8d4fcbe39230602e130db296c310d4abb7f7ae4d139c4d46402200fbfd8e6dc94d90afa05b0c0eab3b84feb465754db3f984fbf059447282771c30141045eecefd39fabba7b0098c3d9e85794e652bdbf094f3f85a3de97a249b98b9948857ea1e8209ee4f196a6bbcfbad103a38698ee58766321ba1cdee0cbfb60e7b2fdffffff01e85af70100000000160014e8d29f07cd5f813317bec4defbef337942d85d74024730440220218049aee7bbd34a7fa17f972a8d24a0469b0131d943ef3e30860401eaa2247402203495973f006e6ee6ae74a83228623029f238f37390ee4b587d95cdb1d1aaee9901210392ba263f3a2b260826943ff0df25e9ca4ef603b98b0a916242c947ae0626575f02473044022002603e5ceabb4406d11aedc0cccbf654dd391ce68b6b2228a40e51cf8129310d0220533743120d93be8b6c1453973935b911b0a2322e74708d23e8b5f90e74b0f192012103221b4ee0f508ba595fc1b9c2252ed9d03e99c73b97344dae93263c68834f034800ed161300",
"31494e7e9f42f4bd736769b07cc602e2a1019617b2c72a03ec945b667aada78f": "0100000000010454022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a000000008b483045022100ea8fe74db2aba23ad36ac66aaa481bad2b4d1b3c331869c1d60a28ce8cfad43c02206fa817281b33fbf74a6dd7352bdc5aa1d6d7966118a4ad5b7e153f37205f1ae80141045f7ba332df2a7b4f5d13f246e307c9174cfa9b8b05f3b83410a3c23ef8958d610be285963d67c7bc1feb082f168fa9877c25999963ff8b56b242a852b23e25edfdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a01000000171600146dfe07e12af3db7c715bf1c455f8517e19c361e7fdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a020000006a47304402200b1fb89e9a772a8519294acd61a53a29473ce76077165447f49a686f1718db5902207466e2e8290f84114dc9d6c56419cb79a138f03d7af8756de02c810f19e4e03301210222bfebe09c2638cfa5aa8223fb422fe636ba9675c5e2f53c27a5d10514f49051fdffffff54022b1b4d3b45e7fcac468de2d6df890a9f41050c05d80e68d4b083f728e76a0300000000fdffffff018793140d000000001600144b3e27ddf4fc5f367421ee193da5332ef351b700000247304402207ba52959938a3853bcfd942d8a7e6a181349069cde3ea73dbde43fa9669b8d5302207a686b92073863203305cb5d5550d88bdab0d21b9e9761ba4a106ea3970e08d901210265c1e014112ed19c9f754143fb6a2ff89f8630d62b33eb5ae708c9ea576e61b50002473044022029e868a905aa3ecae6eafcbd5959aefff0e5f39c1fc7a131a174828806e74e5202202f0aaa7c3cb3d9a9d526e5428ce37c0f0af0d774aa30b09ded8bc2230e7ffaf2012102fe0104455dc52b1689bba130664e452642180eb865217acfc6997260b7d946ae22c71200",
"336eee749da7d1c537fd5679157fae63005bfd4bb8cf47ae73600999cbc9beaa": "0100000000010232201ff125888a326635a2fc6e971cd774c4d0c1a757d742d0f6b5b020f7203a020000006a4730440220198c0ba2b2aefa78d8cca01401d408ecdebea5ac05affce36f079f6e5c8405ca02200eabb1b9a01ff62180cf061dfacedba6b2e07355841b9308de2d37d83489c7b80121031c663e5534fe2a6de816aded6bb9afca09b9e540695c23301f772acb29c64a05fdfffffffb28ff16811d3027a2405be68154be8fdaff77284dbce7a2314c4107c2c941600000000000fdffffff015e104f01000000001976a9146dfd56a0b5d0c9450d590ad21598ecfeaa438bd788ac000247304402207d6dc521e3a4577685535f098e5bac4601aa03658b924f30bf7afef1850e437e022045b76771d8b6ca1939352d6b759fca31029e5b2edffa44dc747fe49770e746cd012102c7f36d4ceed353b90594ebaf3907972b6d73289bdf4707e120de31ec4e1eb11679f31200",
"3a6ed17d34c49dfdf413398e113cf5f71710d59e9f4050bbc601d513a77eb308": "010000000168091e76227e99b098ef8d6d5f7c1bb2a154dd49103b93d7b8d7408d49f07be0000000008a47304402202f683a63af571f405825066bd971945a35e7142a75c9a5255d364b25b7115d5602206c59a7214ae729a519757e45fdc87061d357813217848cf94df74125221267ac014104aecb9d427e10f0c370c32210fe75b6e72ccc4f415076cf1a6318fbed5537388862c914b29269751ab3a04962df06d96f5f4f54e393a0afcbfa44b590385ae61afdffffff0240420f00000000001976a9145f917fd451ca6448978ebb2734d2798274daf00b88aca8063d00000000001976a914e1232622a96a04f5e5a24ca0792bb9c28b089d6e88ace9ca1200",
"475c149be20c8a73596fad6cb8861a5af46d4fcf8e26a9dbf6cedff7ff80b70d": "01000000013a7e6f19a963adc7437d2f3eb0936f1fc9ef4ba7e083e19802eb1111525a59c2000000008b483045022100958d3931051306489d48fe69b32561e0a16e82a2447c07be9d1069317084b5e502202f70c2d9be8248276d334d07f08f934ffeea83977ad241f9c2de954a2d577f94014104d950039cec15ad10ad4fb658873bc746148bc861323959e0c84bf10f8633104aa90b64ce9f80916ab0a4238e025dcddf885b9a2dd6e901fe043a433731db8ab4fdffffff02a086010000000000160014bbfab2cc3267cea2df1b68c392cb3f0294978ca922940d00000000001976a914760f657c67273a06cad5b1d757a95f4ed79f5a4b88ac4c8d1300",
"56a65810186f82132cea35357819499468e4e376fca685c023700c75dc3bd216": "01000000000101614b142aeeb827d35d2b77a5b11f16655b6776110ddd9f34424ff49d85706cf90200000000fdffffff02784a4c00000000001600148464f47f35cbcda2e4e5968c5a3a862c43df65a1404b4c00000000001976a914c9efecf0ecba8b42dce0ae2b28e3ea0573d351c988ac0247304402207d8e559ed1f56cb2d02c4cb6c95b95c470f4b3cb3ce97696c3a58e39e55cd9b2022005c9c6f66a7154032a0bb2edc1af1f6c8f488bec52b6581a3a780312fb55681b0121024f83b87ac3440e9b30cec707b7e1461ecc411c2f45520b45a644655528b0a68ae9ca1200",
"6ae728f783b0d4680ed8050c05419f0a89dfd6e28d46acfce7453b4d1b2b0254": "0100000000010496941b9f18710b39bacde890e39a7fa401e6bf49985857cb7adfb8a45147ef1e000000001716001441aec99157d762708339d7faf7a63a8c479ed84cfdffffff96941b9f18710b39bacde890e39a7fa401e6bf49985857cb7adfb8a45147ef1e0100000000fdffffff1a5d1e4ca513983635b0df49fd4f515c66dd26d7bff045cfbd4773aa5d93197f000000006a4730440220652145460092ef42452437b942cb3f563bf15ad90d572d0b31d9f28449b7a8dd022052aae24f58b8f76bd2c9cf165cc98623f22870ccdbef1661b6dbe01c0ef9010f01210375b63dd8e93634bbf162d88b25d6110b5f5a9638f6fe080c85f8b21c2199a1fdfdffffff1a5d1e4ca513983635b0df49fd4f515c66dd26d7bff045cfbd4773aa5d93197f010000008a47304402207517c52b241e6638a84b05385e0b3df806478c2e444f671ca34921f6232ee2e70220624af63d357b83e3abe7cdf03d680705df0049ec02f02918ee371170e3b4a73d014104de408e142c00615294813233cdfe9e7774615ae25d18ba4a1e3b70420bb6666d711464518457f8b947034076038c6f0cfc8940d85d3de0386e0ad88614885c7cfdffffff0480969800000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac809698000000000017a914f2a76207d7b54bd34282281205923841341d9e1f87002d3101000000001976a914b8d4651937cd7db5bcf5fc98e6d2d8cfa131e85088ac743db20a00000000160014c7d0df09e03173170aed0247243874c6872748ed02483045022100b932cda0aeb029922e126568a48c05d79317747dcd77e61dce44e190e140822002202d13f84338bb272c531c4086277ac11e166c59612f4aefa6e20f78455bdc09970121028e6808a8ac1e9ede621aaabfcad6f86662dbe0ace0236f078eb23c24bc88bd5e02483045022100d74a253262e3898626c12361ba9bb5866f9303b42eec0a55ced0578829e2e61e022059c08e61d90cd63c84de61c796c9d1bc1e2f8217892a7c07b383af357ddd7a730121028641e89822127336fc12ff99b1089eb1a124847639a0e98d17ff03a135ad578b000020c71200",
"72419d187c61cfc67a011095566b374dc2c01f5397e36eafe68e40fc44474112": "0100000002677b2113f26697718c8991823ec0e637f08cb61426da8da508b97449c872490f000000008b4830450221009c50c0f56f34781dfa7b3d540ac724436c67ffdc2e5b2d5a395c9ebf72116ef802205a94a490ea14e4824f36f1658a384aeaecadd54839600141eb20375a49d476d1014104c291245c2ee3babb2a35c39389df56540867f93794215f743b9aa97f5ba114c4cdee8d49d877966728b76bc649bb349efd73adef1d77452a9aac26f8c51ae1ddfdffffff677b2113f26697718c8991823ec0e637f08cb61426da8da508b97449c872490f010000008b483045022100ae0b286493491732e7d3f91ab4ac4cebf8fe8a3397e979cb689e62d350fdcf2802206cf7adf8b29159dd797905351da23a5f6dab9b9dbf5028611e86ccef9ff9012e014104c62c4c4201d5c6597e5999f297427139003fdb82e97c2112e84452d1cfdef31f92dd95e00e4d31a6f5f9af0dadede7f6f4284b84144e912ff15531f36358bda7fdffffff019f7093030000000022002027ce908c4ee5f5b76b4722775f23e20c5474f459619b94040258290395b88afb6ec51200",
"76bcf540b27e75488d95913d0950624511900ae291a37247c22d996bb7cde0b4": "0100000001f4ba9948cdc4face8315c7f0819c76643e813093ffe9fbcf83d798523c7965db000000006a473044022061df431a168483d144d4cffe1c5e860c0a431c19fc56f313a899feb5296a677c02200208474cc1d11ad89b9bebec5ec00b1e0af0adaba0e8b7f28eed4aaf8d409afb0121039742bf6ab70f12f6353e9455da6ed88f028257950450139209b6030e89927997fdffffff01d4f84b00000000001976a9140b93db89b6bf67b5c2db3370b73d806f458b3d0488ac0a171300",
"7f19935daa7347bdcf45f0bfd726dd665c514ffd49dfb035369813a54c1e5d1a": "01000000000102681b6a8dd3a406ee10e4e4aece3c2e69f6680c02f53157be6374c5c98322823a00000000232200209adfa712053a06cc944237148bcefbc48b16eb1dbdc43d1377809bcef1bea9affdffffff681b6a8dd3a406ee10e4e4aece3c2e69f6680c02f53157be6374c5c98322823a0100000023220020f40ed2e3fbffd150e5b74f162c3ce5dae0dfeba008a7f0f8271cf1cf58bfb442fdffffff02801d2c04000000001976a9140cc01e19090785d629cdcc98316f328df554de4f88ac6d455d05000000001976a914b9e828990a8731af4527bcb6d0cddf8d5ffe90ce88ac040047304402206eb65bd302eefae24eea05781e8317503e68584067d35af028a377f0751bb55b0220226453d00db341a4373f1bcac2391f886d3a6e4c30dd15133d1438018d2aad24014730440220343e578591fab0236d28fb361582002180d82cb1ba79eec9139a7a9519fca4260220723784bd708b4a8ed17bb4b83a5fd2e667895078e80eec55119015beb3592fd2016952210222eca5665ed166d090a5241d9a1eb27a92f85f125aaf8df510b2b5f701f3f534210227bca514c22353a7ae15c61506522872afecf10df75e599aabe4d562d0834fce2103601d7d49bada5a57a4832eafe4d1f1096d7b0b051de4a29cd5fc8ad62865e0a553ae0400483045022100b15ea9daacd809eb4d783a1449b7eb33e2965d4229e1a698db10869299dddc670220128871ffd27037a3e9dac6748ce30c14b145dd7f9d56cc9dcde482461fb6882601483045022100cb659e1de65f8b87f64d1b9e62929a5d565bbd13f73a1e6e9dd5f4efa024b6560220667b13ce2e1a3af2afdcedbe83e2120a6e8341198a79efb855b8bc5f93b4729f0169522102d038600af253cf5019f9d5637ca86763eca6827ed7b2b7f8cc6326dffab5eb68210315cdb32b7267e9b366fb93efe29d29705da3db966e8c8feae0c8eb51a7cf48e82103f0335f730b9414acddad5b3ee405da53961796efd8c003e76e5cd306fcc8600c53ae1fc71200",
"9de08bcafc602a3d2270c46cbad1be0ef2e96930bec3944739089f960652e7cb": "010000000001013409c10fd732d9e4b3a9a1c4beb511fa5eb32bc51fd169102a21aa8519618f800000000000fdffffff0640420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac40420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac40420f00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac80841e00000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac64064a000000000016001469825d422ca80f2a5438add92d741c7df45211f280969800000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac02483045022100b4369b18bccb74d72b6a38bd6db59122a9e8af3356890a5ecd84bdb8c7ffe317022076a5aa2b817be7b3637d179106fccebb91acbc34011343c8e8177acc2da4882e0121033c8112bbf60855f4c3ae489954500c4b8f3408665d8e1f63cf3216a76125c69865281300",
"a29d131e766950cae2e97dd4527b7c050293c2f5630470bdd7d00b7fe6db1b9d": "010000000400899af3606e93106a5d0f470e4e2e480dfc2fd56a7257a1f0f4d16fd5961a0f000000006a47304402205b32a834956da303f6d124e1626c7c48a30b8624e33f87a2ae04503c87946691022068aa7f936591fb4b3272046634cf526e4f8a018771c38aff2432a021eea243b70121034bb61618c932b948b9593d1b506092286d9eb70ea7814becef06c3dfcc277d67fdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753000000006b483045022100de775a580c6cb47061d5a00c6739033f468420c5719f9851f32c6992610abd3902204e6b296e812bb84a60c18c966f6166718922780e6344f243917d7840398eb3db0121025d7317c6910ad2ad3d29a748c7796ddf01e4a8bc5e3bf2a98032f0a20223e4aafdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753010000006a4730440220615a26f38bf6eb7043794c08fb81f273896b25783346332bec4de8dfaf7ed4d202201c2bc4515fc9b07ded5479d5be452c61ce785099f5e33715e9abd4dbec410e11012103caa46fcb1a6f2505bf66c17901320cc2378057c99e35f0630c41693e97ebb7cffdffffff4bc2dcc375abfc7f97d8e8c482f4c7b8bc275384f5271678a32c35d955170753030000006b483045022100c8fba762dc50041ee3d5c7259c01763ed913063019eefec66678fb8603624faa02200727783ccbdbda8537a6201c63e30c0b2eb9afd0e26cb568d885e6151ef2a8540121027254a862a288cfd98853161f575c49ec0b38f79c3ef0bf1fb89986a3c36a8906fdffffff0240787d01000000001976a9149cd3dfb0d87a861770ae4e268e74b45335cf00ab88ac3bfc1502000000001976a914c30f2af6a79296b6531bf34dba14c8419be8fb7d88ac52c51200",
"c1433779c5faec5df5e7bdc51214a95f15deeab842c23efbdde3acf82c165462": "0100000003aabec9cb99096073ae47cfb84bfd5b0063ae7f157956fd37c5d1a79d74ee6e33000000008b4830450221008136fc880d5e24fdd9d2a43f5085f374fef013b814f625d44a8075104981d92a0220744526ec8fc7887c586968f22403f0180d54c9b7ff8db9b553a3c4497982e8250141047b8b4c91c5a93a1f2f171c619ca41770427aa07d6de5130c3ba23204b05510b3bd58b7a1b35b9c4409104cfe05e1677fc8b51c03eac98b206e5d6851b31d2368fdffffff16d23bdc750c7023c085a6fc76e3e468944919783535ea2c13826f181058a656010000008a47304402204148410f2d796b1bb976b83904167d28b65dcd7c21b3876022b4fa70abc86280022039ea474245c3dc8cd7e5a572a155df7a6a54496e50c73d9fed28e76a1cf998c00141044702781daed201e35aa07e74d7bda7069e487757a71e3334dc238144ad78819de4120d262e8488068e16c13eea6092e3ab2f729c13ef9a8c42136d6365820f7dfdffffff68091e76227e99b098ef8d6d5f7c1bb2a154dd49103b93d7b8d7408d49f07be0010000008b4830450221008228af51b61a4ee09f58b4a97f204a639c9c9d9787f79b2fc64ea54402c8547902201ed81fca828391d83df5fbd01a3fa5dd87168c455ed7451ba8ccb5bf06942c3b0141046fcdfab26ac08c827e68328dbbf417bbe7577a2baaa5acc29d3e33b3cc0c6366df34455a9f1754cb0952c48461f71ca296b379a574e33bcdbb5ed26bad31220bfdffffff0210791c00000000001976a914a4b991e7c72996c424fe0215f70be6aa7fcae22c88ac80c3c901000000001976a914b0f6e64ea993466f84050becc101062bb502b4e488ac7af31200",
"c2595a521111eb0298e183e0a74befc91f6f93b03e2f7d43c7ad63a9196f7e3a": "01000000018557003cb450f53922f63740f0f77db892ef27e15b2614b56309bfcee96a0ad3010000006a473044022041923c905ae4b5ed9a21aa94c60b7dbcb8176d58d1eb1506d9fb1e293b65ce01022015d6e9d2e696925c6ad46ce97cc23dec455defa6309b839abf979effc83b8b160121029332bf6bed07dcca4be8a5a9d60648526e205d60c75a21291bffcdefccafdac3fdffffff01c01c0f00000000001976a914a2185918aa1006f96ed47897b8fb620f28a1b09988ac01171300",
"e07bf0498d40d7b8d7933b1049dd54a1b21b7c5f6d8def98b0997e22761e0968": "01000000016d445091b7b4fa19cbbee30141071b2202d0c27d195b9d6d2bcc7085c9cd9127010000008b483045022100daf671b52393af79487667eddc92ebcc657e8ae743c387b25d1c1a2e19c7a4e7022015ef2a52ea7e94695de8898821f9da539815775516f18329896e5fc52a3563b30141041704a3daafaace77c8e6e54cf35ed27d0bf9bb8bcd54d1b955735ff63ec54fe82a80862d455c12e739108b345d585014bf6aa0cbd403817c89efa18b3c06d6b5fdffffff02144a4c00000000001976a9148942ac692ace81019176c4fb0ac408b18b49237f88ac404b4c00000000001976a914dd36d773acb68ac1041bc31b8a40ee504b164b2e88ace9ca1200",
"e453e7346693b507561691b5ea73f8eba60bfc8998056226df55b2fac88ba306": "010000000125af87b0c2ebb9539d644e97e6159ccb8e1aa80fe986d01f60d2f3f37f207ae8010000008b483045022100baed0747099f7b28a5624005d50adf1069120356ac68c471a56c511a5bf6972b022046fbf8ec6950a307c3c18ca32ad2955c559b0d9bbd9ec25b64f4806f78cadf770141041ea9afa5231dc4d65a2667789ebf6806829b6cf88bfe443228f95263730b7b70fb8b00b2b33777e168bcc7ad8e0afa5c7828842794ce3814c901e24193700f6cfdffffff02a0860100000000001976a914ade907333744c953140355ff60d341cedf7609fd88ac68830a00000000001976a9145d48feae4c97677e4ca7dcd73b0d9fd1399c962b88acc9cc1300",
"e87a207ff3f3d2601fd086e90fa81a8ecb9c15e6974e649d53b9ebc2b087af25": "01000000010db780fff7dfcef6dba9268ecf4f6df45a1a86b86cad6f59738a0ce29b145c47010000008a47304402202887ec6ec200e4e2b4178112633011cbdbc999e66d398b1ff3998e23f7c5541802204964bd07c0f18c48b7b9c00fbe34c7bc035efc479e21a4fa196027743f06095f0141044f1714ed25332bb2f74be169784577d0838aa66f2374f5d8cbbf216063626822d536411d13cbfcef1ff3cc1d58499578bc4a3c4a0be2e5184b2dd7963ef67713fdffffff02a0860100000000001600145bbdf3ba178f517d4812d286a40c436a9088076e6a0b0c00000000001976a9143fc16bef782f6856ff6638b1b99e4d3f863581d388acfbcb1300"
}
txid_list = sorted(list(transactions))
@classmethod
def create_old_wallet(cls):
ks = keystore.from_old_mpk('e9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b3')
# seed words: powerful random nobody notice nothing important anyway look away hidden message over
w = WalletIntegrityHelper.create_standard_wallet(ks, gap_limit=20)
# some txns are beyond gap limit:
w.create_new_address(for_change=True)
return w
@mock.patch.object(storage.WalletStorage, '_write')
def test_restoring_old_wallet_txorder1(self, mock_write):
w = self.create_old_wallet()
for i in [2, 12, 7, 9, 11, 10, 16, 6, 17, 1, 13, 15, 5, 8, 4, 0, 14, 18, 3]:
tx = Transaction(self.transactions[self.txid_list[i]])
w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual(27633300, sum(w.get_balance()))
@mock.patch.object(storage.WalletStorage, '_write')
def test_restoring_old_wallet_txorder2(self, mock_write):
w = self.create_old_wallet()
for i in [9, 18, 2, 0, 13, 3, 1, 11, 4, 17, 7, 14, 12, 15, 10, 8, 5, 6, 16]:
tx = Transaction(self.transactions[self.txid_list[i]])
w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual(27633300, sum(w.get_balance()))
@mock.patch.object(storage.WalletStorage, '_write')
def test_restoring_old_wallet_txorder3(self, mock_write):
w = self.create_old_wallet()
for i in [5, 8, 17, 0, 9, 10, 12, 3, 15, 18, 2, 11, 14, 7, 16, 1, 4, 6, 13]:
tx = Transaction(self.transactions[self.txid_list[i]])
w.receive_tx_callback(tx.txid(), tx, TX_HEIGHT_UNCONFIRMED)
self.assertEqual(27633300, sum(w.get_balance()))
class TestWalletHistory_EvilGapLimit(TestCaseForTestnet):
transactions = {
# txn A:
"511a35e240f4c8855de4c548dad932d03611a37e94e9203fdb6fc79911fe1dd4": "010000000001018aacc3c8f98964232ebb74e379d8ff4e800991eecfcf64bd1793954f5e50a8790100000000fdffffff0340420f0000000000160014dbf321e905d544b54b86a2f3ed95b0ac66a3ddb0ff0514000000000016001474f1c130d3db22894efb3b7612b2c924628d0d7e80841e000000000016001488492707677190c073b6555fb08d37e91bbb75d802483045022100cf2904e09ea9d2670367eccc184d92fcb8a9b9c79a12e4efe81df161077945db02203530276a3401d944cf7a292e0660f36ee1df4a1c92c131d2c0d31d267d52524901210215f523a412a5262612e1a5ef9842dc864b0d73dc61fb4c6bfd480a867bebb1632e181400",
# txn B:
"fde0b68938709c4979827caa576e9455ded148537fdb798fd05680da64dc1b4f": "01000000000101a317998ac6cc717de17213804e1459900fe257b9f4a3b9b9edd29806728277530100000000fdffffff03c0c62d00000000001600149543301687b1ca2c67718d55fbe10413c73ddec200093d00000000001600141bc12094a4475dcfbf24f9920dafddf9104ca95b3e4a4c0000000000160014b226a59f2609aa7da4026fe2c231b5ae7be12ac302483045022100f1082386d2ce81612a3957e2801803938f6c0066d76cfbd853918d4119f396df022077d05a2b482b89707a8a600013cb08448cf211218a462f2a23c2c0d80a8a0ca7012103f4aac7e189de53d95e0cb2e45d3c0b2be18e93420734934c61a6a5ad88dd541033181400",
# txn C:
"268fce617aaaa4847835c2212b984d7b7741fdab65de22813288341819bc5656": "010000000001014f1bdc64da8056d08f79db7f5348d1de55946e57aa7c8279499c703889b6e0fd0100000000fdffffff0260e316000000000016001445e9879cf7cd5b4a15df7ddcaf5c6dca0e1508bacc242600000000001600141bc12094a4475dcfbf24f9920dafddf9104ca95b02483045022100ae3618912f341fefee11b67e0047c47c88c4fa031561c3fafe993259dd14d846022056fa0a5b5d8a65942fa68bcc2f848fd71fa455ba42bc2d421b67eb49ba62aa4e01210394d8f4f06c2ea9c569eb050c897737a7315e7f2104d9b536b49968cc89a1f11033181400",
}
@classmethod
def create_wallet(cls):
ks = keystore.from_xpub('vpub5Vhmk4dEJKanDTTw6immKXa3thw45u3gbd1rPYjREB6viP13sVTWcH6kvbR2YeLtGjradr6SFLVt9PxWDBSrvw1Dc1nmd3oko3m24CQbfaJ')
# seed words: nephew work weather maze pyramid employ check permit garment scene kiwi smooth
w = WalletIntegrityHelper.create_standard_wallet(ks, gap_limit=20)
return w
@mock.patch.object(storage.WalletStorage, '_write')
def test_restoring_wallet_txorder1(self, mock_write):
w = self.create_wallet()
w.storage.put('stored_height', 1316917 + 100)
for txid in self.transactions:
tx = Transaction(self.transactions[txid])
w.transactions[tx.txid()] = tx
# txn A is an external incoming txn paying to addr (3) and (15)
# txn B is an external incoming txn paying to addr (4) and (25)
# txn C is an internal transfer txn from addr (25) -- to -- (1) and (25)
w.receive_history_callback('tb1qgh5c088he4d559wl0hw27hrdeg8p2z96pefn4q', # HD index 1
[('268fce617aaaa4847835c2212b984d7b7741fdab65de22813288341819bc5656', 1316917)],
{})
w.synchronize()
w.receive_history_callback('tb1qm0ejr6g964zt2jux5te7m9ds43n28hdsdz9ull', # HD index 3
[('511a35e240f4c8855de4c548dad932d03611a37e94e9203fdb6fc79911fe1dd4', 1316912)],
{})
w.synchronize()
w.receive_history_callback('tb1qj4pnq958k89zcem3342lhcgyz0rnmhkzl6x0cl', # HD index 4
[('fde0b68938709c4979827caa576e9455ded148537fdb798fd05680da64dc1b4f', 1316917)],
{})
w.synchronize()
w.receive_history_callback('tb1q3pyjwpm8wxgvquak240mprfhaydmkawcsl25je', # HD index 15
[('511a35e240f4c8855de4c548dad932d03611a37e94e9203fdb6fc79911fe1dd4', 1316912)],
{})
w.synchronize()
w.receive_history_callback('tb1qr0qjp99ygawul0eylxfqmt7alygye22mj33vej', # HD index 25
[('fde0b68938709c4979827caa576e9455ded148537fdb798fd05680da64dc1b4f', 1316917),
('268fce617aaaa4847835c2212b984d7b7741fdab65de22813288341819bc5656', 1316917)],
{})
w.synchronize()
self.assertEqual(9999788, sum(w.get_balance()))
|
py | 1a4fff7ab0010b29843087a4b82536be24319609 | from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import os
import sqlite3
import random
from panel import Ui_quiz
# Questions Database
conn = sqlite3.connect('questions.db')
c = conn.cursor()
# User information Database
conn2 = sqlite3.connect('info_user.db')
c2 = conn2.cursor()
c2.execute('''CREATE TABLE IF NOT EXISTS level(
level text
)''')
conn2.commit()
# time allowed per question, in seconds
time = 8
# answer question variable
answer_question = 0
check_answer = True
# Page status variable (question page/other page)
status_question = False
# level variable
level = 0
# check buy time and wrong option
status_buy_time = True
status_buy_option = True
class Root(QMainWindow):
def __init__(self):
global level
QMainWindow.__init__(self)
self.ui = Ui_quiz()
self.ui.setupUi(self)
self.oldPos = []
self.show()
# set timer
self.timer = QTimer(self)
self.timer.timeout.connect(self.timer_func)
self.timer.start(1000)
# set info user
self.ui.username.setText(os.getlogin())
self.ui.profile.setText(str(os.getlogin())[0].lower())
self.ui.username2.setText(os.getlogin())
# Set level
try:
c2.execute('SELECT * FROM level')
level = c2.fetchone()[0]
self.ui.level.setText(level)
self.ui.level2.setText(level)
        except (TypeError, IndexError):
            # first run: the level table is still empty, so seed it with level 1
            c2.execute('INSERT INTO level VALUES(1)')
            conn2.commit()
            level = '1'
            self.ui.level.setText(level)
            self.ui.level2.setText(level)
# Set Button
self.ui.letsgo.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.tech.clicked.connect(self.tech)
self.ui.sport.clicked.connect(self.sport)
self.ui.info.clicked.connect(self.info)
self.ui.cinema.clicked.connect(self.cinema)
self.ui.math.clicked.connect(self.math)
self.ui.nature.clicked.connect(self.nature)
# set option
self.ui.one.clicked.connect(self.one)
self.ui.two.clicked.connect(self.two)
self.ui.three.clicked.connect(self.three)
self.ui.four.clicked.connect(self.four)
# set Button end question
self.ui.end.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.end.clicked.connect(self.end_question)
self.ui.end2.clicked.connect(lambda: self.ui.pages.setCurrentWidget(self.ui.select))
self.ui.end2.clicked.connect(self.end_question)
# help user
self.ui.buy_option.clicked.connect(self.wrong_option)
self.ui.buy_time.clicked.connect(self.buy_time)
def mousePressEvent(self, evt):
self.oldPos = evt.globalPos()
def mouseMoveEvent(self, evt):
delta = QPoint(evt.globalPos() - self.oldPos)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.oldPos = evt.globalPos()
# Technology category
def tech(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.tech)
self.ui.next2.clicked.connect(self.tech)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM tech')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
# Sports category
def sport(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.sport)
self.ui.next2.clicked.connect(self.sport)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM Football')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def info(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.info)
self.ui.next2.clicked.connect(self.info)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM information')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def cinema(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.cinema)
self.ui.next2.clicked.connect(self.cinema)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM cinema')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def math(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.math)
self.ui.next2.clicked.connect(self.math)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM math')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
def nature(self):
global conn
global c
global time
global check_answer
global status_question
self.ui.next.clicked.connect(self.nature)
self.ui.next2.clicked.connect(self.nature)
self.ui.pages.setCurrentWidget(self.ui.question)
c.execute('SELECT * FROM nature')
questions = c.fetchall()
tedad = len(questions)
ran = random.randrange(0, tedad)
questions = questions[ran]
self.set_qu(questions[0], questions[1], questions[2], questions[3], questions[4], questions[5])
check_answer = True
status_question = True
time = 8
# Set option questions
def set_qu(self, question, one, two, three, four, answer):
global answer_question
global check_answer
global status_buy_option
global status_buy_time
# clear Ui
self.ui.quest.clear()
self.ui.quest_2.clear()
status_buy_time = True
status_buy_option = True
self.ui.line1.hide()
self.ui.line2.hide()
self.ui.line3.hide()
self.ui.line4.hide()
if len(question) <= 45:
self.ui.quest.setText(question)
self.ui.quest_2.clear()
else:
self.ui.quest.setText(question[:40])
self.ui.quest_2.setText(question[40:])
self.ui.quest_win.setText(question)
self.ui.quest_lost.setText(question)
self.ui.one.setText(one)
self.ui.two.setText(two)
self.ui.three.setText(three)
self.ui.four.setText(four)
answer_question = answer
if answer == 1:
self.ui.answer_win.setText(one)
self.ui.answer_lost.setText(one)
elif answer == 2:
self.ui.answer_win.setText(two)
self.ui.answer_lost.setText(two)
elif answer == 3:
self.ui.answer_win.setText(three)
self.ui.answer_lost.setText(three)
else:
self.ui.answer_win.setText(four)
self.ui.answer_lost.setText(four)
# One second timer
def timer_func(self):
global time
global status_question
global level
if status_question:
# timer
time -= 1
if len(str(time)) == 2:
self.ui.time.setText('00:'+str(time))
else:
self.ui.time.setText('00:0' + str(time))
if time == 0 and check_answer:
self.ui.pages.setCurrentWidget(self.ui.False_answer)
status_question = False
c2.execute('SELECT * FROM level')
level = c2.fetchone()[0]
self.ui.level.setText(level)
self.ui.level2.setText(level)
# Option one to four
def one(self):
self.check(1)
def two(self):
self.check(2)
def three(self):
self.check(3)
def four(self):
self.check(4)
# Check user answer
def check(self, user_answer):
global check_answer
global answer_question
global level
if user_answer == answer_question:
check_answer = False
self.ui.pages.setCurrentWidget(self.ui.True_answer)
new_level = float(level) + 1
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
else:
self.ui.pages.setCurrentWidget(self.ui.False_answer)
# help user (show wrong option)
def wrong_option(self):
global answer_question
global level
global status_buy_option
if status_buy_option:
status_buy_option = False
if answer_question != 1:
self.ui.line1.show()
elif answer_question != 2:
self.ui.line2.show()
elif answer_question != 3:
self.ui.line3.show()
elif answer_question != 4:
self.ui.line4.show()
new_level = float(level) - 0.5
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
# buy time
@staticmethod
def buy_time():
global time
global level
global status_buy_time
if status_buy_time:
time += 5
status_buy_time = False
new_level = float(level) - 0.5
sql_update_query = f"""Update level set level = {new_level} where level = {level}"""
c2.execute(sql_update_query)
conn2.commit()
# end question
@staticmethod
def end_question():
global status_question
status_question = False
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
root = Root()
sys.exit(app.exec_())
|
py | 1a4fffb9234b88c945fde1713d3e946128800487 | import cv2
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def convert_2d_to_3d(u, v, z, K):
v0 = K[1][2]
u0 = K[0][2]
fy = K[1][1]
fx = K[0][0]
x = (u - u0) * z / fx
y = (v - v0) * z / fy
return (x, y, z)
def feature_match(img1, img2):
r''' Find features on both images and match them pairwise
'''
max_n_features = 1000
# max_n_features = 500
use_flann = False # better not use flann
detector = cv2.xfeatures2d.SIFT_create(max_n_features)
# find the keypoints and descriptors with SIFT
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
if (des1 is None) or (des2 is None):
return [], []
des1 = des1.astype(np.float32)
des2 = des2.astype(np.float32)
if use_flann:
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
else:
        matcher = cv2.DescriptorMatcher_create('BruteForce')
matches = matcher.knnMatch(des1, des2, k=2)
good = []
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
if m.distance < 0.8 * n.distance:
good.append(m)
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
return pts1, pts2
def get_pose_pnp(rgb_curr, rgb_near, depth_curr, K):
gray_curr = rgb2gray(rgb_curr).astype(np.uint8)
gray_near = rgb2gray(rgb_near).astype(np.uint8)
height, width = gray_curr.shape
pts2d_curr, pts2d_near = feature_match(gray_curr,
gray_near) # feature matching
# dilation of depth
kernel = np.ones((4, 4), np.uint8)
depth_curr_dilated = cv2.dilate(depth_curr, kernel)
# extract 3d pts
pts3d_curr = []
    pts2d_near_filtered = []  # keep only feature points with depth in the current frame
for i, pt2d in enumerate(pts2d_curr):
# print(pt2d)
u, v = pt2d[0], pt2d[1]
z = depth_curr_dilated[v, u]
if z > 0:
xyz_curr = convert_2d_to_3d(u, v, z, K)
pts3d_curr.append(xyz_curr)
pts2d_near_filtered.append(pts2d_near[i])
# the minimal number of points accepted by solvePnP is 4:
if len(pts3d_curr) >= 4 and len(pts2d_near_filtered) >= 4:
pts3d_curr = np.expand_dims(np.array(pts3d_curr).astype(np.float32),
axis=1)
pts2d_near_filtered = np.expand_dims(
np.array(pts2d_near_filtered).astype(np.float32), axis=1)
# ransac
ret = cv2.solvePnPRansac(pts3d_curr,
pts2d_near_filtered,
np.asarray(K),
distCoeffs=None)
success = ret[0]
rotation_vector = ret[1]
translation_vector = ret[2]
return (success, rotation_vector, translation_vector)
else:
return (0, None, None)
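# A minimal usage sketch (not part of the original module). It assumes two RGB
# frames and a depth map for the current frame are available on disk
# ('rgb_curr.png', 'rgb_near.png', 'depth_curr.npy' are hypothetical names),
# plus a 3x3 pinhole intrinsics matrix K whose values below are only
# illustrative. Note that cv2.xfeatures2d requires opencv-contrib-python.
if __name__ == '__main__':
    rgb_curr = cv2.cvtColor(cv2.imread('rgb_curr.png'), cv2.COLOR_BGR2RGB)
    rgb_near = cv2.cvtColor(cv2.imread('rgb_near.png'), cv2.COLOR_BGR2RGB)
    depth_curr = np.load('depth_curr.npy')  # H x W, 0 where depth is missing
    K = [[525.0, 0.0, 319.5],  # fx, 0, cx (illustrative values)
         [0.0, 525.0, 239.5],  # 0, fy, cy
         [0.0, 0.0, 1.0]]
    success, rvec, tvec = get_pose_pnp(rgb_curr, rgb_near, depth_curr, K)
    if success:
        R, _ = cv2.Rodrigues(rvec)  # Rodrigues vector -> 3x3 rotation matrix
        print('rotation:\n', R)
        print('translation:\n', tvec)
    else:
        print('not enough matched features with valid depth for PnP')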
|
py | 1a5000fb5925a04a1cd7e75c9ff3e4fe189f940c | from django.test import TestCase
from model_mommy import mommy
from ..models import Ayah, Sura
class TestQuranTextEndpointsURI(TestCase):
@classmethod
def setUpTestData(cls):
cls.sura = mommy.make(Sura, name='Al-Fateha', index=1)
cls.ayah = mommy.make(Ayah, number=1, sura=cls.sura,
text='بسم الله الرحمن الرحيم')
def test_sura_list_url_with_tail_slash(self):
url_with_tail_slash = '/quran/'
self.assertEqual(200, self.client.get(url_with_tail_slash).status_code)
def test_sura_list_url_without_tail_slash(self):
url_without_tail_slash = '/quran'
        # the list URL without a trailing slash is redirected (301) to the slashed URL
self.assertEqual(301, self.client.get(url_without_tail_slash).status_code)
def test_sura_details_url_with_tail_slash(self):
url_with_tail_slash = '/quran/1/1/'
self.assertEqual(200, self.client.get(url_with_tail_slash).status_code)
def test_sura_details_url_without_tail_slash(self):
url_without_tail_slash = '/quran/1/1'
self.assertEqual(200, self.client.get(url_without_tail_slash).status_code)
|
py | 1a50012d034f082de66cbf5fcb8981c4ebebac5f | from selenium.webdriver.remote.webdriver import WebDriver
from .globals import ENDPOINTS
def has_needed_cookies(driver: WebDriver):
"""
    Callback function to check whether the important session cookies exist in a webdriver
"""
kosogha = driver.get_cookie("kosogha")
ci_session = driver.get_cookie("ci_session")
cookie = driver.get_cookie("cookie")
return kosogha and ci_session and cookie
def recaptcha_ok(driver: WebDriver) -> bool:
"""
Check if recaptcha is checked
"""
recaptcha_anchor = driver.find_element_by_id("recaptcha-anchor")
aria_checked_attrib = recaptcha_anchor.get_attribute("aria-checked")
return "false" not in aria_checked_attrib
def redirected_into_welcome_page(driver: WebDriver) -> bool:
return driver.current_url == ENDPOINTS["welcome_page"]
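# Hedged usage sketch (not part of the original module): the predicates above
# are shaped so they can be passed directly to Selenium's WebDriverWait, which
# keeps calling them with the driver until they return a truthy value or the
# timeout expires. The helper name and the 10-second timeout are assumptions.
def wait_until_logged_in(driver: WebDriver, timeout: int = 10) -> None:
    from selenium.webdriver.support.ui import WebDriverWait
    wait = WebDriverWait(driver, timeout)
    wait.until(has_needed_cookies)            # session cookies are present
    wait.until(recaptcha_ok)                  # recaptcha checkbox is ticked
    wait.until(redirected_into_welcome_page)  # landed on the welcome page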
|
py | 1a500193f55dc60f6cea55c762dfa8e46ce05ff2 | '''
Various classes and functions for handling Regier and colleagues'
communicative cost model.
'''
import numpy as np
from scipy.spatial.distance import pdist, squareform
class Partition:
'''
A partition object represents a partition of an n-dimensional
space. To create a partition, pass a list like [[0,0,1,1],
[0,0,1,1]], where the structure of the lists represents the space
(here 2x4), and the numbers represent the categories (here
category 0 and 1). Passing a tuple like (2,4) creates a trivial
partition of given dimensionality. Various iteration methods are
available for traversing the partition.
'''
@property
def shape(self):
return self._partition.shape
@property
def size(self):
return self._partition.size
def __init__(self, partition):
if isinstance(partition, tuple):
self._partition = np.zeros(partition, dtype=int)
else:
self._partition = np.array(partition, dtype=int)
self._boolean_matrix = None
def __repr__(self):
'''
Provides textual description of the partition object.
'''
if len(self.shape) == 1:
return 'Partition[length=%i, n_categories=%i]' % (self.shape[0], self.__len__())
return 'Partition[shape=%s, n_categories=%i]' % ('x'.join(map(str, self.shape)), self.__len__())
def __str__(self):
'''
Provides printable representation of the partition object.
'''
return self._partition.__str__()
def __len__(self):
'''
The length of a partition is the number of categories it
contains.
'''
return np.unique(self._partition).size
def __getitem__(self, key):
'''
        Pass a tuple to get the category membership of a point. Pass
an integer to get a list of points that belong to a category.
'''
if isinstance(key, tuple):
return self._partition[key]
return list(map(tuple, np.transpose(np.where(self._partition==key))))
def __setitem__(self, key, value):
'''
Change the category membership of a particular point.
'''
if not isinstance(key, tuple):
raise ValueError('Index must be tuple. For 1D spaces, include a trailing comma in the index.')
self._boolean_matrix = None
self._partition[key] = value
def __iter__(self):
'''
Default iterator. Each iteration returns a point in the space
along with its associated category.
'''
for point, category in np.ndenumerate(self._partition):
yield point, category
def iter_categories(self):
'''
Iterate over categories in the partition. Each iteration
returns an integer.
'''
for category in np.unique(self._partition):
yield category
def iter_points(self):
'''
Iterate over points in the space. Each iteration returns a
tuple.
'''
for point in np.ndindex(self.shape):
yield point
def boolean_matrix(self):
'''
Returns a 2D Boolean matrix, where rows correspond to meanings
and columns correspond to categories. True indicates that the
ith meaning belongs to the jth category. This Boolean matrix
representation is used by the communicative_cost method in the
Space object for fast computation using a similarity matrix.
'''
        if self._boolean_matrix is not None:
return self._boolean_matrix
self._boolean_matrix = convert_to_bool_matrix(self._partition)
return self._boolean_matrix
def spawn_speaker(self):
'''
Creates a Speaker with perfect speaker certainty.
'''
return Speaker(self.shape)
def spawn_listener(self, gamma, mu=2):
'''
Creates a Listener who represents the partition according to
the specified gamma and mu parameters. gamma may be set to
'uniform' to create a uniform listener.
'''
return Listener(self.shape, self.listener_distributions(gamma, mu))
def listener_distributions(self, gamma, mu=2):
'''
Returns a dictionary mapping categories to distributions
created under the specified gamma and mu parameters. gamma may
be set to 'uniform' to create uniform category distributions.
'''
if gamma == 'uniform':
return {category:self.uniform_distribution(category) for category in self.iter_categories()}
else:
return {category:self.gaussian_distribution(category, gamma, mu) for category in self.iter_categories()}
def uniform_distribution(self, category):
'''
Returns the uniform distribution for a particular category.
'''
category_members = self[category]
uniform_probability = 1.0 / len(category_members)
distribution = np.zeros(self.shape, dtype=float)
for point in category_members:
distribution[point] = uniform_probability
return Distribution(distribution, normalize=False)
def gaussian_distribution(self, category, gamma=1, mu=2):
'''
Returns the Gaussian distribution for a particular category
under the specified gamma and mu parameters.
'''
distribution = np.zeros(self.shape, dtype=float)
for point in self.iter_points():
distribution[point] = self._category_similarity(point, category, gamma, mu)
return Distribution(distribution, normalize=True)
def _category_similarity(self, point, category, gamma, mu):
'''
Returns the sum similarity between a point and all members of
a category under the specified gamma and mu parameters.
'''
return sum(self._similarity(point, member, gamma, mu) for member in self[category])
def _similarity(self, x, y, gamma, mu):
'''
Returns the similarity between two points under the specified
gamma and mu parameters.
'''
if not ((isinstance(gamma, int) or isinstance(gamma, float)) and gamma >= 0):
raise ValueError('Gamma parameter must be positive number.')
return np.exp(-gamma * self._distance(x, y, mu)**2)
def _distance(self, x, y, mu):
'''
Returns the Minkowski distance between two points for some mu.
mu = 1: Manhattan distance
mu = 2: Euclidean distance
'''
if not ((isinstance(mu, int) or isinstance(mu, float)) and mu > 0):
if mu == 'circle_euclidean':
return self._circle_euclidean(x, y)
raise ValueError('Mu parameter must be positive number.')
return sum(abs(x - y)**mu for x, y in zip(x, y))**(1.0/mu)
def _circle_euclidean(self, x, y):
'''
Returns the Euclidean distance between two points on a line
which wraps back around on itself (the shorter distance in
either direction is returned).
'''
sigma = 0.0
for dim in range(len(self.shape)):
d1 = abs(x[dim] - y[dim])
d2 = abs(d1 - self.shape[dim])
if d1 < d2:
sigma += d1**2
else:
sigma += d2**2
return sigma**0.5
########################################################################
class Distribution:
'''
A Distribution object represents a probability distribution. An
error is raised if the passed probabilities do not sum to 1; to
correct this, set normalize to True, which will automatically
normalize the distribution.
'''
@property
def shape(self):
return self.probabilities.shape
def __init__(self, distribution, normalize=False):
distribution = np.array(distribution, dtype=float)
if distribution.ndim == 0:
raise ValueError('Distribution must have at least one dimension')
if normalize is True:
self.probabilities = distribution / distribution.sum()
elif np.isclose(distribution.sum(), 1.0):
self.probabilities = distribution
else:
raise ValueError('Probabilities do not sum to 1: Use normalize=True')
def __repr__(self):
'''
Provides textual description of the distribution.
'''
dims = len(self.shape)
start = '['*dims + str(self.probabilities[(0,)*dims])
end = str(self.probabilities[(-1,)*dims]) + ']'*dims
return 'Distribution%s ... %s' % (start, end)
def __str__(self):
'''
Provides printable representation of the distribution.
'''
return self.probabilities.__str__()
def __getitem__(self, key):
'''
Pass an int (1D) or tuple (ND) to get the probability of that
point on the distribution.
'''
return self.probabilities[key]
def __iter__(self):
'''
Default iterator. Each iteration returns a point in the
distribution along with its associated probability.
'''
for point, probability in np.ndenumerate(self.probabilities):
yield point, probability
def __mul__(self, operand):
return self.probabilities * operand.probabilities
def smooth(self, alpha):
'''
Returns a smoothed copy of the Distribution using convex
combination smoothing. alpha=0: no smoothing; alpha=1: smooth
to a uniform distribution.
'''
if alpha:
            if not isinstance(alpha, (int, float)) or alpha < 0 or alpha > 1:
raise ValueError('Alpha must be number between 0 and 1.')
uniform = np.full(self.shape, 1.0 / np.product(self.shape), dtype=float)
return Distribution(uniform*alpha + self.probabilities*(1.0 - alpha), False)
return self
########################################################################
class Need(Distribution):
'''
A Need object represents the probability with which each point in
an n-dimensional space will need to be expressed. To create a Need
object, pass a list like [[2,2,4,5], [3,1,6,8]], where the
structure of the lists represents the space (here 2x4), and the
numbers represent the frequency or probability of each point.
Frequencies will automatically be converted to probabilities.
Passing a tuple like (2,4) creates a Need object of given
dimensionality with uniform need probabilities.
'''
def __init__(self, need_frequencies):
if isinstance(need_frequencies, tuple):
self.probabilities = np.full(need_frequencies, 1.0 / np.product(need_frequencies), dtype=float)
else:
need_frequencies = np.array(need_frequencies, dtype=float)
if need_frequencies.ndim == 0:
raise ValueError('Distribution must be at least one dimensional')
self.probabilities = need_frequencies / need_frequencies.sum()
########################################################################
class Speaker:
'''
Collection of distributions - one for each point in the space.
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, speaker_distributions=None):
if not isinstance(shape, tuple):
raise ValueError('Shape must be tuple')
self._shape = shape
self._distributions = {}
if speaker_distributions:
if not isinstance(speaker_distributions, dict):
                raise ValueError('Speaker distributions should be passed as a dictionary: point:distribution')
else:
points = list(np.ndindex(self._shape))
for point in points:
if point not in speaker_distributions:
raise ValueError('Speaker distributions must be provided for every point')
for point, speaker_distribution in speaker_distributions.items():
if point not in points:
raise ValueError('Invalid point contained in passed speaker distributions')
self[point] = speaker_distribution
else: # Assume speaker certainty and create point distributions
for point in np.ndindex(self._shape):
point_distribution = np.zeros(self._shape, dtype=float)
point_distribution[point] = 1.0
self._distributions[point] = Distribution(point_distribution)
def __getitem__(self, key):
'''
        Pass a point (as a tuple) to get the speaker distribution
        associated with that point.
'''
if key not in self._distributions:
raise ValueError('Invalid point.')
return self._distributions[key]
def __setitem__(self, key, value):
'''
        Set the speaker distribution for a particular point.
'''
if not self._valid_key(key):
raise ValueError('Invalid point.')
if not isinstance(value, Distribution):
value = Distribution(value)
if value.shape != self._shape:
raise ValueError('Distribution shape does not match the shape of the speaker.')
self._distributions[key] = value
def __iter__(self):
'''
        Default iterator. Each iteration returns a point in the space
        along with its associated speaker distribution.
'''
for point in np.ndindex(self._shape):
yield (point, self[point])
def _valid_key(self, key):
if not isinstance(key, tuple):
return False
if len(key) != len(self.shape):
return False
for dim in range(len(key)):
if key[dim] >= self._shape[dim]:
return False
return True
########################################################################
class Listener:
'''
Collection of distributions - one for each category
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, listener_distributions):
if not isinstance(shape, tuple):
raise ValueError('Shape must be tuple')
if not isinstance(listener_distributions, dict):
            raise ValueError('Listener distributions should be passed as a dictionary: category:Distribution')
self._shape = shape
self._distributions = {}
for category, listener_distribution in listener_distributions.items():
self[category] = listener_distribution
def __getitem__(self, key):
'''
Pass an integer to get the distribution for that category.
'''
if key not in self._distributions:
raise ValueError('Invalid category.')
return self._distributions[key]
def __setitem__(self, key, value):
'''
Change the distribution for a particular category
'''
if not isinstance(value, Distribution):
value = Distribution(value)
if value.shape != self._shape:
raise ValueError('Distribution shape does not match the shape of the listener.')
self._distributions[key] = value
def __iter__(self):
'''
        Default iterator. Each iteration returns a category along with
        its associated listener distribution.
'''
for category in sorted(list(self._distributions.keys())):
yield (category, self[category])
def smooth(self, alpha):
if alpha:
smoothed_distributions = {}
for category, distribution in self._distributions.items():
smoothed_distributions[category] = distribution.smooth(alpha)
return Listener(self.shape, smoothed_distributions)
return self
########################################################################
class Space:
'''
A Space object represents an n-dimensional universe. To create a
space object of certain dimensionality, pass a tuple like (2,4).
    Optionally, you can pass a Need object specifying need
    probabilities, a gamma setting (default: 1), and a mu setting
    (default: 2 = Euclidean; 1 = Manhattan). If no Need object is
    passed, a uniform Need object will be created.
'''
@property
def shape(self):
return self._shape
def __init__(self, shape, need=None, gamma=1, mu=2):
if not isinstance(shape, tuple):
raise ValueError('The shape of the space must be a tuple.')
self._shape = shape
if need:
if not isinstance(need, Need):
raise ValueError('Invalid need object. Pass a need object or set to None for uniform need probabilities.')
self._need = need
else: # Need unspecified, so create a uniform need object
self._need = Need(self._shape)
if not ((isinstance(gamma, int) or isinstance(gamma, float)) and gamma >= 0):
raise ValueError('Gamma parameter must be positive number.')
self._gamma = gamma
if not ((isinstance(mu, int) or isinstance(mu, float)) and mu > 0):
raise ValueError('Mu parameter must be positive number.')
self._mu = mu
pairwise_distances = pdist(list(np.ndindex(self._shape)), 'minkowski', self._mu)
distance_matrix = squareform(pairwise_distances)
self._similarity_matrix = np.exp(-self._gamma * distance_matrix**2)
def __repr__(self):
'''
Provides textual description of the space object.
'''
if len(self._shape) == 1:
return 'Space[length=%i, gamma=%i, mu=%s]' % (self._shape[0], self._gamma, self._mu)
return 'Space[dimensionality=%s, gamma=%i, mu=%s]' % ('x'.join(map(str, self._shape)), self._gamma, self._mu)
def communicative_cost(self, partition, need=None):
'''
Returns the communicative cost for a given partition and need
probabilities. If no need object is passed, the need
probabilities will be inherited from the space's own need
object.
'''
if not isinstance(partition, Partition):
raise ValueError('Invalid Partition object.')
if partition.shape != self._shape:
raise ValueError('Partition object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
if need:
if not isinstance(need, Need):
raise ValueError('Invalid Need object. Pass a Need object or set to None to inherit need probabilities from the space.')
if need.shape != self._shape:
raise ValueError('Need object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
else:
need = self._need
boolean_matrix = partition.boolean_matrix()
listener_distributions = np.dot(self._similarity_matrix, boolean_matrix)
norm_listener_distributions = listener_distributions * boolean_matrix / listener_distributions.sum(axis=0)
neg_log_listener_distributions = -np.log2(norm_listener_distributions.sum(axis=1))
return (need.probabilities * neg_log_listener_distributions.reshape(self._shape)).sum()
def cost(self, language_array):
'''
Returns the communicative cost of a language passed as a
simple numpy array under the assumption of uniform need
probabilities. Essentially does the same as the
communicative_cost method above without the need to first
convert the numpy array to a Partition object.
'''
if not isinstance(language_array, np.ndarray):
raise ValueError('language_array should be Numpy array')
if language_array.shape != self._shape:
raise ValueError('Partition object does not match the dimensions of the space. Should be %s.' % 'x'.join(map(str, self._shape)))
boolean_matrix = convert_to_bool_matrix(language_array)
listener_distributions = np.dot(self._similarity_matrix, boolean_matrix)
norm_listener_distributions = listener_distributions * boolean_matrix / listener_distributions.sum(axis=0)
neg_log_listener_distributions = -np.log2(norm_listener_distributions.sum(axis=1))
return (self._need.probabilities * neg_log_listener_distributions.reshape(self._shape)).sum()
########################################################################
def convert_to_bool_matrix(partition):
'''
Returns a 2D Boolean matrix, where rows correspond to meanings and
columns correspond to categories. True indicates that the ith
meaning belongs to the jth category. This Boolean matrix
representation is used by the communicative_cost method in the
Space object for fast computation using a similarity matrix.
'''
n_points = partition.size # determines number of rows
n_categories = len(np.unique(partition)) # determines number of columns
cat_to_col = {cat:col for col, cat in enumerate(np.unique(partition))} # maps categories to columns
boolean_matrix = np.zeros((n_points, n_categories), dtype=bool)
for row, point in enumerate(np.ndindex(partition.shape)):
column = cat_to_col[partition[point]]
boolean_matrix[row, column] = True
return boolean_matrix
########################################################################
def KL_divergence(s, l):
'''
Returns the KL divergence between a speaker and listener
distribution.
'''
if s.shape != l.shape:
raise ValueError('Speaker and listener distributions do not have the same shape')
D_KL = 0.0
for point in np.ndindex(s.shape):
if s[point] == 0:
continue
if l[point] == 0:
raise ValueError('Cannot compute KL divergence because l=0 where s>0 at point %s. Try smoothing.'%str(point))
D_KL += s[point] * np.log2(s[point] / (l[point]))
return D_KL
def cost(partition, need, speaker, listener, alpha=None):
'''
Returns the communicative cost given partition, need, speaker, and
listener objects.
'''
if not isinstance(partition, Partition):
raise ValueError('Invalid Partition object')
if not isinstance(need, Need) or partition.shape != need.shape:
raise ValueError('Invalid Need object')
if not isinstance(speaker, Speaker) or partition.shape != speaker.shape:
raise ValueError('Invalid Speaker object')
if not isinstance(listener, Listener) or partition.shape != listener.shape:
raise ValueError('Invalid Listener object')
if alpha:
listener = listener.smooth(alpha)
return sum(need[target] * KL_divergence(speaker[target], listener[category]) for target, category in partition)
########################################################################
def random_partition(shape, n_categories, convex=False, seeds=None):
'''
    Returns the seed points and a randomly generated partition (as an
    integer array) with the specified shape, number of categories, and convexity.
'''
space = np.full(shape, -1, dtype=int)
n_items = np.product(shape)
points = list(np.ndindex(shape))
if seeds is None:
seeds = [points[p] for p in np.random.choice(n_items, n_categories, False)]
for category in range(n_categories):
space[seeds[category]] = category
for point in points:
if space[point] == -1:
if convex:
distances = [dist(point, seed, 2) for seed in seeds]
min_distance = min(distances)
category = np.random.choice([c for c in range(n_categories) if distances[c] == min_distance])
else:
category = np.random.choice(n_categories)
space[point] = category
return seeds, space
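# Illustrative sketch (hedged, not part of the original module; the helper name is
# mine): split a 4x4 space into 3 categories, grown convexly around random seeds.
def _example_random_partition():
    seeds, space = random_partition((4, 4), 3, convex=True)
    # space is a 4x4 int array with values in {0, 1, 2}; seeds holds the 3 seed points
    return seeds, space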
def iter_partitions(collection):
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in iter_partitions(collection[1:]):
for n, subset in enumerate(smaller):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
yield [ [ first ] ] + smaller
def all_partitions(shape):
'''
Returns all partitions of a space
'''
space = np.zeros(shape, dtype=int)
for partition in iter_partitions(list(np.ndindex(shape))):
for category, points in enumerate(partition):
for point in points:
space[point] = category
yield Partition(space)
def dist(x, y, mu):
    return sum(abs(xi - yi)**mu for xi, yi in zip(x, y))**(1.0/mu)
|
py | 1a5002791beead8b92c5b2b29e472d920ee17575 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import logging
def conv3x3(in_channels, out_channels, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, apply_activation=False):
super(ResidualBlock, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels, stride)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = conv3x3(out_channels, out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = conv3x3(out_channels, out_channels)
self.bn3 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.apply_activation = apply_activation
def forward(self, x):
"""Output size is same as input size"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out += residual
residual = out
out = self.conv2(out)
out = self.bn2(out)
out += residual
residual = out
out = self.conv3(out)
out = self.bn3(out)
out += residual
if self.apply_activation: out = self.relu(out)
return out
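# Illustrative usage sketch (hedged, not part of the original file; the helper name
# and tensor sizes are mine). Note the assumption above: `out += residual` only
# works when in_channels == out_channels and stride == 1.
def _example_residual_block():
    block = ResidualBlock(64, 64, stride=1, apply_activation=True)
    x = torch.randn(2, 64, 32, 32)
    y = block(x)
    # y.shape == torch.Size([2, 64, 32, 32])
    return y.shape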
|
py | 1a5002a81a2abfcfbe1194d04e2535e2b18285fe | import turtle
import math
#1. Write a function called square that takes a parameter named t, which is a turtle. It should use the turtle to draw a square. Write a function call that passes bob as an argument to square, and then run the program again.
def square(t):
for i in range(4):
t.fd(100)
t.lt(90)
bob = turtle.Turtle()
#square(bob)
#2. Add another parameter, named length, to square. Modify the body so the length of the sides is length, and then modify the function call to provide a second argument. Run the program again. Test your program with a range of values for length.
def square(t, length):
for i in range(4):
t.fd(length)
t.lt(90)
#for i in range(100, 200, 10):
# square(bob, i)
#3. Make a copy of square and change the name to polygon. Add another parameter named n and modify the body so it draws an n-sided regular polygon. Hint: The exterior angles of an n-sided regular polygon are 360/n degrees.
def polygon(t, length, n):
    for i in range(n):
        t.fd(length)
        t.lt(360/n)
#polygon(bob, 50, 6)
# 4 Write a function called circle that takes a turtle, t, and radius, r, as parameters and that draws an approximate circle by calling polygon with an appropriate length and number of sides. Test your function with a range of values of r. Hint: figure out the circumference of the circle and make sure that length * n = circumference.
def circle(t, r):
    circumference = 2 * math.pi * r
    length = 10
    n = int(circumference / length)
polygon(t, length, n)
#circle(bob, 100)
#Make a more general version of circle called arc that takes an additional parameter angle, which determines what fraction of a circle to draw. angle is in units of degrees, so when angle=360, arc should draw a complete circle.
def arc(t, r, angle):
    # one possible implementation of arc (exercise 5): draw `angle` degrees of a circle of radius r
    arc_length = 2 * math.pi * r * angle / 360
    n = int(arc_length / 4) + 3
    step_length = arc_length / n
    step_angle = angle / n
    for i in range(n):
        t.fd(step_length)
        t.lt(step_angle)
#arc(bob, 100, 90)
|
py | 1a50051e44f203a7a9867c57871969e28c877baa | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the pairs function below.
def pairs(k, arr):
res = 0
memo = dict()
for el in arr:
if el-k in memo:
res += 1
if el+k in memo:
res += 1
memo[el] = True
return res
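# Illustrative worked example (hedged, not part of the original solution; the helper
# name is mine, and it assumes distinct values as in the original problem statement):
# with k=2 and [1, 5, 3, 4, 2], the qualifying pairs are (1, 3), (3, 5) and (2, 4),
# so pairs(2, [1, 5, 3, 4, 2]) returns 3. Each pair is counted once because only
# earlier elements are in `memo` when an element is processed.
def _example_pairs():
    return pairs(2, [1, 5, 3, 4, 2])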
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
arr = list(map(int, input().rstrip().split()))
result = pairs(k, arr)
fptr.write(str(result) + '\n')
fptr.close()
|
py | 1a500543981cd9b47c25e514ee87f30e20a57bc0 | # pylint: disable=C0103,C0111,C0302,W0212
import datetime
import pytest
import numpy as np
import quasardb
import test_table as tslib
def test_int64_get_ranges__when_timeseries_is_empty(table, intervals):
results = table.int64_get_ranges(tslib._int64_col_name(table), intervals)
assert len(results) == 2
assert len(results[0]) == 0
assert len(results[1]) == 0
def test_int64_erase_ranges__when_timeseries_is_empty(table, intervals):
erased_count = table.erase_ranges(tslib._int64_col_name(table), intervals)
assert erased_count == 0
def test_int64_get_ranges(table, intervals):
start_time = tslib._start_time(intervals)
column_name = tslib._int64_col_name(table)
inserted_int64_data = tslib._generate_int64_ts(start_time, 1000)
table.int64_insert(column_name,
inserted_int64_data[0],
inserted_int64_data[1])
results = table.int64_get_ranges(column_name, [(
start_time, start_time + np.timedelta64(10, 's'))])
tslib._check_ts_results(results, inserted_int64_data, 10)
results = table.int64_get_ranges(column_name,
[(start_time,
start_time + np.timedelta64(10, 's')),
(start_time + np.timedelta64(10, 's'),
start_time + np.timedelta64(20, 's'))])
tslib._check_ts_results(results, inserted_int64_data, 20)
# Everything
results = table.int64_get_ranges(column_name)
tslib._check_ts_results(results, inserted_int64_data, 1000)
# empty result
out_of_time = start_time + np.timedelta64(10, 'h')
results = table.int64_get_ranges(
column_name, [(out_of_time, out_of_time + np.timedelta64(10, 's'))])
assert len(results) == 2
assert len(results[0]) == 0
assert len(results[1]) == 0
# error: column doesn't exist
with pytest.raises(quasardb.Error):
table.int64_get_ranges(
"lolilol", [(start_time, start_time + np.timedelta64(10, 's'))])
with pytest.raises(quasardb.Error):
table.int64_insert(
"lolilol",
inserted_int64_data[0],
inserted_int64_data[1])
with pytest.raises(quasardb.IncompatibleTypeError):
table.blob_get_ranges(
column_name, [(start_time, start_time + np.timedelta64(10, 's'))])
with pytest.raises(quasardb.IncompatibleTypeError):
table.blob_insert(
column_name,
inserted_int64_data[0],
inserted_int64_data[1])
def test_int64_erase_ranges(table, intervals):
start_time = tslib._start_time(intervals)
column_name = tslib._int64_col_name(table)
inserted_int64_data = tslib._generate_int64_ts(start_time, 1000)
table.int64_insert(
column_name,
inserted_int64_data[0],
inserted_int64_data[1])
results = table.int64_get_ranges(column_name, [(
start_time, start_time + np.timedelta64(10, 's'))])
erased_count = table.erase_ranges(column_name, [(
start_time, start_time + np.timedelta64(10, 's'))])
assert erased_count == len(results[0])
erased_count = table.erase_ranges(column_name, [(
start_time, start_time + np.timedelta64(10, 's'))])
assert erased_count == 0
results = table.int64_get_ranges(column_name, [(
start_time, start_time + np.timedelta64(10, 's'))])
assert len(results[0]) == 0
|
py | 1a50057aa9de328c43c7ce39610e090b9fc56f4d | """The Energy Intensity Indicators Model
"""
from __future__ import print_function, division, absolute_import
import os
from EnergyIntensityIndicators.Residential import residential_floorspace
from EnergyIntensityIndicators.Industry import (manufacturing,
nonmanufacuturing, asm_price_fit)
from EnergyIntensityIndicators import (industry, residential, commercial, transportation,
electricity, additive_lmdi, multiplicative_lmdi,
LMDI)
__author__ = 'Isabelle Rabideau'
__email__ = '[email protected]'
EIIDIR = os.path.dirname(os.path.realpath(__file__))
TESTDATADIR = os.path.join(os.path.dirname(EIIDIR), 'tests', 'data') |
py | 1a500665a5df5926d3da104a56f44aa68c76dbeb | import torch
from torch import nn
def normalize(x, axis=-1):
"""Normalizing to unit length along the specified dimension.
Args:
x: pytorch Variable
Returns:
x: pytorch Variable, same shape as input
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
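# Illustrative sketch (hedged, not part of the original file; the helper name is
# mine): each row is scaled to (approximately) unit L2 norm along the last axis.
def _example_normalize():
    x = torch.tensor([[3.0, 4.0], [0.0, 2.0]])
    out = normalize(x, axis=-1)
    # rows become [0.6, 0.8] and [0.0, 1.0] (up to the 1e-12 stabiliser)
    return out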
def euclidean_dist(x, y):
"""
Args:
x: pytorch Variable, with shape [m, d]
y: pytorch Variable, with shape [n, d]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
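# Illustrative worked example (hedged, not part of the original file; the helper
# name is mine, and it assumes a torch version that still accepts the legacy
# addmm_(beta, alpha, mat1, mat2) call used above). euclidean_dist expands
# ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y> and clamps before the square root.
def _example_euclidean_dist():
    x = torch.tensor([[0.0, 0.0], [3.0, 4.0]])
    y = torch.tensor([[0.0, 0.0]])
    d = euclidean_dist(x, y)
    # d is a 2x1 matrix, approximately [[1e-6], [5.0]] (the small value comes
    # from clamping the zero distance at 1e-12 before sqrt)
    return d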
def hard_example_mining(dist_mat, labels, return_inds=False):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
labels: pytorch LongTensor, with shape [N]
return_inds: whether to return the indices. Save time if `False`(?)
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    NOTE: Only considers the case in which all labels have the same number of samples,
    so that all anchors can be processed in parallel.
"""
assert len(dist_mat.size()) == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
# shape [N, N]
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
ap_mat = dist_mat.mul(is_pos.float())
an_mat = dist_mat.mul(is_neg.float())
dist_ap, relative_p_inds = torch.max(ap_mat, 1, keepdim=True)
max_dist_an, _ = torch.max(an_mat, 1, keepdim=True)
max_an_mat = dist_mat + max_dist_an * (is_pos.float())
dist_an, relative_n_inds = torch.min(max_an_mat, 1, keepdim=True)
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
if return_inds:
# shape [N, N]
ind = (labels.new().resize_as_(labels)
.copy_(torch.arange(0, N).long())
.unsqueeze(0).expand(N, N))
# shape [N, 1]
p_inds = torch.gather(
ind, 1, relative_p_inds.data)
n_inds = torch.gather(
ind, 1, relative_n_inds.data)
# shape [N]
p_inds = p_inds.squeeze(1)
n_inds = n_inds.squeeze(1)
return dist_ap, dist_an, p_inds, n_inds
return dist_ap, dist_an
class TripletLoss(object):
"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
Loss for Person Re-Identification'."""
def __init__(self, margin=None):
self.margin = margin
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
def __call__(self, global_feat, labels, normalize_feature=False):
if normalize_feature:
global_feat = normalize(global_feat, axis=-1)
dist_mat = euclidean_dist(global_feat, global_feat)
dist_ap, dist_an = hard_example_mining(
dist_mat, labels)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
return loss, dist_ap, dist_an
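# Illustrative usage sketch (hedged, not part of the original file; the helper name,
# batch size and embedding size are mine; same torch-version caveat as
# euclidean_dist above). hard_example_mining assumes every identity contributes
# the same number of samples to the batch.
def _example_triplet_loss():
    criterion = TripletLoss(margin=0.3)
    feats = torch.randn(8, 128)                      # 8 samples, 128-dim embeddings
    labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])  # 4 identities, 2 samples each
    loss, dist_ap, dist_an = criterion(feats, labels, normalize_feature=True)
    return loss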
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss |
py | 1a50075e2220638a6059c48271bec260d3859a59 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import filecmp
import inspect
import os
import re
import sys
import six
from six.moves import range
from six.moves import cStringIO as StringIO
from .lang import classproperty
def get_new_uuid():
"""
Return a new UUID (typically to be used for new nodes).
It uses the UUID version specified in
aiida.backends.settings.AIIDANODES_UUID_VERSION
"""
import uuid
return six.text_type(uuid.uuid4())
# To speed up the process (os.path.abspath calls are slow)
_repository_folder_cache = {} # pylint: disable=invalid-name
def get_repository_folder(subfolder=None):
"""
Return the top folder of the local repository.
"""
try:
return _repository_folder_cache[subfolder]
except KeyError:
from aiida.manage.configuration import get_profile
repository_path = get_profile().repository_path
if not os.path.isdir(repository_path):
raise ImportError
if subfolder is None:
retval = os.path.abspath(repository_path)
elif subfolder == "sandbox":
retval = os.path.abspath(os.path.join(repository_path, 'sandbox'))
elif subfolder == "repository":
retval = os.path.abspath(os.path.join(repository_path, 'repository'))
else:
raise ValueError("Invalid 'subfolder' passed to get_repository_folder: {}".format(subfolder))
_repository_folder_cache[subfolder] = retval
return retval
def validate_list_of_string_tuples(val, tuple_length):
"""
Check that:
1. ``val`` is a list or tuple
2. each element of the list:
a. is a list or tuple
b. is of length equal to the parameter tuple_length
      c. each of its elements is a string
Return if valid, raise ValidationError if invalid
"""
from aiida.common.exceptions import ValidationError
err_msg = ("the value must be a list (or tuple) "
"of length-N list (or tuples), whose elements are strings; "
"N={}".format(tuple_length))
if not isinstance(val, (list, tuple)):
raise ValidationError(err_msg)
for element in val:
if (not isinstance(element, (list, tuple)) or (len(element) != tuple_length) or
not all(isinstance(s, six.string_types) for s in element)):
raise ValidationError(err_msg)
return True
def get_unique_filename(filename, list_of_filenames):
"""
Return a unique filename that can be added to the list_of_filenames.
If filename is not in list_of_filenames, it simply returns the filename
string itself. Otherwise, it appends a integer number to the filename
(before the extension) until it finds a unique filename.
:param filename: the filename to add
:param list_of_filenames: the list of filenames to which filename
should be added, without name duplicates
:returns: Either filename or its modification, with a number appended
between the name and the extension.
"""
if filename not in list_of_filenames:
return filename
basename, ext = os.path.splitext(filename)
# Not optimized, but for the moment this should be fast enough
append_int = 1
while True:
new_filename = "{:s}-{:d}{:s}".format(basename, append_int, ext)
if new_filename not in list_of_filenames:
break
append_int += 1
return new_filename
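# Illustrative worked example (hedged, not part of the original module; the helper
# name is mine):
def _example_get_unique_filename():
    existing = ['data.txt', 'data-1.txt']
    # 'data.txt' and 'data-1.txt' are taken, so 'data-2.txt' is returned
    return get_unique_filename('data.txt', existing)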
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False): # pylint: disable=invalid-name
"""
Given a dt in seconds, return it in a HH:MM:SS format.
:param dt: a TimeDelta object
:param max_num_fields: maximum number of non-zero fields to show
(for instance if the number of days is non-zero, shows only
days, hours and minutes, but not seconds)
:param short: if False, print always ``max_num_fields`` fields, even
if they are zero. If True, do not print the first fields, if they
are zero.
:param negative_to_zero: if True, set dt = 0 if dt < 0.
"""
if max_num_fields <= 0:
raise ValueError("max_num_fields must be > 0")
s_tot = dt.total_seconds() # Important to get more than 1 day, and for
# negative values. dt.seconds would give
# wrong results in these cases, see
# http://docs.python.org/2/library/datetime.html
s_tot = int(s_tot)
if negative_to_zero:
if s_tot < 0:
s_tot = 0
negative = (s_tot < 0)
s_tot = abs(s_tot)
negative_string = " in the future" if negative else " ago"
# For the moment stay away from months and years, difficult to get
days, remainder = divmod(s_tot, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
fields = []
start_insert = False
counter = 0
for idx, field in enumerate(all_fields):
if field[0] != 0:
start_insert = True
if (len(all_fields) - idx) <= max_num_fields:
start_insert = True
if start_insert:
if counter >= max_num_fields:
break
fields.append(field)
counter += 1
if short:
while len(fields) > 1: # at least one element has to remain
if fields[0][0] != 0:
break
fields.pop(0) # remove first element
# Join the fields
raw_string = ":".join(["{:02d}{}".format(*f) for f in fields])
if raw_string.startswith('0'):
raw_string = raw_string[1:]
# Return the resulting string, appending a suitable string if the time
# is negative
return "{}{}".format(raw_string, negative_string)
def get_class_string(obj):
"""
Return the string identifying the class of the object (module + object name,
joined by dots).
It works both for classes and for class instances.
"""
if inspect.isclass(obj):
return "{}.{}".format(obj.__module__, obj.__name__)
return "{}.{}".format(obj.__module__, obj.__class__.__name__)
def get_object_from_string(class_string):
"""
Given a string identifying an object (as returned by the get_class_string
method) load and return the actual object.
"""
import importlib
the_module, _, the_name = class_string.rpartition('.')
return getattr(importlib.import_module(the_module), the_name)
def export_shard_uuid(uuid):
"""
Sharding of the UUID for the import/export
"""
return os.path.join(uuid[:2], uuid[2:4], uuid[4:])
def grouper(n, iterable): # pylint: disable=invalid-name
"""
Given an iterable, returns an iterable that returns tuples of groups of
elements from iterable of length n, except the last one that has the
    required length to exhaust iterable (i.e., there is no filling applied).
    :param n: length of each tuple (except the last one, that will have length
      <= n)
:param iterable: the iterable to divide in groups
"""
import itertools
iterator = iter(iterable)
while True:
chunk = tuple(itertools.islice(iterator, n))
if not chunk:
return
yield chunk
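# Illustrative worked example (hedged, not part of the original module; the helper
# name is mine):
def _example_grouper():
    # list(grouper(3, range(8))) == [(0, 1, 2), (3, 4, 5), (6, 7)]
    return list(grouper(3, range(8)))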
class ArrayCounter(object): # pylint: disable=useless-object-inheritance
"""
A counter & a method that increments it and returns its value.
It is used in various tests.
"""
seq = None
def __init__(self):
self.seq = -1
def array_counter(self):
self.seq += 1
return self.seq
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
# Directory comparison
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return (False, "Left directory: {}, right directory: {}, files only "
"in left directory: {}, files only in right directory: "
"{}, not comparable files: {}".format(dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only,
dirs_cmp.funny_files))
# If the directories contain the same files, compare the common files
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch:
return (False, "The following files in the directories {} and {} "
"don't match: {}".format(dir1, dir2, mismatch))
if errors:
return (False, "The following files in the directories {} and {} "
"aren't regular: {}".format(dir1, dir2, errors))
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
res, msg = are_dir_trees_equal(new_dir1, new_dir2)
if not res:
return False, msg
return True, "The given directories ({} and {}) are equal".format(dir1, dir2)
class Prettifier(object): # pylint: disable=useless-object-inheritance
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace(u'GAMMA', u'Γ')
.replace(u'DELTA', u'Δ')
.replace(u'LAMBDA', u'Λ')
.replace(u'SIGMA', u'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return u'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls): # pylint: disable=no-self-argument
"""
Property that returns a dictionary that for each string associates
the function to prettify a label
:return: a dictionary where keys are strings and values are functions
"""
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
@classmethod
def get_prettifiers(cls):
"""
Return a list of valid prettifier strings
:return: a list of strings
"""
return sorted(cls.prettifiers.keys()) # pylint: disable=no-member
def __init__(self, format): # pylint: disable=redefined-builtin
"""
        Create a class to prettify strings of a given format
:param format: a string with the format to use to prettify.
Valid formats are obtained from self.prettifiers
"""
if format is None:
format = 'pass'
try:
self._prettifier_f = self.prettifiers[format] # pylint: disable=unsubscriptable-object
except KeyError:
raise ValueError("Unknown prettifier format {}; valid formats: {}".format(
format, ", ".join(self.get_prettifiers())))
def prettify(self, label):
"""
Prettify a label using the format passed in the initializer
:param label: the string to prettify
:return: a prettified string
"""
return self._prettifier_f(label)
def prettify_labels(labels, format=None): # pylint: disable=redefined-builtin
"""
Prettify label for typesetting in various formats
:param labels: a list of length-2 tuples, in the format(position, label)
:param format: a string with the format for the prettifier (e.g. 'agr',
'matplotlib', ...)
:return: the same list as labels, but with the second value possibly replaced
with a prettified version that typesets nicely in the selected format
"""
prettifier = Prettifier(format)
return [(pos, prettifier.prettify(label)) for pos, label in labels]
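# Illustrative worked example (hedged, not part of the original module; the helper
# name is mine):
def _example_prettify_labels():
    labels = [(0.0, 'GAMMA'), (0.5, 'X_1')]
    # with the 'latex_seekpath' format this yields [(0.0, '$\Gamma$'), (0.5, 'X$_{1}$')]
    return prettify_labels(labels, format='latex_seekpath')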
def join_labels(labels, join_symbol="|", threshold=1.e-6):
"""
Join labels with a joining symbol when they are very close
:param labels: a list of length-2 tuples, in the format(position, label)
:param join_symbol: the string to use to join different paths. By default, a pipe
:param threshold: the threshold to decide if two float values are the same and should
be joined
:return: the same list as labels, but with the second value possibly replaced
with strings joined when close enough
"""
if labels:
new_labels = [list(labels[0])]
# modify labels when in overlapping position
j = 0
for i in range(1, len(labels)):
if abs(labels[i][0] - labels[i - 1][0]) < threshold:
new_labels[j][1] += join_symbol + labels[i][1]
else:
new_labels.append(list(labels[i]))
j += 1
else:
new_labels = []
return new_labels
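# Illustrative worked example (hedged, not part of the original module; the helper
# name is mine):
def _example_join_labels():
    labels = [(0.0, 'X'), (0.0, 'Y'), (0.5, 'G')]
    # the two coincident labels are merged: [[0.0, 'X|Y'], [0.5, 'G']]
    return join_labels(labels)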
def strip_prefix(full_string, prefix):
"""
Strip the prefix from the given string and return it. If the prefix is not present
the original string will be returned unaltered
:param full_string: the string from which to remove the prefix
:param prefix: the prefix to remove
:return: the string with prefix removed
"""
if full_string.startswith(prefix):
        return full_string[len(prefix):]
return full_string
class Capturing(object): # pylint: disable=useless-object-inheritance
"""
This class captures stdout and returns it
(as a list, split by lines).
Note: if you raise a SystemExit, you have to catch it outside.
E.g., in our tests, this works::
import sys
with self.assertRaises(SystemExit):
with Capturing() as output:
sys.exit()
But out of the testing environment, the code instead just exits.
To use it, access the obj.stdout_lines, or just iterate over the object
:param capture_stderr: if True, also captures sys.stderr. To access the
lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
"""
# pylint: disable=attribute-defined-outside-init
def __init__(self, capture_stderr=False):
self.stdout_lines = list()
super(Capturing, self).__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = list()
else:
self.stderr_lines = None
def __enter__(self):
"""Enter the context where all output is captured."""
self._stdout = sys.stdout
self._stringioout = StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
"""Exit the context where all output is captured."""
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout # free up some memory
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr # free up some memory
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
class ErrorAccumulator(object): # pylint: disable=useless-object-inheritance
"""
    Allows running a number of functions and collecting all the errors they raise.
    This makes it possible to validate multiple things and tell the user about all the
    errors encountered at once. Works best if the individual functions do not depend on each other.
    Does not allow tracing the stack of each error, therefore do not use it for debugging, but for
    semantic checking with user-friendly error messages.
"""
def __init__(self, *error_cls):
self.error_cls = error_cls
self.errors = {k: [] for k in self.error_cls}
def run(self, function, *args, **kwargs):
try:
function(*args, **kwargs)
except self.error_cls as err:
self.errors[err.__class__].append(err)
def success(self):
return bool(not any(self.errors.values()))
def result(self, raise_error=Exception):
if raise_error:
self.raise_errors(raise_error)
return self.success(), self.errors
def raise_errors(self, raise_cls):
if not self.success():
raise raise_cls('The following errors were encountered: {}'.format(self.errors))
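# Illustrative usage sketch (hedged, not part of the original module; the helper
# function and its check are mine):
def _example_error_accumulator():
    def check_positive(value):
        if value <= 0:
            raise ValueError('value must be positive')
    accumulator = ErrorAccumulator(ValueError)
    accumulator.run(check_positive, -1)
    accumulator.run(check_positive, 2)
    success, errors = accumulator.result(raise_error=False)
    # success is False and errors[ValueError] holds the single collected error
    return success, errors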
|
py | 1a500783bbde8f5b4d49493ae39ffafab8a66dce | """
WSGI config for events project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
application = get_wsgi_application()
|
py | 1a5007ad9a17dbe48cf809a3ccdeb650c8e23f16 | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.93
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.is_owner_for_project_by_slug_payload import IsOwnerForProjectBySlugPayload
class TestIsOwnerForProjectBySlugPayload(unittest.TestCase):
"""IsOwnerForProjectBySlugPayload unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIsOwnerForProjectBySlugPayload(self):
"""Test IsOwnerForProjectBySlugPayload"""
# FIXME: construct object with mandatory attributes with example values
# model = IsOwnerForProjectBySlugPayload() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a50088f23912a2de21321b082723f37b139a3fa | from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Patient"):
if 'company' in frappe.db.get_table_columns("Patient"):
frappe.db.sql("alter table `tabPatient` drop column company")
|
py | 1a5008b3c7bde996c208c4e87eaed1ea57b2915d | input = """
:- a.
:- not b.
a :- b.
a v b :- c.
c v d.
"""
output = """
"""
|