from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
@dataclass
class VolumeModel:
RAW = 'raw'
SEGMENTED = 'segmented'
NORMALIZED_SUFFIX = '_norm'
DATA_TYPE = {RAW:0, SEGMENTED:1}
PREFIX = 'Volume'
__count = 0
def unique_name():
VolumeModel.__count += 1
return f'{VolumeModel.PREFIX}_{VolumeModel.__count}'
name: str = field(default_factory=unique_name)
file_path: str = None
scalars: Collection = field(default_factory=Collection)
axes: List = field(default_factory=lambda: [1, 1, 1])
data_min: float = None
data_max: float = None
data_map_step: float = 1.0
data: np.ndarray = None
data_type: str = RAW
resolution: int = 1
# Default units are microns.
units: float = 1e-06
base_color_map: Any = None
# At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings
mapping_name: str = None
lateralized: bool = False
# Mapping function. If None, the volume will be given as it is.
mapping: Any = None
luts: Collection = field(default_factory=Collection)
slicers: Collection = field(default_factory=Collection)
isosurfaces: Collection = field(default_factory=Collection)
interactive_subsampling: bool = True
volume_visible: bool = True
slices_visible: bool = True
transpose_shape: Any = None
dimensions: np.ndarray = np.zeros(3).astype(float)
center: np.ndarray = np.zeros(3).astype(float)
def compute_size(self):
"""
Compute volume size
"""
if self.data is None:
return
self.dimensions = np.array(self.data.shape)[:3]
if self.resolution is None:
return
self.resolution = int(self.resolution) # TODO: move this to constructor or init
self.dimensions *= self.resolution
self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2
def compute_range(self, force=False):
"""
Compute min and max range in the volume
:return: Min and max values
"""
if self.data_min is not None and self.data_max is not None and not force:
return self.data_min, self.data_max
self.data_min = np.min(self.data)
self.data_max = np.max(self.data)
#print('Volume min-max', self.data_min, self.data_max)
return self.data_min, self.data_max
def guess_volume_type(self):
"""
Infer the volume type when it was not specified by the user.
We assume here that typical values between -1 and 1 are raw volumes.
"""
if self.data_type is None:
if self.data_min is None or self.data_max is None:
self.compute_range()
if self.data_min >= -1 and self.data_max <= 1:
guess = VolumeModel.RAW
else:
guess = VolumeModel.SEGMENTED
self.data_type = guess
def is_segmented(self, auto_guess=True):
"""
Get whether current volume/image is segmented
:return: Boolean
"""
if self.data_type is None and auto_guess:
self.guess_volume_type()
return self.data_type == VolumeModel.SEGMENTED
def read_volume(self, file_path):
"""
Read local volume. Downloads the file first if it's remote.
:param file_path: Volume path
:return: 3D array
"""
if file_path.startswith('http') or file_path.startswith('ftp'):
downloaded_temp_file_path = vedo.download(file_path, verbose=False)
if file_path.endswith('nrrd'):
data, header = nrrd.read(downloaded_temp_file_path)
else:
data = vedo.loadImageData(downloaded_temp_file_path)
else:
if file_path.endswith('nrrd'):
data, header = nrrd.read(file_path, index_order='C')
else:
data = vedo.loadImageData(file_path)
return data
def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True):
"""
Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK
:param file_path: Volume file path. Could support other file types easily.
        :param remap_scalars: Whether scalar values in the volume are replaced by
        their row id from the given mapping. This is necessary in the case of segmented
        volumes whose region ids are discontinuous.
:param mapping: Pandas Series or a Dictionary
:param make_current: Set the volume data as the current one
:return: 3D array
"""
data = None
if not remap_scalars or mapping is None:
            data = self.read_volume(file_path)
else:
time = datetime.now()
new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)
if os.path.exists(new_file_path):
                data = self.read_volume(new_file_path)
else:
                data = self.read_volume(file_path)
data, mapping = self.remap_slow(data, mapping, new_file_path)
logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's')
'''
if volume is not None:
logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's')
min_value, max_value = np.amin(data), np.amax(data)
logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value))
else:
logging.error('Failed to open atlas ' + new_file_path)
'''
if make_current and data is not None:
self.data = data
return data, mapping
def transpose(self, shape=None):
"""
Transpose the volume for visualization in VTK
:param shape: The new shape. If None, will default to self.transpose_shape
"""
if shape is None:
shape = self.transpose_shape
if shape is None:
return
self.data = np.transpose(self.data, shape)
def remap_slow(self, data, mapping=None, write_path=None):
"""
Reassign volume values (slow on large volumes!) so that they're continuous
:param data: Volume ndarray
:param write_path: Where the modified volume will be stored
(to spare going through this method next time)
:param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones
:return: Modified volume data
"""
logging.info('\nBuilding appropriate volume from Allen data source...')
#volume = np.vectorize(self.f)(data)
labels = np.sort(np.unique(data))
num_labels = len(labels)
if mapping is None:
mapping = pd.Series(labels)
logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas')
logging.info('Reassigning ' + str(num_labels) + ' scalar values...')
for iter_id in range(num_labels):
label = labels[iter_id]
ids = mapping.index[mapping == label].to_list()
if len(ids) < 1:
continue
# On a large volume, this takes a long time
data[data == label] = ids[0]
if num_labels > 10000 and iter_id % 10 == 0:
                logging.info(' Progress: ' + str(int(iter_id / num_labels * 100)) + '%')
if write_path is not None:
logging.info('Saving volume data under ' + write_path)
nrrd.write(write_path, data, index_order='C')
return data, mapping
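    # A hedged, vectorized alternative (not part of the original API): when the goal
    # is only to make labels contiguous, the per-label loop above can usually be
    # replaced by a single call, assuming `data` is an integer ndarray:
    #
    #   labels, remapped = np.unique(data, return_inverse=True)
    #   data = remapped.reshape(data.shape)  # values now run from 0 to len(labels) - 1
    #
    # This matches remap_slow() only when mapping is None (i.e. the mapping is the
    # sorted unique labels); a custom Pandas mapping still requires the loop.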
def build_lut(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None, make_active=True):
"""
Build a look-up table (LUT, sometimes known as transfer function) for the volume
        :param scalar_map: A 2D list whose first column contains values from the volume itself
        and whose second column contains the scalar values that correspond to each region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
        :param noise_amount: Amount of random noise added to the colors (0.0 disables it)
:param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values
:param make_active: Whether this one is made active (you still have to update the views after that)
:return: LUTModel
"""
lut_model = LUTModel()
lut_model.build(scalar_map, scalar_range, color_map, alpha_map,
zero_is_transparent, noise_amount, nan_rgba)
self.luts.store(lut_model, set_current=make_active)
return lut_model
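    # Minimal usage sketch (assumed workflow; file path and values are illustrative):
    #
    #   model = VolumeModel()
    #   model.load_volume('annotation.nrrd')
    #   scalar_map = {10: 0.2, 25: 0.8}  # region id -> custom scalar value
    #   lut = model.build_lut(scalar_map=scalar_map, scalar_range=[0.0, 1.0],
    #                         color_map='viridis', make_active=True)
    #
    # The LUTModel is stored in model.luts; views still have to be refreshed
    # afterwards, e.g. with VolumeController.set_color_map().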
def blend_maps(map1, map2, time, total_time):
"""
Blend color maps
"""
weight1 = max(0.0, total_time - time)
weight2 = max(0.0, time)
return map1 * weight1 + map2 * weight2
class Volume(vedo.Volume):
"""
    Override of the vedo.Volume constructor, which transposes the given numpy
    array without informing the caller and offers no way to opt out of that
    behaviour.
"""
def __init__(self,
inputobj=None,
c='RdBu_r',
alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
alphaGradient=None,
alphaUnit=1,
mode=0,
shade=False,
spacing=None,
dims=None,
origin=None,
mapper='smart'):
vtk.vtkVolume.__init__(self)
vedo.BaseGrid.__init__(self)
self.axes = [1, 1, 1]
###################
if isinstance(inputobj, str):
if "https://" in inputobj:
from vedo.io import download
inputobj = download(inputobj, verbose=False) # fpath
elif os.path.isfile(inputobj):
pass
else:
inputobj = sorted(glob.glob(inputobj))
###################
if 'gpu' in mapper:
self._mapper = vtk.vtkGPUVolumeRayCastMapper()
elif 'opengl_gpu' in mapper:
self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
elif 'smart' in mapper:
self._mapper = vtk.vtkSmartVolumeMapper()
elif 'fixed' in mapper:
self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
elif isinstance(mapper, vtk.vtkMapper):
self._mapper = mapper
else:
print("Error unknown mapper type", [mapper])
raise RuntimeError()
self.SetMapper(self._mapper)
###################
inputtype = str(type(inputobj))
#colors.printc('Volume inputtype', inputtype)
if inputobj is None:
img = vtk.vtkImageData()
elif vedo.utils.isSequence(inputobj):
if isinstance(inputobj[0], str): # scan sequence of BMP files
ima = vtk.vtkImageAppend()
ima.SetAppendAxis(2)
pb = vedo.utils.ProgressBar(0, len(inputobj))
for i in pb.range():
f = inputobj[i]
picr = vtk.vtkBMPReader()
picr.SetFileName(f)
picr.Update()
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(picr.GetOutput())
mgf.Update()
ima.AddInputData(mgf.GetOutput())
pb.print('loading...')
ima.Update()
img = ima.GetOutput()
else:
if "ndarray" not in inputtype:
inputobj = np.array(inputobj)
if len(inputobj.shape)==1:
                    varr = vedo.numpy2vtk(inputobj, dtype=float)
else:
# ------------------------------ Nasty lines commented here
#if len(inputobj.shape)>2:
#inputobj = np.transpose(inputobj, axes=[2, 1, 0])
                    varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=float)
varr.SetName('input_scalars')
img = vtk.vtkImageData()
if dims is not None:
img.SetDimensions(dims)
else:
if len(inputobj.shape)==1:
vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r')
raise RuntimeError()
img.SetDimensions(inputobj.shape)
img.GetPointData().SetScalars(varr)
#to convert rgb to numpy
# img_scalar = data.GetPointData().GetScalars()
# dims = data.GetDimensions()
# n_comp = img_scalar.GetNumberOfComponents()
# temp = utils.vtk2numpy(img_scalar)
# numpy_data = temp.reshape(dims[1],dims[0],n_comp)
# numpy_data = numpy_data.transpose(0,1,2)
# numpy_data = np.flipud(numpy_data)
elif "ImageData" in inputtype:
img = inputobj
elif isinstance(inputobj, vedo.Volume):
img = inputobj.GetMapper().GetInput()
elif "UniformGrid" in inputtype:
img = inputobj
elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata
if hasattr(inputobj, "Update"):
inputobj.Update()
img = inputobj.GetOutput()
elif isinstance(inputobj, str):
from vedo.io import loadImageData, download
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
img = loadImageData(inputobj)
else:
vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r')
return
if dims is not None:
img.SetDimensions(dims)
if origin is not None:
img.SetOrigin(origin) ### DIFFERENT from volume.origin()!
if spacing is not None:
img.SetSpacing(spacing)
self._data = img
self._mapper.SetInputData(img)
self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
self.GetProperty().SetShade(True)
self.GetProperty().SetInterpolationType(1)
self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)
# remember stuff:
self._mode = mode
self._color = c
self._alpha = alpha
self._alphaGrad = alphaGradient
self._alphaUnit = alphaUnit
@dataclass
class LUTModel:
"""
This class might look slightly convoluted but it's actually simple.
We use double mapping here in order to enable live/interactive visualization
of volumetric data. Instead of replacing values in a 3D volume, we only replace
the colors in the 1D LUT list.
The point is that it's too slow to update a given data, like a segmented
volume with custom values. Instead, we map such custom values to a 1D
array (our LUT) that maps colors to raw volume values.
This is much faster in terms of rendering and it enables interactive visualization.
The scalar_lut is the original LUT for the given scalars (custom values)
and the mapped_lut is the LUT assigned to the surfaces (like slices)
that have copied data from the volume. The volume is given color_map
and alpha_map through vedo methods.
You might say "ok for double mapping, it's the only way for interactive
rendering of a volume, but what about color_map and mapped_lut? Aren't
they the same?". The answer is: they're the same but VTK does not accept
a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction
and a vtkPiecewiseFunction for alpha. There's no way around it.
The color_map will be computed as a vtkColorTransferFunction and
the alpha_map as the vtkPiecewiseFunction.
"""
    name: str = None
color_map_function: Any = None
scalar_map: np.ndarray = None
scalar_min: float = 0.0
scalar_max: float = 1.0
scalar_lut: vtk.vtkLookupTable = None
mapped_lut: vtk.vtkLookupTable = None
color_map: np.ndarray = None
alpha_map: np.ndarray = None
base_color_map: np.ndarray = None
def build(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None):
"""
Build several look-up tables (LUT, sometimes known as transfer function) for the volume.
This is where double-mapping occurs for segmented volumes that have values from 0 to n where
each value defines a sub-volume or region. If we want to assign values (say from another model)
to these regions, we'd have to change the volume values and it would be too slow iterating over
each voxel in 3D. Instead we define colors that represent these values and assign them to
segmented regions in a 1D list.
        :param scalar_map: A 2D list whose first column contains values from the volume itself
        and whose second column contains the scalar values that correspond to each region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
        :param noise_amount: Amount of random noise added to the colors (0.0 disables it)
:param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values
:return: LUTModel
"""
if color_map is None:
return
if nan_rgba is None:
nan_rgba = [0.0, 0.0, 0.0, 0.0]
if self.base_color_map is None:
self.base_color_map = color_map
colors = []
alphas = []
lut = vtk.vtkLookupTable()
scalar_lut = vtk.vtkLookupTable()
# Use the number of values in the volume
num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map)
num_steps = 2655
s_min = 0
s_max = num_steps
if scalar_map is None:
if color_map is None and self.base_color_map is not None:
color_map = self.base_color_map
loop = range(num_steps)
noise = None
if isinstance(noise_amount, float) and noise_amount > 0:
noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2
# Vedo works with nested lists:
# [region_id, [r, g, b]] for color, and [region_id, a] for alpha
if scalar_map is None:
# Standard volume that is not segmented
lut.SetRange(s_min, s_max)
lut.SetNumberOfTableValues(num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(num_steps)
for r_id in loop:
color = vedo.colors.getColor(color_map[r_id])
color = np.array(color)
if noise is not None:
color = color + noise[r_id]
color = np.maximum(color, 0.0)
color = np.minimum(color, 1.0)
colors.append([r_id, color])
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if r_id == 0 and zero_is_transparent:
alpha = 0.0
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
scalar_lut.SetTableValue(r_id, *color, alpha)
#scalar_map[r_id] = color_map[r_id]
else:
# Segmented volume
s_min, s_max = scalar_range
lut.SetRange(0, num_steps)
lut.SetNumberOfTableValues(num_steps)
color = None
for r_id in range(num_steps):
try:
value = scalar_map[r_id]
except Exception:
value = None
if value is None:# or s_min > value or s_max < value:
color = nan_rgba[:3]
alpha = nan_rgba[3]
else:
color = vedo.colorMap(value, color_map, s_min, s_max)
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if value == 0 and zero_is_transparent:
alpha = 0.0
colors.append([r_id, color])
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
# Real scalar LUT, mainly as a reference for the user
# Here the colors resulting from the given scalar min to max
# are assigned to segmented values in the volume
mock_values = np.linspace(s_min, s_max, num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(len(mock_values))
for r_id in range(len(mock_values)):
color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max))
                alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0
                scalar_lut.SetTableValue(r_id, *color, alpha)
lut.Build()
scalar_lut.Build()
# Just to avoid confusion: the user can give a string as a color map, like 'viridis'
# but the real color map object is stored in self.color_map. The name 'viridis'
# is stored under self.color_map_function (if needed later on)
self.color_map_function = color_map
self.color_map = colors
self.alpha_map = alphas
self.scalar_map = scalar_map
self.mapped_lut = lut
self.scalar_lut = scalar_lut
def get_sorted_scalars(self):
"""
Get a numpy 2D array of key-value pairs sorted by value
:return: 2D array
"""
sorted_scalars = np.zeros((len(self.scalar_map), 2))
values = list(self.scalar_map.values())
keys = list(self.scalar_map.keys())
sorted_scalars[:, 0] = keys
sorted_scalars[:, 1] = values
sorted_mask = sorted_scalars[:, 1].argsort()
sorted_scalars = sorted_scalars[sorted_mask]
return sorted_scalars
class VolumeController():
"""
Wrapper class that handles both the volume and its slices
"""
def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True,
center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True):
"""
Constructor
:param plot: Plot instance
:param model: VolumeModel instance
        :param initialize: Whether initialization is performed in the constructor
:param clipping: Whether clipping is enabled at init time
:param slicer_box: Whether the slicer box is enabled at init
        :param center_on_edges: Whether the volume is offset by half a voxel or not
:param alpha_unit_upper_offset: The offset to apply to alpha unit computation.
If greater than 0, the volume will be less opaque
:param add_to_scene: Whether the volume is added to scene after init
"""
self.plot = plot
self.model = model
self.actor = None
self.picker = None
self.scalars = None
self.mask = None
self.bounding_mesh = None
self.alpha_unit_upper_offset = alpha_unit_upper_offset
self.alpha_factor = 0.001 # * self.model.resolution
self.clipping_planes = None
self.enable_volume_clipping = True
self.clipping_axes = []
self.slicers = OrderedDict()
self.slicers_selectable = False
self.scalar_bar = None
if initialize:
self.initialize(clipping, slicer_box, center_on_edges, add_to_scene)
#msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())
#logging.info(msg)
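    # Assumed end-to-end usage (a sketch, not documented API; the file path is hypothetical):
    #
    #   plot = vedo.Plotter()
    #   model = VolumeModel()
    #   model.load_volume('volume.nrrd')
    #   model.compute_size()
    #   controller = VolumeController(plot, model)  # builds the actor, picker and slicers
    #   plot.show()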
def get_related_actors(self):
"""
Get all 3D actors related to this view (for registering it in the application)
:return: List of VTK objects
"""
actors = []
for slicer_id in self.slicers:
actor = self.slicers[slicer_id].actor
if actor is not None:
actors.append(actor)
for iso_id in self.model.isosurfaces:
actors.append(self.model.isosurfaces[iso_id])
actors.append(self.actor)
return actors
def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param clipping: Whether clipping is enabled
:param slicer_box: Whether the slicer box mode is enabled (6 clipping planes)
:param center_on_edges: Whether the volume's center is aligned to its edges
rather than the voxel center
:param add_to_scene: Whether the object is added to the scene
"""
self.build_actor(center_on_edges, add_to_scene)
self.initialize_picker()
if slicer_box:
self.initialize_slicer_box()
self.initialize_clipping_planes()
self.set_volume_clipping(clipping)
self.set_color_map()
'''
if use_mask:
self.mask = self.actor.clone()
self.mask.threshold(1, replace=1, replaceOut=0)
self.actor.mapper().SetMaskTypeToBinary()
self.actor.mapper().SetMaskInput(self.mask)
'''
def set_volume_visibility(self, on=True):
"""
Set volume visibility
:param on: Visibility boolean
"""
if self.actor is not None:
self.actor.SetVisibility(on)
def set_slices_visibility(self, on=True):
"""
Set the visibility of slices
:param on: Visibility boolean
"""
for slicer_id in self.slicers:
slicer_view = self.slicers.get(slicer_id)
slicer_view.actor.SetVisibility(on)
def get_slices_opacity(self):
"""
        Get the opacity of slices (expected to be the same value for all slices).
        A mean over all slice alphas is computed, just in case they differ.
:return: Alpha value
"""
value = 0
num_values = 0
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slice_alpha = slicer.actor.GetProperty().GetOpacity()
if slice_alpha is None:
continue
value += slice_alpha
num_values += 1
if num_values == 0 or value == 0:
return None
return value / num_values
def set_slices_opacity(self, value):
"""
Set the opacity of slices
:param value: Alpha value
"""
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slicer.actor.alpha(value)
def get_opacity(self):
"""
Get the relative opacity unit
:return: Float
"""
return self.get_relative_opacity_unit()
def get_relative_opacity_unit(self):
"""
Get the alpha unit relative value
:return: Float
"""
alpha_unit = self.actor.alphaUnit()
r = self.model.resolution
# Inverse function of set_opacity_unit()
value = 1.1 - (alpha_unit / r)**0.5
return value
def set_opacity(self, value):
"""
Set the opacity of the volume like in set_opacity_unit()
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
self.set_opacity_unit(value)
def set_opacity_unit(self, value):
"""
Set the opacity of the volume by modifying its alpha unit (a VTK thing).
The alpha unit defines how much a voxel is transparent to incoming ray.
This method normalizes the range between 0.0 and 1.0 as it depends
on the resolution of the volume
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
r = self.model.resolution
# 1 is chosen and not 1.0 because when value == 1.0, that would
# mean that the volume is fully opaque and this yields artifacts with VTK
alpha_unit = (1 + self.alpha_unit_upper_offset - value)**2 * r
# vedo calls it "alpha" unit, vtk "opacity" unit. same-same!
self.actor.alphaUnit(alpha_unit)
return alpha_unit
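    # Worked example of the relation above, assuming offset = 0.0 and resolution r = 25:
    #   set_opacity_unit(0.9) -> alpha_unit = (1 + 0.0 - 0.9)**2 * 25 = 0.25
    # Note that get_relative_opacity_unit() uses 1.1 rather than 1, so the round
    # trip yields 1.1 - (0.25 / 25)**0.5 = 1.0, not exactly 0.9.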
def get_spacing(self):
"""
Get the spacing/resolution of the volume
"""
res = self.model.resolution
spacing = None
if isinstance(res, int) or isinstance(res, float):
spacing = np.array([res]*3)
elif len(res) == 3:
spacing = res
else:
raise ValueError(f'Given volume resolution {self.model.resolution} is invalid')
return spacing
def build_actor(self, center_on_edges=False, add_to_scene=True): #[1, 2]
"""
Set the volume actor for visualization in VTK
:param center_on_edges: Whether alignment by one voxel is applied
:param add_to_scene: Whether the object is added to the scene
"""
spacing = self.get_spacing()
self.actor = Volume(self.model.data, spacing=spacing, mapper='smart')
self.scalars = self.actor._data.GetPointData().GetScalars()
self.actor.name = self.model.name
self.actor.shade(False)
self.actor.mode(0)
self.actor.pickable(True)
self.set_interactive_subsampling(False)
if center_on_edges:
            # Moving the volume by one voxel. This is possibly due to the use of custom spacing.
self.actor.pos(self.actor.pos() + spacing)
center = np.array(self.actor.pos()) + self.actor.center()
if np.linalg.norm(center - self.model.center) > 0:
#print('Adjusting volume center from', self.model.center, 'to', center)
self.model.center = center
self.set_opacity_unit(0.9)
self.actor.jittering(True)
#self.actor._mapper.AutoAdjustSampleDistancesOn()
#self.actor._mapper.SetBlendModeToAverageIntensity()
#self.actor._mapper.SetSampleDistance(100)
if add_to_scene:
self.plot.add(self.actor, render=False)
def set_position(self, position):
"""
Set the position of the volume
"""
self.actor.pos(position)
# TODO: we're entering in unstable things when we move the volume
# because there is not yet a guaranteed support for updating the slices
# with the correct position
self.reset_clipping_planes()
def mirror_volume(self, axes):
"""
Mirror the volume on given axes
:param mirror_axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which
the volume will be mirrored. Optional
"""
if axes is None or self.actor is None:
return
axes_str = ['x', 'y', 'z']
for axis in axes:
if isinstance(axis, int) and 0 <= axis <= 2:
axis = axes_str[axis]
if isinstance(axis, str) and len(axis) == 1:
self.actor.mirror(axis=axis.lower())
def initialize_picker(self, opacity_iso_value=0.0001):
"""
Initialize the volume picker
:param opacity_iso_value: Threshold that defines at what accumulated
opacity the picker hits the volume. In the case of a segmented volume,
        you want to keep this value very low, like the default one.
"""
# As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars
# https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point
picker = vtk.vtkVolumePicker()
picker.PickCroppingPlanesOn()
picker.UseVolumeGradientOpacityOff()
picker.SetTolerance(opacity_iso_value)
# A low OpacityIsoValue is necessary in the case of segmented volumes
picker.SetVolumeOpacityIsovalue(opacity_iso_value)
picker.AddPickList(self.actor)
picker.PickFromListOn()
self.picker = picker
def initialize_slicer_box(self):
"""
Initialize 6 slicing planes as a box.
"""
for axis_id in range(6):
slicer_model = SlicerModel(axis=axis_id)
slicer_model.align_to_axis(axis_id, self.model.dimensions)
self.model.slicers.store(slicer_model)
# It's important in this case to have standalone=False
self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False)
def update_slicer(self, slicer_id, value=None, normal=None):
"""
Update a given slicer with the given value
:param slicer_id: SlicerView id
:param value: Value or 3D point
:param normal: Normal
"""
slicer_view = self.slicers.get(slicer_id)
if slicer_view is None:
return
# This is an important part where the slicing plane is itself sliced by other planes
slicer_model = slicer_view.model
slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis)
# Use given value (or point) and normal to guide the below code
result = slicer_model.update(value, normal)
if not result:
return
# Update slicing image
slicer_view.update()
def initialize_clipping_planes(self):
"""
Initialize X, Y and Z clipping planes with two planes per axis
for positive and negative slicing
"""
self.clipping_planes = vtk.vtkPlaneCollection()
slicer_models = self.model.slicers
for slicer_id in slicer_models:
self.clipping_planes.AddItem(vtk.vtkPlane())
self.reset_clipping_planes()
return
def get_clipping_planes(self, except_axis=None):
"""
Get the current clipping planes except the ones on the given axis
:param except_axis: Axis id to ignore. If None, all clipping planes will be returned
:return: vtkPlaneCollection
"""
if not isinstance(except_axis, int):
return self.clipping_planes
exceptions = [except_axis * 2, except_axis * 2 + 1]
planes = vtk.vtkPlaneCollection()
for plane_id in range(self.clipping_planes.GetNumberOfItems()):
if plane_id in exceptions:
continue
plane = self.clipping_planes.GetItem(plane_id)
planes.AddItem(plane)
return planes
def reset_clipping_planes(self):
"""
Reset clipping planes
"""
slicer_models = self.model.slicers
for slicer_id in slicer_models:
slicer_model = slicer_models[slicer_id]
plane_id = slicer_model.get_box_plane_id()
plane = self.clipping_planes.GetItem(plane_id)
plane.SetOrigin(slicer_model.origin + self.actor.pos())
plane.SetNormal(slicer_model.normal)
def clip_on_axis(self, position=None, axis=None, normal=None):
"""
Apply clipping on a single axis
        :param position: Position of the clipping plane
        :param axis: Clipping axis: 0 for X, 1 for Y, 2 for Z
        :param normal: Clipping normal; a negative component on the given axis
        selects the negative-side plane of that axis
"""
axis_offset = 0
# This should already be sorted in the model but in case it isn't, we double check here
if normal is not None and normal[axis] < 0:
# This means that the given axis has two
# clipping planes and we take the negative one
axis_offset += 1
#position = self.model.dimensions - position
axis_storage_id = axis * 2 + axis_offset
plane = self.clipping_planes.GetItem(axis_storage_id)
plane.SetOrigin(position)
plane.SetNormal(normal)
def set_volume_clipping(self, on=None):
"""
Set volume clipping on or off.
:param on: Whether clipping is enabled or disabled. If None, then
the state is toggled.
"""
if on is None:
self.enable_volume_clipping = not self.enable_volume_clipping
else:
self.enable_volume_clipping = on
if self.enable_volume_clipping:
self.actor.mapper().SetClippingPlanes(self.clipping_planes)
else:
self.actor.mapper().SetClippingPlanes(None)
def clip_to_bounds(self, bounds):
"""
Clip the volume and move the slicing planes according to 6 boundary points
:param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax)
"""
planes = vtk.vtkPlanes()
planes.SetBounds(bounds)
# Normals are reversed with the above code
# so we fix that here with flip_normals=True
self.set_clipping_planes(planes, flip_normals=True)
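    # Example (assumed coordinates, in the volume's units):
    #
    #   controller.clip_to_bounds([0, 500, 0, 800, 100, 300])
    #
    # keeps only the sub-box with x in [0, 500], y in [0, 800], z in [100, 300]
    # and moves the six slicing planes accordingly via set_clipping_planes().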
def box_widget_update(self, widget=None, event=None):
"""
Clip the volume with the current box widget
:param widget: vtkBoxCutter
:param event: vtkEvent
"""
if widget is None:
return
planes = vtk.vtkPlanes()
widget.GetPlanes(planes)
self.set_clipping_planes(planes)
def set_clipping_planes(self, planes, flip_normals=False):
"""
        Clip the volume and move the slicing planes according to the given planes
        :param planes: vtkPlanes
        :param flip_normals: Whether the normals of the given planes are flipped
"""
vtk_n = planes.GetNormals()
vtk_pts = planes.GetPoints()
num_pts = vtk_pts.GetNumberOfPoints()
for plane_id in range(num_pts):
normal = vtk_n.GetTuple(plane_id)
origin = vtk_pts.GetPoint(plane_id)
plane = self.clipping_planes.GetItem(plane_id)
current_origin = np.array(plane.GetOrigin())
# We don't need to check the normal because
# we prevent box cutter rotation in our case
if np.linalg.norm(current_origin - origin) < 0.1:
continue
plane.SetOrigin(origin)
if flip_normals:
normal = np.array(normal)*-1
plane.SetNormal(normal)
self.update_slicer(plane_id, origin, normal)
self.clipping_planes.Modified()
self.actor.GetMapper().Update()
def set_alpha_map(self, alpha_map, alpha_factor=None):
"""
Set alpha map to the volume view
:param alpha_map: 2D list of scalar values and alpha values
:param alpha_factor: Alpha factor
"""
if alpha_map is None:
if self.model.luts.current is None:
return
alpha_map = self.model.luts.current.alpha_map
if alpha_factor is None:
alpha_factor = self.alpha_factor
if len(np.array(alpha_map).shape) > 1:
volume_alpha_map = np.ones_like(alpha_map).astype(float)
volume_alpha_map[:] = alpha_map[:]
volume_alpha_map[:, 1] *= alpha_factor
self.actor.alpha(volume_alpha_map)
else:
self.actor.alpha(np.array(alpha_map) * alpha_factor)
def set_color_map(self, color_map=None, alpha_map=None):
"""
Set the color and alpha map to the view objects
:param color_map: Nested list of scalar values and rgb colors
like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...]
:param alpha_map: 2D list of scalar values and alpha values
"""
lut = self.model.luts.current
if color_map is None and lut is not None:
color_map = lut.color_map
if alpha_map is None and lut is not None:
alpha_map = lut.alpha_map
if color_map is None:
return
self.actor.cmap(color_map)
self.set_alpha_map(alpha_map)
if lut is not None:
for surface in self.model.isosurfaces:
surface._mapper.SetLookupTable(lut.opaque_lut)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.apply_lut(lut.mapped_lut)
else:
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.set_color_map(color_map, alpha_map)
def disable_shading(self):
"""
Disable volume shading
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.ShadeOff()
self.actor.SetProperty(volumeProperty)
def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9):
"""
Enable volume shading
TODO: See if this method is useful
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(ambient)
volumeProperty.SetDiffuse(diffuse)
volumeProperty.SetSpecular(specular)
volumeProperty.SetScalarOpacityUnitDistance(1)
self.actor.SetProperty(volumeProperty)
def toggle_slices_visibility(self):
"""
Toggle slices visibility
"""
self.model.slices_visible = not self.model.slices_visible
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
self.update_slicer(slicer)
if slicer.actor is not None:
slicer.actor.SetVisibility(self.model.slices_visible)
def toggle_hollow(self):
"""
Toggle hollow mode for volume rendering. This is intended
to work only on segmented (annotated) volumes.
"""
volume_property = self.actor.GetProperty()
# Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff...
disabled = bool(volume_property.GetDisableGradientOpacity())
if disabled:
volume_property.DisableGradientOpacityOff()
alpha_gradient = vtk.vtkPiecewiseFunction()
alpha_gradient.AddPoint(0, 0.0)
alpha_gradient.AddPoint(1, 0.75)
alpha_gradient.AddPoint(2, 1.0)
volume_property.SetGradientOpacity(alpha_gradient)
else:
volume_property.DisableGradientOpacityOn()
return not disabled
def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False):
"""
        Get a scalar value from the volume with respect to XYZ coordinates and optionally a normal step,
        that is the normal on which to probe multiplied by the distance you want to travel further into
        the volume to pick a correct value. Often the "surface point" on a volume with non-uniform transparency
        is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts.
        So you need to go further into the "cloud", so to speak, in order to find the values you want.
:param position: 3D array
:param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields
bad or unwanted results
:param avoid_values: Try and find other values than this
        :param cast_to_int: Whether the value should be cast to integer
        :param none_as_zero: Whether a None result is returned as 0 instead
        :return: Scalar value
"""
if isinstance(avoid_values, int) or isinstance(avoid_values, float):
avoid_values = [avoid_values]
# TODO: see if this is faster? To be tested
# ijk_result = [0.0, 0.0, 0.0]
# volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result)
# volume_actor._data.GetPoint(ijk_result)
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if not valid_id or (value in avoid_values):
if normal_step is not None:
position += normal_step
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if cast_to_int and value is not None:
value = int(value)
if value is None and none_as_zero:
value = 0
return value
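    # Usage sketch: probe a bit further along the picking normal when the surface
    # voxel is transparent (the step length is illustrative):
    #
    #   step = normal * self.model.resolution * 4
    #   value = self.get_value_from_xyz(position, normal_step=step)
    #
    # which is essentially how pick() uses this method further below.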
def raycast(self, origin, screen_position):
"""
Shorthand for pick() method
"""
return self.pick(origin, screen_position)
def pick(self, origin, screen_position):
"""
        Find the nearest intersection, even on a sliced volume, with the ray formed
by an origin and a screen-space position (given by VTK when you click on an actor)
:param origin: Origin of the vector
:param screen_position: 2D position on screen. This is given by vtk events like MouseRelease
:return: The nearest position and its related value queried in the volume image
"""
self.picker.Pick(*screen_position[:2], 0, self.plot.renderer)
position = np.array(self.picker.GetPickPosition())
ray = position - origin
distance = np.linalg.norm(ray)
normal = ray / distance
# Go half a voxel further to make sure we don't hit "void"
vol_position = position # + normal * self.model.resolution / 2
probe_position = position + normal * self.model.resolution * 10
closest_dist = distance
slice_position = None
# See if the line hits any of the slicers (that are image planes)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.got_slice:
hits = slicer.actor.intersectWithLine(origin, probe_position)
if len(hits) != 1:
continue
new_dist = np.linalg.norm(position - hits[0])
if new_dist < closest_dist and new_dist < self.model.resolution * 2:
closest_dist = new_dist
slice_position = hits[0]
if slice_position is None:
position = vol_position
else:
position = slice_position
value = self.get_value_from_xyz(position, normal * self.model.resolution * 4)
return position, value
def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None,
screen_space=True, min_v=None, max_v=None, add_to_scene=True):
"""
Add a series of points along a line probe
:param origin: Probe origin
:param destination: Probe destination point
:param resolution: Number of (equidistant) points that will be probed along that line
:param radius: Radius of the points
:param color_map: Scalars color map
:param screen_space: Whether the points are screen space or spheres
:param min_v: Min scalar value
:param max_v: Max scalar value
:param add_to_scene: Whether the new probe is added to scene
:return: Points
"""
if color_map is None:
color_map = self.model.luts.current.color_map
positions, values = self.probe(origin, destination, resolution)
points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space,
color_map=color_map, min_v=min_v, max_v=max_v)
points_obj.origin = origin
points_obj.destination = destination
# Dynamic properties assignment
points_obj.target = self.actor
points_obj.target_controller = self
if add_to_scene:
self.plot.add(points_obj)
return points_obj
def update_probe(self, origin, destination, points_obj):
"""
Update a probe with given start and end points
:param origin: Start point
:param destination: End point
:param points_obj: Points object
"""
resolution = points_obj._polydata.GetPoints().GetNumberOfPoints()
positions, values = self.probe(origin, destination, resolution)
points_obj.update_data(positions, values)
def probe(self, origin, destination, resolution=40):
"""
Probe a volume with a line
:param origin: Origin of the line probe
:param destination: Destination of the line probe
:param resolution: Number of point samples along the probe
:return: Positions and values
"""
origin = np.array(origin)
destination = np.array(destination)
distance = np.linalg.norm(destination - origin)
ray = destination - origin
ray_norm = ray / distance
step = distance / resolution
positions = [origin + ray_norm * p_id * step for p_id in range(resolution)]
values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions])
return positions, values
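    # Example: sample 40 equidistant points between two coordinates (assumed to be
    # in the same space as the volume) and read the scalar under each of them:
    #
    #   positions, values = controller.probe([0, 0, 0], [1000, 0, 0], resolution=40)
    #
    # add_probe() above wraps this and turns the result into a Points object.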
def set_interactive_subsampling(self, on=False):
"""
Set volume subsampling on or off.
This is enabled by default in VTK and we disable it by default in IBLViewer
:param on: Whether volume subsampling in interactive mode is on or off
"""
#self.plot.window.SetDesiredUpdateRate(0)
#self.actor._mapper.SetInteractiveUpdateRate(0)
self.model.interactive_subsampling = on
self.actor._mapper.SetAutoAdjustSampleDistances(on)
if on:
self.actor._mapper.InteractiveAdjustSampleDistancesOn()
else:
self.actor._mapper.InteractiveAdjustSampleDistancesOff()
def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True):
"""
Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value.
Unlike general isosurfacing, this method extracts only the surface mesh of the
desired region/label/segmentation, not of all values from 0 to label.
:param label: Label (scalar) value found in the volume
:param exceptions: If the label is found in the exceptions list, isosurfacing will not occur
:param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label
:param set_current: Whether the label is set as the current one in the model
:param to_int: Whether the label is cast to integer
:param split_meshes: Whether we split meshes when multiple ones are found
:return: A list of all manifold meshes for the given label
"""
if label is None or label in exceptions:
return
if to_int:
label = int(label)
existing_meshes = self.model.isosurfaces.get(label)
if existing_meshes is not None and not force_rebuild:
return existing_meshes
lut = self.model.luts.current
simple_lut = vtk.vtkLookupTable()
simple_lut.SetNumberOfColors(1)
simple_lut.SetTableRange(0, 1)
simple_lut.SetScaleToLinear()
simple_lut.SetTableValue(0, 0, 0, 0, 0)
simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label))
simple_lut.Build()
# Generate object boundaries from labelled volume
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(self.actor.imagedata())
discrete.GenerateValues(1, label, label)
smoothing_iterations = 15
pass_band = 0.001
feature_angle = 120.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(discrete.GetOutputPort())
smoother.SetNumberOfIterations(smoothing_iterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(feature_angle)
smoother.SetPassBand(pass_band)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
self.model.isosurfaces[label] = []
#splitter = vtk.vtkExtractPolyDataGeometry()
if split_meshes:
splitter = vtk.vtkPolyDataConnectivityFilter()
splitter.SetInputConnection(smoother.GetOutputPort())
splitter.SetExtractionModeToAllRegions()
splitter.ColorRegionsOn()
splitter.Update()
for region_id in range(splitter.GetNumberOfExtractedRegions()):
#splitter.AddSpecifiedRegion(region_id)
#splitter.Update()
#poly = vtk.vtkPolyData()
#poly.ShallowCopy(splitter.GetOutput())
threshold = vtk.vtkThreshold()
threshold.SetInputConnection(splitter.GetOutputPort())
threshold.ThresholdBetween(region_id, region_id)
threshold.Update()
actor = vedo.Mesh(threshold.GetOutput())
#actor._mapper.SetScalarRange(min_value, lut.scalar_max)
#actor._mapper.SetUseLookupTableScalarRange(True)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
#actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label)
else:
poly = smoother.GetOutput()
actor = vedo.Mesh(poly)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
'''
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(smoother.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smoother.GetOutputPort())
mapper.SetLookupTable(lut.scalar_lut)
mapper.SetScalarRange(min_value, lut.scalar_max)
'''
if set_current:
self.model.isosurfaces.set_current(label)
return self.model.isosurfaces[label]
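# Usage sketch for isosurfacing (the label value is illustrative):
#
#   meshes = controller.isosurface(315, split_meshes=True)
#   for mesh in meshes:
#       plot.add(mesh)
#
# A LUT must already be active in model.luts, since the surface color is read
# from lut.mapped_lut.GetTableValue(label).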
@dataclass
class SlicerModel:
PREFIX = '[Slicer]_'
MIN_SLAB_THICKNESS = 1.0 #um
__count = 0
def unique_name():
SlicerModel.__count += 1
return f'{SlicerModel.PREFIX}_{SlicerModel.__count}'
name: str = field(default_factory=unique_name)
# 0, 1 or 2. See the normal for axis orientation
axis: int = None
value: float = 0.0
bounds: np.ndarray = None
#thickness: float = 0.0
origin: np.ndarray = np.array([0.0, 0.0, 0.0])
normal: np.ndarray = np.array([1.0, 0.0, 0.0])
clipping_planes: vtk.vtkPlaneCollection = None
def get_box_plane_id(self):
"""
Get the plane id
:return: Int
"""
if self.axis is None:
return
offset = 0 if self.normal[self.axis] < 0 else 1
return self.axis * 2 + offset
def get_axis_aligned_info(self, vtk_axis):
"""
VTK stores box clipping planes in the order:
-X to +X: 0, 1
-Y to +Y: 2, 3
-Z to +Z: 4, 5
        This method retrieves which XYZ axis (0, 1 or 2) the given VTK plane
        index corresponds to, and its orientation sign
:return: Int axis and float orientation
"""
orientation = -1.0 if vtk_axis % 2 == 0 else 1.0
axis = (vtk_axis - vtk_axis % 2) // 2
return axis, orientation
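    # Example of the VTK plane-index convention described above:
    #   vtk_axis = 3 -> orientation = +1.0, axis = (3 - 3 % 2) // 2 = 1 (Y)
    #   vtk_axis = 4 -> orientation = -1.0, axis = (4 - 4 % 2) // 2 = 2 (Z)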
def align_to_axis(self, axis, dimensions=None):
"""
Set the axis of the slicer
:param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info()
:param dimensions: Dimensions of the volume
"""
if not isinstance(axis, int):
return
normal = np.zeros(3).astype(float)
xyz_axis, orientation = self.get_axis_aligned_info(axis)
normal[xyz_axis] = orientation
self.axis = xyz_axis
if dimensions is not None and orientation < 0:
self.origin = np.zeros(3)
self.origin[xyz_axis] = dimensions[xyz_axis]
self.normal = normal
def flip_normal(self):
"""
Flip the normal of the slicer
"""
self.normal *= -1.0
self.check_normal()
if isinstance(self.axis, int):
self.axis *= -1
def check_normal(self):
"""
Check if the normal is axis-aligned.
If not, the axis is set to None.
"""
zeros = self.normal == 0
if len(self.normal[zeros]) >= 2:
self.axis = 0
def update(self, value=None, normal=None, axis=None):
"""
Update slicer
:param value: Origin of the slicing plane
:param normal: Normal of the slicing plane
:param axis: Axis, if the plane is axis-aligned
:return: True if model changed, False if it didn't
"""
if not(isinstance(value, int) or isinstance(value, float)):
if normal is None:
normal = self.normal
if normal is None:
return False
if normal[1] == 0 and normal[2] == 0:
axis = 0 #if normal[0] > 0 else 1
elif normal[0] == 0 and normal[2] == 0:
axis = 1 #if normal[1] > 0 else 1
elif normal[0] == 0 and normal[1] == 0:
axis = 2 #if normal[2] > 0 else 1
if axis is not None:
value = value[axis]
if axis is None:
axis = self.axis
if self.value == value:
return False
if axis is not None:
self.value = value
self.origin = np.array(normal) * value
else:
self.value = None
self.origin = value
self.normal = normal
self.axis = axis
return True
class SlicerView():
slices = {}
def __init__(self, plot, volume_view, slicer_model, standalone=True):
"""
Constructor
:param plot: Plot instance
:param volume_view: VolumeView instance
:param slicer_model: SlicerModel instance
:param standalone: Whether the slice is a standalone actor that
can be clicked. Set this to False if you want to use transparency,
at the expense that because of a VTK bug, you won't be able to
click on it anymore, requiring you to code another way of detecting
where the user clicked. See more in initialize_mapper()
"""
self.plot = plot
self.volume_view = volume_view
self.model = slicer_model
self.actor = None
self.filter = None
self.reslice = None
self.slice_type = -1
self.depth_peeling_enabled = None
self.standalone = standalone
self.got_slice = False
self.color_map = None
self.alpha_map = None
self.initialize()
def initialize(self, render=False):
"""
Initialize the slicer object
"""
if self.filter is None:
self.filter = vtk.vtkImageDataGeometryFilter()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
# Adding empty actor so that it's updated later on
self.plot.add(self.actor, render=render)
self.actor.lighting('off')
self.actor.name = self.model.name
self.initialize_mapper()
def initialize_mapper(self):
"""
Initialize the object mapper
"""
mapper = self.actor._mapper
mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData
mapper.SetColorModeToMapScalars()
mapper.ScalarVisibilityOn()
# We operate on static volumes thanks to the double LUT mapping implemented here
mapper.SetStatic(True)
# Without using scalar range, the mapping will be off
mapper.SetUseLookupTableScalarRange(True)
# We prevent this actor from being pickable as a result of the bug described below
# when we want to use transparency on the slice.
self.actor.pickable(self.standalone)
if self.standalone:
# There is a bug in VTK 9 that prevents clicking on transparent objects
# as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291
# The "Force opaque fix" below should be gone with the next VTK update hopefully.
# In the meantime, we use this.
# TODO: remove this when this bug is fixed in VTK
self.actor.ForceOpaqueOn()
else:
# We bypass the transparent selection bug when a VolumeView has multiple slicers
# like in box mode because the click detection occurs on the volume and we perform
# an additional test to see if a slicer yields a nearby result. If it does,
# the result is like clicking on the slice and we get transparency for free.
pass
# Make sure we have depth peeling activated, otherwise transparency with volumes
# will look weird and in the wrong order
self.plot.renderer.UseDepthPeelingOn()
self.plot.renderer.UseDepthPeelingForVolumesOn()
segmented = self.volume_view.model.is_segmented()
if segmented:
# This very line below will mess up the entire slice coloring if:
# - you have a segmented volume and this is set to True
# - you have a non-segmented (like raw MRI, CT) volume and this is set to False
mapper.SetInterpolateScalarsBeforeMapping(not segmented)
mapper.Update()
def set_color_map(self, color_map, alpha_map=None):
"""
Set a color map to the slice
:param color_map: Color map, can be a string, a list of colors or more.
See vedo documentation.
"""
self.color_map = color_map
if alpha_map is not None:
self.alpha_map = alpha_map
if self.got_slice and color_map is not None:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
def set_slice_type(self, slice_type):
"""
Set the slice type. 0 for axial, 1 for free slicing
:param slice_type: Int value
"""
if slice_type == 0 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.volume_view.actor.imagedata())
elif slice_type == 1 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.reslice.GetOutput())
def slice_on_normal(self, origin, normal):
"""
Slice a volume with a plane oriented by the given normal.
This allows slicing in all directions.
:param origin: Origin of the slicing plane
:param normal: Normal of the slicing plane
:return: Mesh object with the slice as an image texture
"""
'''
mapper = vtk.vtkImageResliceMapper()
mapper.SetInputData(self.volume_view.actor._data)
mapper.SliceFacesCameraOff()
mapper.SliceAtFocalPointOff()
mapper.JumpToNearestSliceOn()
mapper.SetImageSampleFactor(2)
mapper.BorderOn()
mapper.BackgroundOff()
mapper.UpdateInformation()
mapper.GetSlicePlane().SetOrigin(*origin)
mapper.GetSlicePlane().SetNormal(*normal)
mapper.GetSlicePlane().Modified()
mapper.Modified()
mapper.Update()
self.actor = vtk.vtkImageSlice()
self.actor.SetMapper(mapper)
prop = vtk.vtkImageProperty()
if True:
prop.SetInterpolationTypeToLinear()
else:
prop.SetInterpolationTypeToNearest()
self.actor.SetProperty(prop)
return
'''
if self.reslice is None:
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self.volume_view.actor._data)
#reslice.SetInputData(image)
reslice.SetOutputDimensionality(2)
reslice.SetAutoCropOutput(False)
#reslice.SetInterpolationModeToLinear()
reslice.SetInterpolationModeToNearestNeighbor()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(self.volume_view.get_spacing())
reslice.ReleaseDataFlagOn()
self.reslice = reslice
self.set_slice_type(1)
M, T = utils.get_transformation_matrix(origin, normal)
self.reslice.SetResliceAxes(M)
self.reslice.Update()
self.filter.Update()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
else:
self.actor._update(self.filter.GetOutput())
self.initialize_mapper()
self.actor.SetOrientation(T.GetOrientation())
self.actor.SetPosition(origin)
self.got_slice = True
return self.actor
def x_slice(self, i):
"""
Extract the slice at index `i` of volume along x-axis.
:param i: I index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if i <= 1 or i > nx - 1:
return False
self.filter.SetExtent(i, i, 0, ny, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def y_slice(self, j):
"""
Extract the slice at index `j` of volume along y-axis.
:param j: J index
"""
self.set_slice_type(0)
#nx, ny, nz = self.volume_view.model.dimensions / resolution
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if j <= 1 or j > ny - 1:
return False
self.filter.SetExtent(0, nx, j, j, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def z_slice(self, k):
"""
Extract the slice at index `k` of volume along z-axis.
:param k: K index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if k <= 1 or k > nz - 1:
return False
self.filter.SetExtent(0, nx, 0, ny, k, k)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False):
"""
Slice on standard X, Y or Z axis
:param value: Value on the given axis
:param normal: Axis normal, can be either +1.0 or -1.0 along that axis
:param axis: Axis integer, 0 for X, 1 for Y, 2 for Z
:param use_reslice: if True, this enables vtkImageReslice which is useful when
the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned
normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper.
This is why the default is False.
        :return: Result boolean, whether the slice occurred or not
"""
resolution = self.volume_view.model.resolution
volume_dimensions = self.volume_view.model.dimensions
'''
if normal[axis] < 0:
if value > 0:
# Make value consistent with given normal.
value *= normal[axis]
value = volume_dimensions[axis] + value
'''
in_volume_slice = int(value) // resolution
if use_reslice:
self.slice_on_normal(normal * value, normal)
return
if axis == 0:
result = self.x_slice(in_volume_slice)
elif axis == 1:
result = self.y_slice(in_volume_slice)
elif axis == 2:
result = self.z_slice(in_volume_slice)
return result
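    # Example (assumed values): slice at 2000 um along X on a 25 um resolution volume,
    # i.e. at in-volume index 2000 // 25 = 80:
    #
    #   slicer_view.slice_on_axis(value=2000, normal=np.array([1.0, 0.0, 0.0]), axis=0)
    #
    # With use_reslice=True the same call goes through slice_on_normal() instead.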
def update(self):
"""
Update slice object according to data in the model
"""
had_slice = self.got_slice
result = True
if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2:
result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis)
else:
self.slice_on_normal(self.model.origin, self.model.normal)
if not result:
self.plot.remove(self.actor)
self.got_slice = False
return
#self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos()))
lut = self.volume_view.model.luts.current
if lut is not None:
'''
This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping
BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha)
So we have to put a color map, alpha map and a vtkLookupTable
built from both maps in a LUTModel.
Alternatively, we could update the LUT with alpha values but it's a pain.
ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction()
lut = vedo.utils.ctf2lut(self.volume_view.actor)
otf = self.volume_view.actor.GetProperty().GetScalarOpacity
# using "ctf" would work only for colors, not for transparency!
self.apply_lut(ctf)
'''
self.apply_lut(lut.mapped_lut)
else:
if self.alpha_map is None:
self.actor.cmap(self.color_map)
else:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
if self.model.clipping_planes is not None:
self.actor.mapper().SetClippingPlanes(self.model.clipping_planes)
if not had_slice:
self.plot.add(self.actor, render=True)
def apply_lut(self, lut=None):
"""
        Apply a look-up table (LUT) to the slice mesh actor
        :param lut: vtkLookupTable
"""
if self.actor is None or lut is None:
return
mapper = self.actor._mapper
        mapper.SetLookupTable(lut)
This method normalizes the range between 0.0 and 1.0 as it depends on the resolution of the volume :param value: Opacity value between 0.0 and 1.0 :return: Resulting alpha unit # 1 is chosen and not 1.0 because when value == 1.0, that would # mean that the volume is fully opaque and this yields artifacts with VTK # vedo calls it "alpha" unit, vtk "opacity" unit. same-same! Get the spacing/resolution of the volume #[1, 2] Set the volume actor for visualization in VTK :param center_on_edges: Whether alignment by one voxel is applied :param add_to_scene: Whether the object is added to the scene # Moving the volume by one voxel. This is possibly due the use of custom spacing. #print('Adjusting volume center from', self.model.center, 'to', center) #self.actor._mapper.AutoAdjustSampleDistancesOn() #self.actor._mapper.SetBlendModeToAverageIntensity() #self.actor._mapper.SetSampleDistance(100) Set the position of the volume # TODO: we're entering in unstable things when we move the volume # because there is not yet a guaranteed support for updating the slices # with the correct position Mirror the volume on given axes :param mirror_axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which the volume will be mirrored. Optional Initialize the volume picker :param opacity_iso_value: Threshold that defines at what accumulated opacity the picker hits the volume. In the case of a segmented volume, you want to keep this value very low as the default one. # As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars # https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point # A low OpacityIsoValue is necessary in the case of segmented volumes Initialize 6 slicing planes as a box. # It's important in this case to have standalone=False Update a given slicer with the given value :param slicer_id: SlicerView id :param value: Value or 3D point :param normal: Normal # This is an important part where the slicing plane is itself sliced by other planes # Use given value (or point) and normal to guide the below code # Update slicing image Initialize X, Y and Z clipping planes with two planes per axis for positive and negative slicing Get the current clipping planes except the ones on the given axis :param except_axis: Axis id to ignore. If None, all clipping planes will be returned :return: vtkPlaneCollection Reset clipping planes Apply clipping on a single axis :param position: Position :param axis: Clipping axis, defaults to 0 (X axis) :param thickness: Whether a thickness (so two clipping planes) are applied # This should already be sorted in the model but in case it isn't, we double check here # This means that the given axis has two # clipping planes and we take the negative one #position = self.model.dimensions - position Set volume clipping on or off. :param on: Whether clipping is enabled or disabled. If None, then the state is toggled. 
Clip the volume and move the slicing planes according to 6 boundary points :param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax) # Normals are reversed with the above code # so we fix that here with flip_normals=True Clip the volume with the current box widget :param widget: vtkBoxCutter :param event: vtkEvent Clip the volume and move the slicing planes according the given planes :param planes: vtkPlanes # We don't need to check the normal because # we prevent box cutter rotation in our case Set alpha map to the volume view :param alpha_map: 2D list of scalar values and alpha values :param alpha_factor: Alpha factor Set the color and alpha map to the view objects :param color_map: Nested list of scalar values and rgb colors like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...] :param alpha_map: 2D list of scalar values and alpha values Disable volume shading Enable volume shading TODO: See if this method is useful Toggle slices visibility Toggle hollow mode for volume rendering. This is intended to work only on segmented (annotated) volumes. # Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff... Get a scalar value from the volume with respect to XYZ coordinates and a optionally a normal step, that is the normal on which to probe multiplied by the distance you want to travel further into the volume to pick a correct value. Often the "surface point" on a volume with non uniform transparency is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts. So you need to go further into the "cloud" so to speak, in order to find the values you want. :param position: 3D array :param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields bad or unwanted results :param avoid_values: Try and find other values than this :param cast_to_int: Whether the value should be cast to integer :return: Scalar value # TODO: see if this is faster? To be tested # ijk_result = [0.0, 0.0, 0.0] # volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result) # volume_actor._data.GetPoint(ijk_result) Shorthand for pick() method Find the nearest intersection – even on sliced volume – with the ray formed by an origin and a screen-space position (given by VTK when you click on an actor) :param origin: Origin of the vector :param screen_position: 2D position on screen. 
This is given by vtk events like MouseRelease :return: The nearest position and its related value queried in the volume image # Go half a voxel further to make sure we don't hit "void" # + normal * self.model.resolution / 2 # See if the line hits any of the slicers (that are image planes) Add a series of points along a line probe :param origin: Probe origin :param destination: Probe destination point :param resolution: Number of (equidistant) points that will be probed along that line :param radius: Radius of the points :param color_map: Scalars color map :param screen_space: Whether the points are screen space or spheres :param min_v: Min scalar value :param max_v: Max scalar value :param add_to_scene: Whether the new probe is added to scene :return: Points # Dynamic properties assignment Update a probe with given start and end points :param origin: Start point :param destination: End point :param points_obj: Points object Probe a volume with a line :param origin: Origin of the line probe :param destination: Destination of the line probe :param resolution: Number of point samples along the probe :return: Positions and values Set volume subsampling on or off. This is enabled by default in VTK and we disable it by default in IBLViewer :param on: Whether volume subsampling in interactive mode is on or off #self.plot.window.SetDesiredUpdateRate(0) #self.actor._mapper.SetInteractiveUpdateRate(0) Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value. Unlike general isosurfacing, this method extracts only the surface mesh of the desired region/label/segmentation, not of all values from 0 to label. :param label: Label (scalar) value found in the volume :param exceptions: If the label is found in the exceptions list, isosurfacing will not occur :param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label :param set_current: Whether the label is set as the current one in the model :param to_int: Whether the label is cast to integer :param split_meshes: Whether we split meshes when multiple ones are found :return: A list of all manifold meshes for the given label # Generate object boundaries from labelled volume #splitter = vtk.vtkExtractPolyDataGeometry() #splitter.AddSpecifiedRegion(region_id) #splitter.Update() #poly = vtk.vtkPolyData() #poly.ShallowCopy(splitter.GetOutput()) #actor._mapper.SetScalarRange(min_value, lut.scalar_max) #actor._mapper.SetUseLookupTableScalarRange(True) #actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label) pdnorm = vtk.vtkPolyDataNormals() pdnorm.SetInputData(smoother.GetOutput()) pdnorm.ComputePointNormalsOn() pdnorm.ComputeCellNormalsOn() pdnorm.FlipNormalsOff() pdnorm.ConsistencyOn() pdnorm.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(smoother.GetOutputPort()) mapper.SetLookupTable(lut.scalar_lut) mapper.SetScalarRange(min_value, lut.scalar_max) #um # 0, 1 or 2. See the normal for axis orientation #thickness: float = 0.0 Get the plane id :return: Int VTK stores box clipping planes in the order: -X to +X: 0, 1 -Y to +Y: 2, 3 -Z to +Z: 4, 5 This method retrieves what is the XYZ axis (0, 1 or 2) and its orientation sign :return: Int axis and float orientation Set the axis of the slicer :param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info() :param dimensions: Dimensions of the volume Flip the normal of the slicer Check if the normal is axis-aligned. If not, the axis is set to None. 
Update slicer :param value: Origin of the slicing plane :param normal: Normal of the slicing plane :param axis: Axis, if the plane is axis-aligned :return: True if model changed, False if it didn't #if normal[0] > 0 else 1 #if normal[1] > 0 else 1 #if normal[2] > 0 else 1 Constructor :param plot: Plot instance :param volume_view: VolumeView instance :param slicer_model: SlicerModel instance :param standalone: Whether the slice is a standalone actor that can be clicked. Set this to False if you want to use transparency, at the expense that because of a VTK bug, you won't be able to click on it anymore, requiring you to code another way of detecting where the user clicked. See more in initialize_mapper() Initialize the slicer object # Adding empty actor so that it's updated later on Initialize the object mapper #SetScalarModeToUsePointFieldData # We operate on static volumes thanks to the double LUT mapping implemented here # Without using scalar range, the mapping will be off # We prevent this actor from being pickable as a result of the bug described below # when we want to use transparency on the slice. # There is a bug in VTK 9 that prevents clicking on transparent objects # as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291 # The "Force opaque fix" below should be gone with the next VTK update hopefully. # In the meantime, we use this. # TODO: remove this when this bug is fixed in VTK # We bypass the transparent selection bug when a VolumeView has multiple slicers # like in box mode because the click detection occurs on the volume and we perform # an additional test to see if a slicer yields a nearby result. If it does, # the result is like clicking on the slice and we get transparency for free. # Make sure we have depth peeling activated, otherwise transparency with volumes # will look weird and in the wrong order # This very line below will mess up the entire slice coloring if: # - you have a segmented volume and this is set to True # - you have a non-segmented (like raw MRI, CT) volume and this is set to False Set a color map to the slice :param color_map: Color map, can be a string, a list of colors or more. See vedo documentation. Set the slice type. 0 for axial, 1 for free slicing :param slice_type: Int value Slice a volume with a plane oriented by the given normal. This allows slicing in all directions. :param origin: Origin of the slicing plane :param normal: Normal of the slicing plane :return: Mesh object with the slice as an image texture mapper = vtk.vtkImageResliceMapper() mapper.SetInputData(self.volume_view.actor._data) mapper.SliceFacesCameraOff() mapper.SliceAtFocalPointOff() mapper.JumpToNearestSliceOn() mapper.SetImageSampleFactor(2) mapper.BorderOn() mapper.BackgroundOff() mapper.UpdateInformation() mapper.GetSlicePlane().SetOrigin(*origin) mapper.GetSlicePlane().SetNormal(*normal) mapper.GetSlicePlane().Modified() mapper.Modified() mapper.Update() self.actor = vtk.vtkImageSlice() self.actor.SetMapper(mapper) prop = vtk.vtkImageProperty() if True: prop.SetInterpolationTypeToLinear() else: prop.SetInterpolationTypeToNearest() self.actor.SetProperty(prop) return #reslice.SetInputData(image) #reslice.SetInterpolationModeToLinear() Extract the slice at index `i` of volume along x-axis. :param i: I index Extract the slice at index `j` of volume along y-axis. :param j: J index #nx, ny, nz = self.volume_view.model.dimensions / resolution Extract the slice at index `k` of volume along z-axis. 
:param k: K index Slice on standard X, Y or Z axis :param value: Value on the given axis :param normal: Axis normal, can be either +1.0 or -1.0 along that axis :param axis: Axis integer, 0 for X, 1 for Y, 2 for Z :param use_reslice: if True, this enables vtkImageReslice which is useful when the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper. This is why the default is False. :return: Result boolean, whether slice occured or not if normal[axis] < 0: if value > 0: # Make value consistent with given normal. value *= normal[axis] value = volume_dimensions[axis] + value Update slice object according to data in the model #self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos())) This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha) So we have to put a color map, alpha map and a vtkLookupTable built from both maps in a LUTModel. Alternatively, we could update the LUT with alpha values but it's a pain. ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction() lut = vedo.utils.ctf2lut(self.volume_view.actor) otf = self.volume_view.actor.GetProperty().GetScalarOpacity # using "ctf" would work only for colors, not for transparency! self.apply_lut(ctf) Apply a LUT to the volume :param lut: vtkLookupTable :param actor: The actor to receive this | 2.128786 | 2 |
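# Editor's sketch (not part of the original file): the "double mapping" described above can be
# realized with a plain vtkLookupTable holding one RGBA entry per segmented region id, so that
# recoloring regions only rewrites this small 1D table instead of the 3D volume. The region
# count and colors below are hypothetical.
import vtk
num_regions = 4
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(num_regions)
lut.SetRange(0, num_regions - 1)
lut.SetTableValue(0, 0.0, 0.0, 0.0, 0.0)  # region id 0: fully transparent background
lut.SetTableValue(1, 0.9, 0.2, 0.2, 1.0)  # region id 1: red
lut.SetTableValue(2, 0.2, 0.9, 0.2, 1.0)  # region id 2: green
lut.SetTableValue(3, 0.2, 0.2, 0.9, 1.0)  # region id 3: blue
lut.Build()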
modeling/dataset.py | LaudateCorpus1/ml-cread | 18 | 8601 | <gh_stars>10-100
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
'''
Dataset file
'''
import sys
import time
import json
import copy
from itertools import chain
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, RandomSampler
SPECIAL_TOKENS = {
"bos_token": "<BOS>",
"eos_token": "<EOS>",
"pad_token": "<PAD>",
"sep_token": "<SEP>",
"additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
}
SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
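# Editor's sketch (assumed Hugging Face transformers API; not part of the original file):
# the special tokens above must be registered with the tokenizer, and the model embedding
# matrix resized, before the Dataset below can encode them as single ids.
# from transformers import GPT2Tokenizer, GPT2LMHeadModel
# tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
# model = GPT2LMHeadModel.from_pretrained('gpt2')
# tokenizer.add_special_tokens(SPECIAL_TOKENS)
# model.resize_token_embeddings(len(tokenizer))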
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, tokenizer, data_type, generation, data_size):
assert data_type in ['train', 'dev', 'test']
self.args = args
self.data_size = data_size
self.tokenizer = tokenizer
self.data_type = data_type
self.generation = generation
self._get_special_token_ids()
self._create_examples()
def _get_special_token_ids(self):
self.SPECIAL_TOKENS = SPECIAL_TOKENS
self.SPECIAL_TOKENS_VALUES = SPECIAL_TOKENS_VALUES
self.bos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["bos_token"])
self.eos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["eos_token"])
self.pad_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["pad_token"])
self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["sep_token"])
# mention detection vocab
self.mc_cl2idx = {'<N>': 0, '<M>': 1, '</M>': 2} # <N>: none, <M>: start of mention, "</M>": end of mention
self.mc_idx2cl = {v: k for k, v in self.mc_cl2idx.items()}
def prepare_reference_label(self, word_label_index, wordId2tokenId, input_ids):
'''
record the index of the start/end of each mention and its reference in the input utterance
this info will be used as the attention signal in the reference resolution step
'''
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(input_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
token_label_index = []
for start_end_link in word_label_index:
for link_meta in start_end_link:
attention_word_idx, mention_word_idx = link_meta['attention_idx'], link_meta['mention_idx']
if link_meta['mention_type'] == 'start':
attention_token_idx = wordId2tokenId[attention_word_idx][0]
else: # end
attention_token_idx = wordId2tokenId[attention_word_idx][-1]
for mention_token_idx in wordId2tokenId[mention_word_idx]:
link = {}
link['mention_token_idx'] = mention_token_idx
link['attention_token_idx'] = attention_token_idx
assert reconstruct_sentence[mention_token_idx] in link_meta['mention_word']
assert reconstruct_sentence[attention_token_idx] in link_meta['attention_word']
token_label_index.append(link)
return token_label_index
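# Editor's note (hypothetical indices, not from the original data): each returned link pairs
# every sub-token of a mention with one attention target -- the first sub-token of the
# antecedent word for 'start' links and its last sub-token for 'end' links, e.g.
# {'mention_token_idx': 12, 'attention_token_idx': 4}.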
def prepare_binary_label(self, input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx):
''' only the token position where rewriting starts receives the binary signal '''
binary_label = [-100] * len(input_ids)
assert isinstance(binary_rewrite, bool)
if binary_rewrite == True:
binary_label[curr_end_token_idx] = 1 # rewrite
else:
binary_label[curr_end_token_idx] = 0 # not rewrite
return binary_label
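# Editor's sketch (standalone re-statement for illustration, not part of the original class):
def _binary_label_sketch(seq_len, curr_end_token_idx, binary_rewrite):
    label = [-100] * seq_len  # every position is ignored by the loss ...
    label[curr_end_token_idx] = 1 if binary_rewrite else 0  # ... except the end of the current utterance
    return label
# _binary_label_sketch(6, 3, True) -> [-100, -100, -100, 1, -100, -100]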
def prepare_mention_label(self, input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx):
'''
get label index for mention detection
only the parts of the current utterance receive a signal; everywhere else will get -100
'''
mention_label = [-100] * len(input_ids)
curr_start_idx = wordId2tokenId[curr_start_idx][0]
curr_end_idx = wordId2tokenId[curr_end_idx-1][-1] + 1
# align class <N> (none) to everywhere in the current utterance first
mention_label[curr_start_idx: curr_end_idx] = [ self.mc_cl2idx['<N>'] ] * (curr_end_idx-curr_start_idx)
for start_end_link in word_label_index: # iterate over links in one example
for link_meta in start_end_link: # iterate over start and end of a link
idx = link_meta['mention_idx']
if link_meta['mention_type'] == 'start': # align class <M> (start of mention)
for idx in wordId2tokenId[idx]:
mention_label[idx] = self.mc_cl2idx['<M>']
else: # align class </M> (end of mention)
idx = wordId2tokenId[idx][-1]
mention_label[idx] = self.mc_cl2idx['</M>']
return mention_label, curr_start_idx, curr_end_idx
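# Editor's note (hypothetical positions): if the current utterance spans token indices 10..15,
# every index in that span starts as 0 (<N>); the sub-tokens of a mention's start word are then
# overwritten with 1 (<M>) and the last sub-token of its end word with 2 (</M>). Context and
# rewrite positions keep -100 and are ignored by the mention-detection loss.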
def _check_label_index(self, whole_input, links):
''' sanity check for index correctness '''
seq = whole_input.split()
for link in links:
for start_or_end in link:
for word_type in ['mention', 'attention']:
assert seq[start_or_end['{}_idx'.format(word_type)]] == start_or_end['{}_word'.format(word_type)]
def _create_examples(self):
if self.data_type == 'train':
data_file = self.args.train_file
elif self.data_type == 'dev':
data_file = self.args.dev_file
else:
data_file = self.args.test_file
with open(data_file) as f:
data = json.load(f)
self.examples = []
for example_num, example in enumerate(tqdm(data, disable=self.args.disable_display)):
if self.data_size != -1 and example_num == self.data_size:
break
# get data
context = example['dialogue context'] # context, list of str
curr_utt = example['current utterance'] # current utterance, str
rewt_utt = example['rewrite utterance'] # rewrite utterance, str
word_label_index = example['link index'] # index of mention/reference span
binary_rewrite = example['rewrite happen'] # binary label for rewrite or not, bool
# prepare input sequence to model
whole_input = copy.deepcopy(context)
whole_input.append(curr_utt)
curr_start_idx = sum([len(s.split()) for s in context]) # the (word) start idx of current utt
curr_end_idx = curr_start_idx + len(curr_utt.split())
whole_input = " ".join(whole_input)
self._check_label_index(whole_input, word_label_index)
input_ids, wordId2tokenId, tokenId2wordId = self.tokenize_with_map(whole_input)
if rewt_utt == "":
rewt_utt_ids = []
else:
rewt_utt_ids = self.tokenizer(rewt_utt)['input_ids'] # list
target_utt_ids = rewt_utt_ids
target_utt_len = len(target_utt_ids)
if not self.generation:
# input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS>
input_ids = input_ids + [self.sep_id] + target_utt_ids + [self.eos_id]
# mention detection signal
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
# reference resolution signal
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
# binary classification of rewriting signal
binary_label = self.prepare_binary_label(input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx)
# rewriting signal
ignore_len = len(input_ids) - target_utt_len - 1 # eos_id
label_ids = [-100] * ignore_len + target_utt_ids + [self.eos_id]
assert len(input_ids) == len(label_ids)
else: # generation
# <sep> is given at first step during decoding
input_ids = input_ids
label_ids = None
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
binary_label = None
self.examples.append({
'input_ids': input_ids, # list of ids
'label_ids': label_ids, # list of ids
'mention_label_ids': mention_label,
'curr_start_token_idx': curr_start_token_idx,
'curr_end_token_idx': curr_end_token_idx,
'reference_label': reference_label_index,
'wordId2tokenId': wordId2tokenId,
'tokenId2wordId': tokenId2wordId,
'context': context,
'curr_utt': curr_utt,
'whole_input': whole_input,
'rewt_utt': rewt_utt,
'example_id': example['example index'],
'spk': example['speaker'],
'coref_label': word_label_index,
'binary_label_ids': binary_label,
'binary_rewrite': binary_rewrite
})
print('Data Statistics: {} -> {} examples'.format(self.data_type, len(self.examples)))
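# Editor's illustration (hypothetical utterances; actual ids come from the GPT-2 tokenizer):
# context            : "book a flight to paris"   /   current utterance : "change it to london"
# training input_ids : context tokens + current-utterance tokens + <SEP> + rewrite tokens + <EOS>
# label_ids          : -100 over context, current utterance and <SEP>, then the rewrite token ids + <EOS>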
def _pad(self, sentences, pad_id):
'''
sentences: a list of list with ids
'''
max_len = max((map(len, sentences)))
attention_mask = []
sentences_pad = []
for sent in sentences:
pad_len = max_len - len(sent)
sentences_pad.append( sent + [pad_id]*pad_len )
attention_mask.append( [1]*len(sent) + [0]*pad_len)
return sentences_pad, attention_mask
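# Editor's sketch (standalone equivalent for illustration, not part of the original class):
def _pad_sketch(sentences, pad_id):
    max_len = max(map(len, sentences))
    padded = [s + [pad_id] * (max_len - len(s)) for s in sentences]  # right-pad to the batch max
    mask = [[1] * len(s) + [0] * (max_len - len(s)) for s in sentences]  # 1 = real token, 0 = pad
    return padded, mask
# _pad_sketch([[5, 6, 7], [8]], 0) -> ([[5, 6, 7], [8, 0, 0]], [[1, 1, 1], [1, 0, 0]])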
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return self.examples[index]
def collate_fn(self, batch):
input_ids = [example['input_ids'] for example in batch]
input_ids, attention_mask = self._pad(input_ids, self.pad_id)
input_ids, attention_mask = torch.tensor(input_ids).long().to(self.args.device), torch.tensor(attention_mask).long().to(self.args.device)
if not self.generation:
label_ids = [example['label_ids'] for example in batch]
label_ids, _ = self._pad(label_ids, -100)
label_ids = torch.tensor(label_ids).long().to(self.args.device)
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = [example['binary_label_ids'] for example in batch]
binary_label_ids, _ = self._pad(binary_label_ids, -100)
binary_label_ids = torch.tensor(binary_label_ids).long().to(self.args.device)
else:
label_ids = None
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = None
token_type_ids = None # TODO: not sure if this makes any effect to gpt2
# record info
context = [example['context'] for example in batch]
curr_utt = [example['curr_utt'] for example in batch]
rewt_utt = [example['rewt_utt'] for example in batch]
example_ids = [example['example_id'] for example in batch] # record the example idx in batch
curr_start_token_idx = [example['curr_start_token_idx'] for example in batch]
curr_end_token_idx = [example['curr_end_token_idx'] for example in batch]
reference_label = [example['reference_label'] for example in batch]
wordId2tokenId = [example['wordId2tokenId'] for example in batch]
tokenId2wordId = [example['tokenId2wordId'] for example in batch]
whole_input = [example['whole_input'] for example in batch]
spk = [example['spk'] for example in batch]
coref_label = [example['coref_label'] for example in batch]
binary_rewrite = [example['binary_rewrite'] for example in batch]
return {'input_ids': input_ids, 'attention_mask': attention_mask, \
'token_type_ids': token_type_ids, 'label_ids': label_ids, \
'context': context, 'curr_utt': curr_utt, 'rewt_utt': rewt_utt, \
'example_ids': example_ids, 'spk': spk, 'mention_label_ids': mention_label_ids, \
'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, \
'reference_label': reference_label, 'wordId2tokenId': wordId2tokenId, \
'tokenId2wordId': tokenId2wordId, 'whole_input': whole_input, \
'coref_label': coref_label, 'binary_label_ids': binary_label_ids, \
'binary_rewrite': binary_rewrite}
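# Editor's usage sketch (hypothetical argument values, not part of the original file):
# train_set = Dataset(args, tokenizer, 'train', generation=False, data_size=-1)
# loader = DataLoader(train_set, batch_size=8, sampler=RandomSampler(train_set),
#                     collate_fn=train_set.collate_fn)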
def tokenize_with_map(self, sentence):
'''
Build the mapping of indexes before/after the tokenizer to handle BPE
Input:
sentence: a natural sentence, str
Returns:
wordId2tokenId, a 1-to-many map
tokenId2wordId, a many-to-1 map
'''
assert isinstance(sentence, str)
token_ids = self.tokenizer(sentence)['input_ids']
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(token_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
sentence = sentence.split()
wordId2tokenId = {}
tokenId = 0
for wordId, word in enumerate(sentence):
wordId2tokenId[wordId] = []
token = ""
while word != token:
wordId2tokenId[wordId].append(tokenId)
token += reconstruct_sentence[tokenId]
tokenId += 1
tokenId2wordId = {}
for wordId, tokenIds in wordId2tokenId.items():
for tokenId in tokenIds:
assert tokenId not in tokenId2wordId
tokenId2wordId[tokenId] = wordId
assert len(wordId2tokenId) == len(sentence)
assert len(tokenId2wordId) == len(reconstruct_sentence)
return token_ids, wordId2tokenId, tokenId2wordId
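# Editor's illustration (hypothetical BPE split; the real GPT-2 pieces may differ):
# sentence : "please rebook it" -> sub-tokens ["please", "reb", "ook", "it"]
# wordId2tokenId = {0: [0], 1: [1, 2], 2: [3]}   # each word -> its sub-token indices
# tokenId2wordId = {0: 0, 1: 1, 2: 1, 3: 2}      # each sub-token -> its owning word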
if __name__ == '__main__':
pass
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
'''
Dataset file
'''
import sys
import time
import json
import copy
from itertools import chain
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, RandomSampler
SPECIAL_TOKENS = {
"bos_token": "<BOS>",
"eos_token": "<EOS>",
"pad_token": "<PAD>",
"sep_token": "<SEP>",
"additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
}
SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, tokenizer, data_type, generation, data_size):
assert data_type in ['train', 'dev', 'test']
self.args = args
self.data_size = data_size
self.tokenizer = tokenizer
self.data_type = data_type
self.generation = generation
self._get_special_token_ids()
self._create_examples()
def _get_special_token_ids(self):
self.SPECIAL_TOKENS = SPECIAL_TOKENS
self.SPECIAL_TOKENS_VALUES = SPECIAL_TOKENS_VALUES
self.bos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["bos_token"])
self.eos_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["eos_token"])
self.pad_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["pad_token"])
self.sep_id = self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS["sep_token"])
# mention detection vocab
self.mc_cl2idx = {'<N>': 0, '<M>': 1, '</M>': 2} # <N>: none, <M>: start of mention, "</M>": end of mention
self.mc_idx2cl = {v: k for k, v in self.mc_cl2idx.items()}
def prepare_reference_label(self, word_label_index, wordId2tokenId, input_ids):
'''
record the index of the start/end of each mention and its reference in the input utterance
this info will be used as the attention signal in the reference resolution step
'''
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(input_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
token_label_index = []
for start_end_link in word_label_index:
for link_meta in start_end_link:
attention_word_idx, mention_word_idx = link_meta['attention_idx'], link_meta['mention_idx']
if link_meta['mention_type'] == 'start':
attention_token_idx = wordId2tokenId[attention_word_idx][0]
else: # end
attention_token_idx = wordId2tokenId[attention_word_idx][-1]
for mention_token_idx in wordId2tokenId[mention_word_idx]:
link = {}
link['mention_token_idx'] = mention_token_idx
link['attention_token_idx'] = attention_token_idx
assert reconstruct_sentence[mention_token_idx] in link_meta['mention_word']
assert reconstruct_sentence[attention_token_idx] in link_meta['attention_word']
token_label_index.append(link)
return token_label_index
def prepare_binary_label(self, input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx):
''' only the token position where rewriting starts receives the binary signal '''
binary_label = [-100] * len(input_ids)
assert isinstance(binary_rewrite, bool)
if binary_rewrite == True:
binary_label[curr_end_token_idx] = 1 # rewrite
else:
binary_label[curr_end_token_idx] = 0 # not rewrite
return binary_label
def prepare_mention_label(self, input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx):
'''
get label index for mention detection
only the parts of the current utterance receive a signal; everywhere else will get -100
'''
mention_label = [-100] * len(input_ids)
curr_start_idx = wordId2tokenId[curr_start_idx][0]
curr_end_idx = wordId2tokenId[curr_end_idx-1][-1] + 1
# align class <N> (none) to everywhere in the current utterance first
mention_label[curr_start_idx: curr_end_idx] = [ self.mc_cl2idx['<N>'] ] * (curr_end_idx-curr_start_idx)
for start_end_link in word_label_index: # iterate over links in one example
for link_meta in start_end_link: # iterate over start and end of a link
idx = link_meta['mention_idx']
if link_meta['mention_type'] == 'start': # align class <M> (start of mention)
for idx in wordId2tokenId[idx]:
mention_label[idx] = self.mc_cl2idx['<M>']
else: # align class </M> (end of mention)
idx = wordId2tokenId[idx][-1]
mention_label[idx] = self.mc_cl2idx['</M>']
return mention_label, curr_start_idx, curr_end_idx
def _check_label_index(self, whole_input, links):
''' sanity check for index correctness '''
seq = whole_input.split()
for link in links:
for start_or_end in link:
for word_type in ['mention', 'attention']:
assert seq[start_or_end['{}_idx'.format(word_type)]] == start_or_end['{}_word'.format(word_type)]
def _create_examples(self):
if self.data_type == 'train':
data_file = self.args.train_file
elif self.data_type == 'dev':
data_file = self.args.dev_file
else:
data_file = self.args.test_file
with open(data_file) as f:
data = json.load(f)
self.examples = []
for example_num, example in enumerate(tqdm(data, disable=self.args.disable_display)):
if self.data_size != -1 and example_num == self.data_size:
break
# get data
context = example['dialogue context'] # context, list of str
curr_utt = example['current utterance'] # current utterance, str
rewt_utt = example['rewrite utterance'] # rewrite utterance, str
word_label_index = example['link index'] # index of mention/reference span
binary_rewrite = example['rewrite happen'] # binary label for rewrite or not, bool
# prepare input sequence to model
whole_input = copy.deepcopy(context)
whole_input.append(curr_utt)
curr_start_idx = sum([len(s.split()) for s in context]) # the (word) start idx of current utt
curr_end_idx = curr_start_idx + len(curr_utt.split())
whole_input = " ".join(whole_input)
self._check_label_index(whole_input, word_label_index)
input_ids, wordId2tokenId, tokenId2wordId = self.tokenize_with_map(whole_input)
if rewt_utt == "":
rewt_utt_ids = []
else:
rewt_utt_ids = self.tokenizer(rewt_utt)['input_ids'] # list
target_utt_ids = rewt_utt_ids
target_utt_len = len(target_utt_ids)
if not self.generation:
# input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS>
input_ids = input_ids + [self.sep_id] + target_utt_ids + [self.eos_id]
# mention detection signal
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
# reference resolution signal
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
# binary classification of rewriting signal
binary_label = self.prepare_binary_label(input_ids, wordId2tokenId, binary_rewrite, curr_end_token_idx)
# rewriting signal
ignore_len = len(input_ids) - target_utt_len - 1 # eos_id
label_ids = [-100] * ignore_len + target_utt_ids + [self.eos_id]
assert len(input_ids) == len(label_ids)
else: # generation
# <sep> is given at first step during decoding
input_ids = input_ids
label_ids = None
mention_label, curr_start_token_idx, curr_end_token_idx = \
self.prepare_mention_label(input_ids, word_label_index, wordId2tokenId, curr_start_idx, curr_end_idx)
reference_label_index = self.prepare_reference_label(word_label_index, wordId2tokenId, input_ids)
binary_label = None
self.examples.append({
'input_ids': input_ids, # list of ids
'label_ids': label_ids, # list of ids
'mention_label_ids': mention_label,
'curr_start_token_idx': curr_start_token_idx,
'curr_end_token_idx': curr_end_token_idx,
'reference_label': reference_label_index,
'wordId2tokenId': wordId2tokenId,
'tokenId2wordId': tokenId2wordId,
'context': context,
'curr_utt': curr_utt,
'whole_input': whole_input,
'rewt_utt': rewt_utt,
'example_id': example['example index'],
'spk': example['speaker'],
'coref_label': word_label_index,
'binary_label_ids': binary_label,
'binary_rewrite': binary_rewrite
})
print('Data Statistics: {} -> {} examples'.format(self.data_type, len(self.examples)))
def _pad(self, sentences, pad_id):
'''
sentences: a list of list with ids
'''
max_len = max((map(len, sentences)))
attention_mask = []
sentences_pad = []
for sent in sentences:
pad_len = max_len - len(sent)
sentences_pad.append( sent + [pad_id]*pad_len )
attention_mask.append( [1]*len(sent) + [0]*pad_len)
return sentences_pad, attention_mask
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return self.examples[index]
def collate_fn(self, batch):
input_ids = [example['input_ids'] for example in batch]
input_ids, attention_mask = self._pad(input_ids, self.pad_id)
input_ids, attention_mask = torch.tensor(input_ids).long().to(self.args.device), torch.tensor(attention_mask).long().to(self.args.device)
if not self.generation:
label_ids = [example['label_ids'] for example in batch]
label_ids, _ = self._pad(label_ids, -100)
label_ids = torch.tensor(label_ids).long().to(self.args.device)
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = [example['binary_label_ids'] for example in batch]
binary_label_ids, _ = self._pad(binary_label_ids, -100)
binary_label_ids = torch.tensor(binary_label_ids).long().to(self.args.device)
else:
label_ids = None
mention_label_ids = [example['mention_label_ids'] for example in batch]
mention_label_ids, _ = self._pad(mention_label_ids, -100)
mention_label_ids = torch.tensor(mention_label_ids).long().to(self.args.device)
binary_label_ids = None
token_type_ids = None # TODO: not sure if this makes any effect to gpt2
# record info
context = [example['context'] for example in batch]
curr_utt = [example['curr_utt'] for example in batch]
rewt_utt = [example['rewt_utt'] for example in batch]
example_ids = [example['example_id'] for example in batch] # record the example idx in batch
curr_start_token_idx = [example['curr_start_token_idx'] for example in batch]
curr_end_token_idx = [example['curr_end_token_idx'] for example in batch]
reference_label = [example['reference_label'] for example in batch]
wordId2tokenId = [example['wordId2tokenId'] for example in batch]
tokenId2wordId = [example['tokenId2wordId'] for example in batch]
whole_input = [example['whole_input'] for example in batch]
spk = [example['spk'] for example in batch]
coref_label = [example['coref_label'] for example in batch]
binary_rewrite = [example['binary_rewrite'] for example in batch]
return {'input_ids': input_ids, 'attention_mask': attention_mask, \
'token_type_ids': token_type_ids, 'label_ids': label_ids, \
'context': context, 'curr_utt': curr_utt, 'rewt_utt': rewt_utt, \
'example_ids': example_ids, 'spk': spk, 'mention_label_ids': mention_label_ids, \
'curr_start_token_idx': curr_start_token_idx, 'curr_end_token_idx': curr_end_token_idx, \
'reference_label': reference_label, 'wordId2tokenId': wordId2tokenId, \
'tokenId2wordId': tokenId2wordId, 'whole_input': whole_input, \
'coref_label': coref_label, 'binary_label_ids': binary_label_ids, \
'binary_rewrite': binary_rewrite}
def tokenize_with_map(self, sentence):
'''
Build the mapping of indexes before/after the tokenizer to handle BPE
Input:
sentence: a natural sentence, str
Returns:
wordId2tokenId, a 1-to-many map
tokenId2wordId, a many-to-1 map
'''
assert isinstance(sentence, str)
token_ids = self.tokenizer(sentence)['input_ids']
reconstruct_sentence = self.tokenizer.convert_ids_to_tokens(token_ids)
reconstruct_sentence = [token.replace('Ġ', '') for token in reconstruct_sentence]
sentence = sentence.split()
wordId2tokenId = {}
tokenId = 0
for wordId, word in enumerate(sentence):
wordId2tokenId[wordId] = []
token = ""
while word != token:
wordId2tokenId[wordId].append(tokenId)
token += reconstruct_sentence[tokenId]
tokenId += 1
tokenId2wordId = {}
for wordId, tokenIds in wordId2tokenId.items():
for tokenId in tokenIds:
assert tokenId not in tokenId2wordId
tokenId2wordId[tokenId] = wordId
assert len(wordId2tokenId) == len(sentence)
assert len(tokenId2wordId) == len(reconstruct_sentence)
return token_ids, wordId2tokenId, tokenId2wordId
if __name__ == '__main__':
pass | en | 0.771455 | # # For licensing see accompanying LICENSE file. # Copyright (C) 2021 Apple Inc. All Rights Reserved. # Dataset file # mention detection vocab # <N>: none, <M>: start of mention, "</M>": end of mention record the index of start/end of mention and refernece in the input otterance this info will be used as attention signal in reference resolution step # end only the start of rewriting token receives binary signal # rewrite # not rewrite get label index for mention detection only the parts of current utterance receive signal, everwhere else will get -100 # align class <N> (none) to everywehere in current utterance first # iterate over links in one example # iterate over start and end of a link # align class <M> (start of mention) # # align class </M> (end of mention) sanity check for index correctness # get data # context, list of str # current utterance, str # rewrite utterance, str # index of mention/reference span # binary label for rewrite or not, bool # prepare input sequence to model # the (word) start idx of current utt # list # input seq: CTX <CUR> current utterance <SEP> rewritten utterance <EOS> # mention detection signal # reference resolution signal # binary classification of rewriting signal # rewriting singal # eos_id # generation # <sep> is given at first step during decoding # list of ids # list of ids sentences: a list of list with ids # TODO: not sure if this makes any effect to gpt2 # record info # record the example idx in batch Build the mapping of indexes before/after tokenizer to handel BPE Input: sentence: a natural sentence, str Returns: wordId2tokenId, a 1-to-many map tokenId2wordId, a many-to-1 map | 2.042634 | 2 |
hy/lex/lexer.py | schuster-rainer/hy | 12 | 8602 | # Copyright (c) 2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
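# Editor's usage sketch (assumes the standard rply Lexer API; not part of the original module):
# for token in lexer.lex('(print "hello")'):
#     print(token.gettokentype(), token.getstr())
# expected token types for this input: LPAREN, IDENTIFIER, STRING, RPAREN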
| # Copyright (c) 2013 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
| en | 0.719819 | # Copyright (c) 2013 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # A regexp for something that should end a quoting/unquoting operator # i.e. a space or a closing brace/paren/curly # A regexp which matches incomplete strings, used to support # multi-line strings in the interpreter (?x) (?:u|r|ur|ru)? # prefix " # start string (?: | [^"\\] # non-quote or backslash | \\(.|\n) # or escaped single character or newline | \\x[0-9a-fA-F]{2} # or escaped raw character | \\u[0-9a-fA-F]{4} # or unicode escape | \\U[0-9a-fA-F]{8} # or long unicode escape )* # one or more times | 1.660084 | 2 |
week6/shuffle.py | solideveloper/afs-210 | 1 | 8603 | # Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random
def shuffleAlgorithm(data, ndata):
for i in range(ndata-1, 0, -1):
r = random.randint(0, i)
data[i], data[r] = data[r], data[i]
return data
print(data)
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
# fisher yates algorithm - O(n) time complexity: the single backwards pass swaps each element at most once
# the list is modified in place (O(1) extra space) rather than copied, which keeps the overall work linear
# swapping the last item with a random -not previously selected- item and repeating until all items in list have been selected | # Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random
def shuffleAlgorithm(data, ndata):
for i in range(ndata-1, 0, -1):
r = random.randint(0, i)
data[i], data[r] = data[r], data[i]
return data
print(data)
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
print(shuffleAlgorithm(data,ndata))
# fisher yates algorithm - O(n) time complexity: the single backwards pass swaps each element at most once
# the list is modified in place (O(1) extra space) rather than copied, which keeps the overall work linear
# swapping the last item with a random -not previously selected- item and repeating until all items in list have been selected | en | 0.894702 | # Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this. # For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible. # Add a comment to your code stating what the time complexity of your algorithm is and why. # Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items. # fisher yates algorithm - O(n) time complexity because i'm not creating a copy of the list to shuffle through it. # instead i'm modifying the list in place or at a 'constant space' making it O(n) # swapping the last item with a random -not previously selected- item and repeating until all items in list have been selected | 4.691014 | 5 |
workbox/workbox/lib/helpers.py | pr3sto/workbox | 0 | 8604 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""
import logging
import socket
from datetime import datetime
from markupsafe import Markup
import psutil
import tg
log = logging.getLogger(__name__)
def current_year():
""" Return current year. """
now = datetime.now()
return now.strftime('%Y')
def is_docker_enabled():
""" Detect if docker service is started. """
for proc in psutil.process_iter():
if 'docker' in proc.name():
return True
return False
def get_server_load_value():
""" Get server load value. """
return psutil.virtual_memory().percent
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port
def get_vagrantfiles_base_folder():
""" Return base folder for vagrantfiles. """
return tg.config.get('workbox.vagrantfiles.basefolder')
def get_hostname():
""" Return hostname. """
return tg.config.get('workbox.hostname')
try:
from webhelpers2 import date, html, number, misc, text
except SyntaxError:
log.error("WebHelpers2 helpers not available with this Python Version")
| # -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""
import logging
import socket
from datetime import datetime
from markupsafe import Markup
import psutil
import tg
log = logging.getLogger(__name__)
def current_year():
""" Return current year. """
now = datetime.now()
return now.strftime('%Y')
def is_docker_enabled():
""" Detect if docker service is started. """
for proc in psutil.process_iter():
if 'docker' in proc.name():
return True
return False
def get_server_load_value():
""" Get server load value. """
return psutil.virtual_memory().percent
def get_free_port():
""" Find and returns free port number. """
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(("", 0))
free_port = soc.getsockname()[1]
soc.close()
return free_port
def get_vagrantfiles_base_folder():
""" Return base folder for vagrantfiles. """
return tg.config.get('workbox.vagrantfiles.basefolder')
def get_hostname():
""" Return hostname. """
return tg.config.get('workbox.hostname')
try:
from webhelpers2 import date, html, number, misc, text
except SyntaxError:
log.error("WebHelpers2 helpers not available with this Python Version") | en | 0.596205 | # -*- coding: utf-8 -*- Template Helpers used in workbox Return current year. Detect if docker service is started. Get server load value. Find and returns free port number. Return base folder for vagrantfiles. Return hostname. | 2.310697 | 2 |
tadataka/dataset/new_tsukuba.py | IshitaTakeshi/Tadataka | 54 | 8605 | <filename>tadataka/dataset/new_tsukuba.py
import csv
import os
from pathlib import Path
from xml.etree import ElementTree as ET
from tqdm import tqdm
from scipy.spatial.transform import Rotation
from skimage.io import imread
import numpy as np
from tadataka.camera import CameraModel, CameraParameters, FOV
from tadataka.dataset.frame import Frame
from tadataka.dataset.base import BaseDataset
from tadataka.pose import Pose
def load_depth(path):
tree = ET.parse(path)
root = tree.getroot()
rows_node, cols_node, dt_node, data_node = root[0]
height, width = int(rows_node.text), int(cols_node.text)
depth_text = data_node.text
depth_text = depth_text.replace('\n', '').strip()
depth_map = np.fromstring(depth_text, sep=' ')
return depth_map.reshape(height, width)
def generate_cache(src_dir, cache_dir, src_extension, loader):
def generate_(subdir):
os.makedirs(str(Path(cache_dir, subdir)))
print(f"Generating cache from {subdir}")
paths = Path(src_dir, subdir).glob("*" + src_extension)
for path in tqdm(list(paths)):
filename = path.name.replace(src_extension, ".npy")
cache_path = Path(cache_dir, subdir, filename)
array = loader(path)
np.save(str(cache_path), array)
generate_("left")
generate_("right")
def generate_image_cache(image_dir, cache_dir):
print("Generating image cache")
generate_cache(image_dir, cache_dir, ".png", imread)
def generate_depth_cache(depth_dir, cache_dir):
print("Generating depth cache")
generate_cache(depth_dir, cache_dir, ".xml", load_depth)
def align_coordinate_system(positions, euler_angles):
# Camera coordinate system and world coordinate system are not aligned
#
# Usually camera coordinate system is represented in the format that
# x: right y: down z: forward
# however, in 'camera_track.txt', they are written in
# x: right y: up z: backward
#
# This means the camera coordinate system is
# rotated 180 degrees around the x-axis from the world coordinate system
# rotate 180 degrees around the x-axis
R = Rotation.from_rotvec([np.pi, 0, 0]).as_matrix()
positions = np.dot(R, positions.T).T
# Reverse rotations around y and z because axes are flipped
# (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z)
euler_angles[:, 1:3] = -euler_angles[:, 1:3]
return positions, euler_angles
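# Editor's sketch (not part of the original file): a minimal check of the convention
# described in the comments above, relying on the numpy/scipy imports already made at
# module level; the input values are illustrative.
def _demo_align_coordinate_system():
    positions = np.array([[0.0, 1.0, 0.0]])        # one unit along +y (y up, z backward)
    euler_angles = np.array([[10.0, 20.0, 30.0]])  # degrees, illustrative values
    p, e = align_coordinate_system(positions, euler_angles)
    # After the 180-degree rotation about x, +y becomes -y and the y/z rotations flip sign.
    assert np.allclose(p, [[0.0, -1.0, 0.0]])
    assert np.allclose(e, [[10.0, -20.0, -30.0]])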
def load_poses(pose_path):
poses = np.loadtxt(pose_path, delimiter=',')
positions, euler_angles = poses[:, 0:3], poses[:, 3:6]
positions, euler_angles = align_coordinate_system(positions, euler_angles)
rotations = Rotation.from_euler('xyz', euler_angles, degrees=True)
return rotations, positions
def discard_alpha(image):
return image[:, :, 0:3]
def calc_baseline_offset(rotation, baseline_length):
local_offset = np.array([baseline_length, 0, 0])
R = rotation.as_matrix()
return np.dot(R, local_offset)
# TODO download and set dataset_root automatically
class NewTsukubaDataset(BaseDataset):
def __init__(self, dataset_root, condition="daylight"):
self.camera_model = CameraModel(
CameraParameters(focal_length=[615, 615], offset=[320, 240]),
distortion_model=None
)
groundtruth_dir = Path(dataset_root, "groundtruth")
illumination_dir = Path(dataset_root, "illumination")
pose_path = Path(groundtruth_dir, "camera_track.txt")
self.baseline_length = 10.0
self.rotations, self.positions = load_poses(pose_path)
depth_dir = Path(groundtruth_dir, "depth_maps")
depth_cache_dir = Path(groundtruth_dir, "depth_cache")
if not depth_cache_dir.exists():
generate_depth_cache(depth_dir, depth_cache_dir)
self.depth_L_paths = sorted(Path(depth_cache_dir, "left").glob("*.npy"))
self.depth_R_paths = sorted(Path(depth_cache_dir, "right").glob("*.npy"))
image_dir = Path(illumination_dir, condition)
image_cache_dir = Path(illumination_dir, condition + "_cache")
if not image_cache_dir.exists():
generate_image_cache(image_dir, image_cache_dir)
self.image_L_paths = sorted(Path(image_cache_dir, "left").glob("*.npy"))
self.image_R_paths = sorted(Path(image_cache_dir, "right").glob("*.npy"))
assert((len(self.depth_L_paths) == len(self.depth_R_paths) ==
len(self.image_L_paths) == len(self.image_R_paths) ==
len(self.rotations) == len(self.positions)))
for i in range(len(self.positions)):
DL = self.depth_L_paths[i].name
DR = self.depth_R_paths[i].name
IL = self.image_L_paths[i].name
IR = self.image_R_paths[i].name
assert(DL[-8:] == DR[-8:] == IL[-8:] == IR[-8:])
def __len__(self):
return len(self.positions)
def load(self, index):
image_l = np.load(self.image_L_paths[index])
image_r = np.load(self.image_R_paths[index])
image_l = discard_alpha(image_l)
image_r = discard_alpha(image_r)
depth_l = np.load(self.depth_L_paths[index])
depth_r = np.load(self.depth_R_paths[index])
position_center = self.positions[index]
rotation = self.rotations[index]
offset = calc_baseline_offset(rotation, self.baseline_length)
pose_wl = Pose(rotation, position_center - offset / 2.0)
pose_wr = Pose(rotation, position_center + offset / 2.0)
return (
Frame(self.camera_model, pose_wl, image_l, depth_l),
Frame(self.camera_model, pose_wr, image_r, depth_r)
)
| <filename>tadataka/dataset/new_tsukuba.py
import csv
import os
from pathlib import Path
from xml.etree import ElementTree as ET
from tqdm import tqdm
from scipy.spatial.transform import Rotation
from skimage.io import imread
import numpy as np
from tadataka.camera import CameraModel, CameraParameters, FOV
from tadataka.dataset.frame import Frame
from tadataka.dataset.base import BaseDataset
from tadataka.pose import Pose
def load_depth(path):
tree = ET.parse(path)
root = tree.getroot()
rows_node, cols_node, dt_node, data_node = root[0]
height, width = int(rows_node.text), int(cols_node.text)
depth_text = data_node.text
depth_text = depth_text.replace('\n', '').strip()
depth_map = np.fromstring(depth_text, sep=' ')
return depth_map.reshape(height, width)
def generate_cache(src_dir, cache_dir, src_extension, loader):
def generate_(subdir):
os.makedirs(str(Path(cache_dir, subdir)))
print(f"Generating cache from {subdir}")
paths = Path(src_dir, subdir).glob("*" + src_extension)
for path in tqdm(list(paths)):
filename = path.name.replace(src_extension, ".npy")
cache_path = Path(cache_dir, subdir, filename)
array = loader(path)
np.save(str(cache_path), array)
generate_("left")
generate_("right")
def generate_image_cache(image_dir, cache_dir):
print("Generating image cache")
generate_cache(image_dir, cache_dir, ".png", imread)
def generate_depth_cache(depth_dir, cache_dir):
print("Generating depth cache")
generate_cache(depth_dir, cache_dir, ".xml", load_depth)
def align_coordinate_system(positions, euler_angles):
# Camera coordinate system and world coordinate system are not aligned
#
# Usually camera coordinate system is represented in the format that
# x: right y: down z: forward
# however, in 'camera_track.txt', they are written in
# x: right y: up z: backward
#
# This means the camera coordinate system is
# rotated 180 degrees around the x-axis from the world coordinate system
# rotate 180 degrees around the x-axis
R = Rotation.from_rotvec([np.pi, 0, 0]).as_matrix()
positions = np.dot(R, positions.T).T
# Reverse rotations around y and z because axes are flipped
# (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z)
euler_angles[:, 1:3] = -euler_angles[:, 1:3]
return positions, euler_angles
def load_poses(pose_path):
poses = np.loadtxt(pose_path, delimiter=',')
positions, euler_angles = poses[:, 0:3], poses[:, 3:6]
positions, euler_angles = align_coordinate_system(positions, euler_angles)
rotations = Rotation.from_euler('xyz', euler_angles, degrees=True)
return rotations, positions
def discard_alpha(image):
return image[:, :, 0:3]
def calc_baseline_offset(rotation, baseline_length):
local_offset = np.array([baseline_length, 0, 0])
R = rotation.as_matrix()
return np.dot(R, local_offset)
# TODO download and set dataset_root automatically
class NewTsukubaDataset(BaseDataset):
def __init__(self, dataset_root, condition="daylight"):
self.camera_model = CameraModel(
CameraParameters(focal_length=[615, 615], offset=[320, 240]),
distortion_model=None
)
groundtruth_dir = Path(dataset_root, "groundtruth")
illumination_dir = Path(dataset_root, "illumination")
pose_path = Path(groundtruth_dir, "camera_track.txt")
self.baseline_length = 10.0
self.rotations, self.positions = load_poses(pose_path)
depth_dir = Path(groundtruth_dir, "depth_maps")
depth_cache_dir = Path(groundtruth_dir, "depth_cache")
if not depth_cache_dir.exists():
generate_depth_cache(depth_dir, depth_cache_dir)
self.depth_L_paths = sorted(Path(depth_cache_dir, "left").glob("*.npy"))
self.depth_R_paths = sorted(Path(depth_cache_dir, "right").glob("*.npy"))
image_dir = Path(illumination_dir, condition)
image_cache_dir = Path(illumination_dir, condition + "_cache")
if not image_cache_dir.exists():
generate_image_cache(image_dir, image_cache_dir)
self.image_L_paths = sorted(Path(image_cache_dir, "left").glob("*.npy"))
self.image_R_paths = sorted(Path(image_cache_dir, "right").glob("*.npy"))
assert((len(self.depth_L_paths) == len(self.depth_R_paths) ==
len(self.image_L_paths) == len(self.image_R_paths) ==
len(self.rotations) == len(self.positions)))
for i in range(len(self.positions)):
DL = self.depth_L_paths[i].name
DR = self.depth_R_paths[i].name
IL = self.image_L_paths[i].name
IR = self.image_R_paths[i].name
assert(DL[-8:] == DR[-8:] == IL[-8:] == IR[-8:])
def __len__(self):
return len(self.positions)
def load(self, index):
image_l = np.load(self.image_L_paths[index])
image_r = np.load(self.image_R_paths[index])
image_l = discard_alpha(image_l)
image_r = discard_alpha(image_r)
depth_l = np.load(self.depth_L_paths[index])
depth_r = np.load(self.depth_R_paths[index])
position_center = self.positions[index]
rotation = self.rotations[index]
offset = calc_baseline_offset(rotation, self.baseline_length)
pose_wl = Pose(rotation, position_center - offset / 2.0)
pose_wr = Pose(rotation, position_center + offset / 2.0)
return (
Frame(self.camera_model, pose_wl, image_l, depth_l),
Frame(self.camera_model, pose_wr, image_r, depth_r)
)
| en | 0.798185 | # Camera coordinate system and world coordinate system are not aligned # # Usually camera coordinate system is represented in the format that # x: right y: down z: forward # however, in 'camera_track.txt', they are written in # x: right y: up z: backward # # This means the camera coordinate system is # rotated 180 degrees around the x-axis from the world coordinate system # rotate 180 degrees around the x-axis # Reverse rotations around y and z because axes are flipped # (rot_x, rot_y, rot_z) <- (rot_x, -rot_y, -rot_z) # TODO download and set dataset_root automatically | 2.222965 | 2 |
krogon/maybe.py | enamrik/krogon | 1 | 8606 | from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix
A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')
Maybe = Union[Tuple['just', A], Tuple['nothing']]
def just(value=None):
return "just", value
def nothing():
return "nothing", None
def from_value(value) -> Maybe[B]:
return _cast_to_maybe(value)
def from_value_or_default(value, default) -> Maybe[B]:
return from_maybe(
_cast_to_maybe(value),
dict(if_just=lambda x: just(x),
if_nothing=lambda: _cast_to_maybe(default)))
@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "just":
return _cast_to_maybe(func(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "nothing":
return _cast_to_maybe(func())
elif maybe[0] == "just":
return maybe
@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
if maybe[0] == "just":
return just(mapper(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))
@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
if_just: Callable = dict_args['if_just']
if_nothing: Callable = dict_args['if_nothing']
if maybe[0] == "just" and if_just is not None:
return if_just(maybe[1])
elif maybe[0] == "nothing" and if_nothing is not None:
return if_nothing()
else:
raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))
def _cast_to_maybe(result):
if result is None:
return nothing()
if isinstance(result, tuple) and len(result) == 2:
maybe_type, value = result
if maybe_type == "just" or maybe_type == "nothing":
return result
return just(result)
| from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix
A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')
Maybe = Union[Tuple['just', A], Tuple['nothing']]
def just(value=None):
return "just", value
def nothing():
return "nothing", None
def from_value(value) -> Maybe[B]:
return _cast_to_maybe(value)
def from_value_or_default(value, default) -> Maybe[B]:
return from_maybe(
_cast_to_maybe(value),
dict(if_just=lambda x: just(x),
if_nothing=lambda: _cast_to_maybe(default)))
@Infix
def then(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "just":
return _cast_to_maybe(func(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def catch_nothing(maybe: Maybe[A], func: Callable[[A], Maybe[B]]) -> Maybe[B]:
if maybe[0] == "nothing":
return _cast_to_maybe(func())
elif maybe[0] == "just":
return maybe
@Infix
def map(maybe: Maybe[A], mapper: Callable[[A], B]) -> Maybe[B]:
if maybe[0] == "just":
return just(mapper(maybe[1]))
elif maybe[0] == "nothing":
return maybe
@Infix
def value_or_default(maybe: Maybe[A], default_value: B):
return maybe | from_maybe | (dict(if_just=lambda x: x, if_nothing=lambda: default_value))
@Infix
def from_maybe(maybe: Maybe[A], dict_args: dict) -> B:
if_just: Callable = dict_args['if_just']
if_nothing: Callable = dict_args['if_nothing']
if maybe[0] == "just" and if_just is not None:
return if_just(maybe[1])
elif maybe[0] == "nothing" and if_nothing is not None:
return if_nothing()
else:
raise Exception('Invalid Maybe: {}, {}'.format(maybe, dict_args))
def _cast_to_maybe(result):
if result is None:
return nothing()
if isinstance(result, tuple) and len(result) == 2:
maybe_type, value = result
if maybe_type == "just" or maybe_type == "nothing":
return result
return just(result)
| none | 1 | 2.918928 | 3 |
|
Python (desafios)/desafio 009.py | EbersonDias/html-css | 0 | 8607 | <reponame>EbersonDias/html-css
# Desafio 009
# Write a program that reads any integer
# and shows its multiplication table on the screen.
n = int(input('digite um numero. '))
r1 = n * 1
r2 = (n * 2)
r3 = (n * 3)
r4 = (n * 4)
r5 = (n * 5)
r6 = (n * 6)
r7 = (n * 7)
r8 = (n * 8)
r9 = (n * 9)
r10 = (n * 10)
print('A Tabuada de {} é'.format(n))
print ('{} x 1 = {}'.format(n,r1))
print ('{} x 2 = {}'.format(n,r2))
print ('{} x 3 = {}'.format(n,r3))
print ('{} x 4 = {}'.format(n,r4))
print ('{} x 5 = {}'.format(n,r5))
print ('{} x 6 = {}'.format(n,r6))
print ('{} x 7 = {}'.format(n,r7))
print ('{} x 8 = {}'.format(n,r8))
print ('{} x 9 = {}'.format(n,r9))
print ('{} x 10 = {}'.format(n,r10))
# Another way to do it
n = int(input('Quanto é a Tabuada de '))
print('A Tabuada de {} é'.format(n))
print('-'*12)
print ('{} x {:2} = {}'.format(n, 1, n*1))
print ('{} x {:2} = {}'.format(n, 2, n*2))
print ('{} x {:2} = {}'.format(n, 3, n*3))
print ('{} x {:2} = {}'.format(n, 4, n*4))
print ('{} x {:2} = {}'.format(n, 5, n*5))
print ('{} x {:2} = {}'.format(n, 6, n*6))
print ('{} x {:2} = {}'.format(n, 7, n*7))
print ('{} x {:2} = {}'.format(n, 8, n*8))
print ('{} x {:2} = {}'.format(n, 9, n*9))
print ('{} x {:2} = {}'.format(n, 10, n*10))
print('-'*12) | # Desafio 009
# Write a program that reads any integer
# and shows its multiplication table on the screen.
n = int(input('digite um numero. '))
r1 = n * 1
r2 = (n * 2)
r3 = (n * 3)
r4 = (n * 4)
r5 = (n * 5)
r6 = (n * 6)
r7 = (n * 7)
r8 = (n * 8)
r9 = (n * 9)
r10 = (n * 10)
print('A Tabuada de {} é'.format(n))
print ('{} x 1 = {}'.format(n,r1))
print ('{} x 2 = {}'.format(n,r2))
print ('{} x 3 = {}'.format(n,r3))
print ('{} x 4 = {}'.format(n,r4))
print ('{} x 5 = {}'.format(n,r5))
print ('{} x 6 = {}'.format(n,r6))
print ('{} x 7 = {}'.format(n,r7))
print ('{} x 8 = {}'.format(n,r8))
print ('{} x 9 = {}'.format(n,r9))
print ('{} x 10 = {}'.format(n,r10))
# Another way to do it
n = int(input('Quanto é a Tabuada de '))
print('A Tabuada de {} é'.format(n))
print('-'*12)
print ('{} x {:2} = {}'.format(n, 1, n*1))
print ('{} x {:2} = {}'.format(n, 2, n*2))
print ('{} x {:2} = {}'.format(n, 3, n*3))
print ('{} x {:2} = {}'.format(n, 4, n*4))
print ('{} x {:2} = {}'.format(n, 5, n*5))
print ('{} x {:2} = {}'.format(n, 6, n*6))
print ('{} x {:2} = {}'.format(n, 7, n*7))
print ('{} x {:2} = {}'.format(n, 8, n*8))
print ('{} x {:2} = {}'.format(n, 9, n*9))
print ('{} x {:2} = {}'.format(n, 10, n*10))
print('-'*12) | pt | 0.968999 | # Desafio 009 # Faça um programa que leia um numero inteiro qualquer # e mostre na tela a sua tabuada. #Outra forma de ser feito | 4.025932 | 4 |
tools/__init__.py | supercatex/TelloEdu | 1 | 8608 | from tools.TelloEdu import TelloEdu
from tools.Controller import *
from tools.SocketObject import SocketClient | from tools.TelloEdu import TelloEdu
from tools.Controller import *
from tools.SocketObject import SocketClient | none | 1 | 1.161954 | 1 |
|
neutron/agent/ovsdb/native/helpers.py | congnt95/neutron | 1,080 | 8609 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
enable_connection_uri = functools.partial(
priv_helpers.enable_connection_uri,
log_fail_as_error=False, check_exit_code=False,
timeout=cfg.CONF.OVS.ovsdb_timeout,
inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
| # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
enable_connection_uri = functools.partial(
priv_helpers.enable_connection_uri,
log_fail_as_error=False, check_exit_code=False,
timeout=cfg.CONF.OVS.ovsdb_timeout,
inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
| en | 0.854798 | # Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.17854 | 1 |
conlo/serializer/json_serializer.py | kira607/config_loader | 0 | 8610 | import json
from .base_serializer import BaseSerializer
class JsonSerializer(BaseSerializer):
'''Json serializer.'''
def _serialize(self, data: dict, **kwargs) -> str:
return json.dumps(data)
def _deserialize(self, data: str, **kwargs) -> dict:
return json.loads(data)
| import json
from .base_serializer import BaseSerializer
class JsonSerializer(BaseSerializer):
'''Json serializer.'''
def _serialize(self, data: dict, **kwargs) -> str:
return json.dumps(data)
def _deserialize(self, data: str, **kwargs) -> dict:
return json.loads(data)
| en | 0.304022 | Json serializer. | 2.766278 | 3 |
console_weather.py | AlBan52/API_weather | 0 | 8611 | <reponame>AlBan52/API_weather<filename>console_weather.py
import requests
locations = ['Лондон', 'Шереметьево', 'Череповец']
payload = {'mnTq': '', 'lang': 'ru'}
for location in locations:
response = requests.get(f'http://wttr.in/{location}', params=payload)
response.raise_for_status()
print(response.text)
| import requests
locations = ['Лондон', 'Шереметьево', 'Череповец']
payload = {'mnTq': '', 'lang': 'ru'}
for location in locations:
response = requests.get(f'http://wttr.in/{location}', params=payload)
response.raise_for_status()
print(response.text) | none | 1 | 2.58305 | 3 |
|
migrations/versions/576712576c48_added_model_for_photo_comments.py | Torniojaws/vortech-backend | 0 | 8612 | <reponame>Torniojaws/vortech-backend
"""Added model for photo comments
Revision ID: 576712576c48
Revises: <PASSWORD>
Create Date: 2018-03-30 02:06:22.877079
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '576712576c48'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('CommentsPhotos',
sa.Column('CommentID', sa.Integer(), nullable=False),
sa.Column('PhotoID', sa.Integer(), nullable=False),
sa.Column('Comment', sa.Text(), nullable=False),
sa.Column('UserID', sa.Integer(), nullable=False),
sa.Column('Created', sa.DateTime(), nullable=True),
sa.Column('Updated', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['PhotoID'], ['Photos.PhotoID'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['UserID'], ['Users.UserID'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('CommentID')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('CommentsPhotos')
# ### end Alembic commands ###
| """Added model for photo comments
Revision ID: 576712576c48
Revises: <PASSWORD>
Create Date: 2018-03-30 02:06:22.877079
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '576712576c48'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('CommentsPhotos',
sa.Column('CommentID', sa.Integer(), nullable=False),
sa.Column('PhotoID', sa.Integer(), nullable=False),
sa.Column('Comment', sa.Text(), nullable=False),
sa.Column('UserID', sa.Integer(), nullable=False),
sa.Column('Created', sa.DateTime(), nullable=True),
sa.Column('Updated', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['PhotoID'], ['Photos.PhotoID'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['UserID'], ['Users.UserID'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('CommentID')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('CommentsPhotos')
# ### end Alembic commands ### | en | 0.568353 | Added model for photo comments
Revision ID: 576712576c48
Revises: <PASSWORD>
Create Date: 2018-03-30 02:06:22.877079 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.44758 | 1 |
__init__.py | m3sserschmitt/basic-http | 0 | 8613 | <filename>__init__.py
import basic_http.session
basic_http.session.LIB_VERSION = 'v0.0.4-beta'
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
| <filename>__init__.py
import basic_http.session
basic_http.session.LIB_VERSION = 'v0.0.4-beta'
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
| none | 1 | 1.323146 | 1 |
|
usaspending_api/etl/helpers.py | truthiswill/usaspending-api | 0 | 8614 | from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def clear_caches():
for cache_name in ('default', 'locations', 'awards'):
caches[cache_name].clear()
def cleanse_values(row):
"""
Remove textual quirks from CSV values.
"""
row = {k: v.strip() for (k, v) in row.items()}
row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()}
return row
def convert_date(date):
if date == "":
return None
return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
def get_subtier_agency_dict():
"""Returns a dictionary with key = subtier agency code and value = agency id."""
# there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier
# codes, the dictionary we return will reflect the most recently updated one
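    # (the dict comprehension below lets later rows overwrite earlier ones, so the most
    # recently updated agency wins when a subtier code is duplicated)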
agencies = Agency.objects.all().values(
'id',
'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
subtier_agency_dict = {
a['subtier_agency__subtier_code']: a['id'] for a in agencies
}
return subtier_agency_dict
def fetch_country_code(vendor_country_code):
code_str = up2colon(vendor_country_code)
if code_str == "":
return None
country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first()
if not country_code:
# We don't have an exact match on the name or the code, so we need to
# chain filter on the name
query_set = RefCountryCode.objects
for word in code_str.split():
query_set = query_set.filter(country_name__icontains=word)
country_code = query_set.first()
return country_code
location_cache = caches['locations']
def get_or_create_location(row, mapper):
location_dict = mapper(row)
# Country-specific adjustments
if location_dict["location_country_code"] == "USA":
# Apparently zip codes are optional...
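        # e.g. a 9-digit ZIP like '902101234' (illustrative value) is split below into
        # zip5='90210' and zip_last4='1234'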
if location_dict["location_zip"]:
location_dict.update(
zip5=location_dict["location_zip"][:5],
zip_last4=location_dict["location_zip"][5:])
location_dict.pop("location_zip")
else:
location_dict.update(
foreign_postal_code=location_dict.pop("location_zip", None),
foreign_province=location_dict.pop("state_code", None))
if "city_name" in location_dict:
location_dict['foreign_city_name'] = location_dict.pop("city_name")
location_dict = canonicalize_location_dict(location_dict)
location_tup = tuple(location_dict.items())
location = location_cache.get(location_tup)
if location:
return location
location = Location.objects.filter(**location_dict).first()
if not location:
location = Location.objects.create(**location_dict)
location_cache.set(location_tup, location)
return location
def up2colon(input_string):
'Takes the part of a string before `:`, if any.'
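    # e.g. up2colon('USA: UNITED STATES') returns 'USA' (input value is illustrative)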
if input_string:
return input_string.split(':')[0].strip()
return ''
def parse_numeric_value(string):
try:
return float(string)
except Exception:
return None
def get_fiscal_quarter(fiscal_reporting_period):
"""
Return the fiscal quarter.
Note: the reporting period being passed should already be in "federal fiscal format",
where period 1 = Oct. and period 12 = Sept.
"""
if fiscal_reporting_period in [1, 2, 3]:
return 1
elif fiscal_reporting_period in [4, 5, 6]:
return 2
elif fiscal_reporting_period in [7, 8, 9]:
return 3
elif fiscal_reporting_period in [10, 11, 12]:
return 4
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
"""
For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the
previous submission within the same fiscal year.
"""
previous_submission = SubmissionAttributes.objects \
.filter(
cgac_code=cgac_code,
reporting_fiscal_year=fiscal_year,
reporting_fiscal_period__lt=fiscal_period,
quarter_format_flag=True) \
.order_by('-reporting_fiscal_period') \
.first()
return previous_submission
def update_model_description_fields():
"""
This method searches through every model Django has registered, checks if it
belongs to a list of apps we should update, and updates all fields with
'_description' at the end with their relevant information.
Dictionaries for DAIMS definitions should be stored in:
usaspending_api/data/daims_maps.py
Each map should be <field_name>_map for discoverability.
If there are conflicting maps (i.e., two models use type_description, but
different enumerations) prepend the map name with the model name and a dot.
For examples of these situations, see the documentation in daims_maps.py
"""
logger = logging.getLogger('console')
# This is a list of apps whose models will be checked for description fields
updatable_apps = [
"accounts",
"awards",
"common",
"financial_activities",
"references",
"submissions"
]
# This iterates over every model that Django has registered
for model in django.apps.apps.get_models():
# This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps.
# Thus, we'll skip any django admin apps, like auth, corsheaders, etc.
if model._meta.app_label not in updatable_apps:
continue
if model.__name__[:10] == "Historical":
continue
model_fields = [f.name for f in model._meta.get_fields()]
# This supports multi-case DAIMS
# We must filter on the model level rather than add them to the when clauses, because if there is a FK in the
# when clause Django is not guaranteed to join on that table properly.
#
# This is an array of tuples of the following format
# (Q object of filter, field_names -> case objects map for this filter)
#
# It is initialized with a blank filter and empty list, which is where default updates are stored
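    # e.g. after the loops below it could look like
    # [(Q(), {'type_description': Case(...)}), (Q(some_field='X'), {'type_description': Case(...)})]
    # (the filter field name here is purely illustrative)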
model_filtered_update_case_map = [(Q(), {})]
desc_fields = [field for field in model_fields if field.split('_')[-1] ==
"description"[:len(field.split('_')[-1])]]
non_desc_fields = [field for field in model_fields if field not in desc_fields]
desc_fields_mapping = {}
for desc_field in desc_fields:
actual_field_short = "_".join(desc_field.split('_')[:-1])
actual_field = None
for field in non_desc_fields:
if actual_field_short == field:
actual_field = field
elif actual_field_short == field[:len(actual_field_short)]:
actual_field = field
desc_fields_mapping[desc_field] = actual_field
# Loop through each of the models fields to construct a case for each applicable field
for field in model_fields:
# We're looking for field names ending in _description
split_name = field.split("_")
# If the last element in our split name isn't description, skip it
if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
continue
source_field = "_".join(split_name[:-1])
destination_field = field
# This is the map name, prefixed by model name for when there are non-unique description fields
source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field
model_map_name = "{}.{}_map".format(model.__name__, source_field)
map_name = "{}_map".format(source_field)
# This stores a direct reference to the enumeration mapping
code_map = None
# Validate we have the source field
if source_field not in model_fields:
logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
format(destination_field, model.__name__, source_field))
continue
# Validate we have a map
# Prefer model_map_name over map_name
if model_map_name in daims_maps.keys():
code_map = daims_maps[model_map_name]
elif map_name in daims_maps.keys():
code_map = daims_maps[map_name]
else:
logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
format(destination_field, model.__name__, model_map_name, map_name))
continue
# Cases start from 1
case_number = 1
case_name = "case_1"
case_map = "case_1_map"
while case_name in code_map.keys():
case_object = create_case(code_map[case_map], source_field)
# Construct a Q filter for this case
case_filter = Q(**code_map[case_name])
# See if we already have a tuple for this filter
case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
if len(case_tuple) == 0:
# We don't, so create the tuple
temp_case_dict = {}
temp_case_dict[field] = case_object
model_filtered_update_case_map.append((case_filter, temp_case_dict))
else:
# We do, so just add our case object to that dictionary
case_tuple[0][1][field] = case_object
# Check for the next case
case_number += 1
case_name = "case_{}".format(case_number)
case_map = "case_{}_map".format(case_number)
# If our case number is still 1, then we didn't have any cases. Therefore, we perform the default
if case_number == 1:
case_object = create_case(code_map, source_field)
# Grab the first tuple, which has no filters
case_tuple = model_filtered_update_case_map[0]
# Add it to our dictionary
case_tuple[1][field] = case_object
for filter_tuple in model_filtered_update_case_map:
# For each filter tuple, check if the dictionary has any entries
if len(filter_tuple[1].keys()) > 0:
print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
try:
model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
except django.db.utils.ProgrammingError as e:
logger.warn(str(e))
logger.warn("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
def create_case(code_map, source_field):
when_list = []
default = None
for code in code_map.keys():
when_args = {}
when_args[source_field] = code
when_args["then"] = Value(code_map[code])
# If our code is blank, change the comparison to ""
if code == "_BLANK":
when_args[source_field] = Value("")
# We handle the default case later
if code == "_DEFAULT":
default = Value(code_map[code])
continue
# Append a new when to our when-list
when_list.append(When(**when_args))
return Case(*when_list, default=default)
| from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def clear_caches():
for cache_name in ('default', 'locations', 'awards'):
caches[cache_name].clear()
def cleanse_values(row):
"""
Remove textual quirks from CSV values.
"""
row = {k: v.strip() for (k, v) in row.items()}
row = {k: (None if v.lower() == 'null' else v) for (k, v) in row.items()}
return row
def convert_date(date):
if date == "":
return None
return datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
def get_subtier_agency_dict():
"""Returns a dictionary with key = subtier agency code and value = agency id."""
# there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier
# codes, the dictionary we return will reflect the most recently updated one
agencies = Agency.objects.all().values(
'id',
'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
subtier_agency_dict = {
a['subtier_agency__subtier_code']: a['id'] for a in agencies
}
return subtier_agency_dict
def fetch_country_code(vendor_country_code):
code_str = up2colon(vendor_country_code)
if code_str == "":
return None
country_code = RefCountryCode.objects.filter(Q(country_code=code_str) | Q(country_name__iexact=code_str)).first()
if not country_code:
# We don't have an exact match on the name or the code, so we need to
# chain filter on the name
query_set = RefCountryCode.objects
for word in code_str.split():
query_set = query_set.filter(country_name__icontains=word)
country_code = query_set.first()
return country_code
location_cache = caches['locations']
def get_or_create_location(row, mapper):
location_dict = mapper(row)
# Country-specific adjustments
if location_dict["location_country_code"] == "USA":
# Apparently zip codes are optional...
if location_dict["location_zip"]:
location_dict.update(
zip5=location_dict["location_zip"][:5],
zip_last4=location_dict["location_zip"][5:])
location_dict.pop("location_zip")
else:
location_dict.update(
foreign_postal_code=location_dict.pop("location_zip", None),
foreign_province=location_dict.pop("state_code", None))
if "city_name" in location_dict:
location_dict['foreign_city_name'] = location_dict.pop("city_name")
location_dict = canonicalize_location_dict(location_dict)
location_tup = tuple(location_dict.items())
location = location_cache.get(location_tup)
if location:
return location
location = Location.objects.filter(**location_dict).first()
if not location:
location = Location.objects.create(**location_dict)
location_cache.set(location_tup, location)
return location
def up2colon(input_string):
'Takes the part of a string before `:`, if any.'
if input_string:
return input_string.split(':')[0].strip()
return ''
def parse_numeric_value(string):
try:
return float(string)
except Exception:
return None
def get_fiscal_quarter(fiscal_reporting_period):
"""
Return the fiscal quarter.
Note: the reporting period being passed should already be in "federal fiscal format",
where period 1 = Oct. and period 12 = Sept.
"""
if fiscal_reporting_period in [1, 2, 3]:
return 1
elif fiscal_reporting_period in [4, 5, 6]:
return 2
elif fiscal_reporting_period in [7, 8, 9]:
return 3
elif fiscal_reporting_period in [10, 11, 12]:
return 4
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
"""
For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the
previous submission within the same fiscal year.
"""
previous_submission = SubmissionAttributes.objects \
.filter(
cgac_code=cgac_code,
reporting_fiscal_year=fiscal_year,
reporting_fiscal_period__lt=fiscal_period,
quarter_format_flag=True) \
.order_by('-reporting_fiscal_period') \
.first()
return previous_submission
def update_model_description_fields():
"""
This method searches through every model Django has registered, checks if it
belongs to a list of apps we should update, and updates all fields with
'_description' at the end with their relevant information.
Dictionaries for DAIMS definitions should be stored in:
usaspending_api/data/daims_maps.py
Each map should be <field_name>_map for discoverability.
If there are conflicting maps (i.e., two models use type_description, but
different enumerations) prepend the map name with the model name and a dot.
For examples of these situations, see the documentation in daims_maps.py
"""
logger = logging.getLogger('console')
# This is a list of apps whose models will be checked for description fields
updatable_apps = [
"accounts",
"awards",
"common",
"financial_activities",
"references",
"submissions"
]
# This iterates over every model that Django has registered
for model in django.apps.apps.get_models():
# This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps.
# Thus, we'll skip any django admin apps, like auth, corsheaders, etc.
if model._meta.app_label not in updatable_apps:
continue
if model.__name__[:10] == "Historical":
continue
model_fields = [f.name for f in model._meta.get_fields()]
# This supports multi-case DAIMS
# We must filter on the model level rather than add them to the when clauses, because if there is a FK in the
# when clause Django is not guaranteed to join on that table properly.
#
# This is an array of tuples of the following format
# (Q object of filter, field_names -> case objects map for this filter)
#
# It is initialized with a blank filter and empty list, which is where default updates are stored
model_filtered_update_case_map = [(Q(), {})]
desc_fields = [field for field in model_fields if field.split('_')[-1] ==
"description"[:len(field.split('_')[-1])]]
non_desc_fields = [field for field in model_fields if field not in desc_fields]
desc_fields_mapping = {}
for desc_field in desc_fields:
actual_field_short = "_".join(desc_field.split('_')[:-1])
actual_field = None
for field in non_desc_fields:
if actual_field_short == field:
actual_field = field
elif actual_field_short == field[:len(actual_field_short)]:
actual_field = field
desc_fields_mapping[desc_field] = actual_field
# Loop through each of the models fields to construct a case for each applicable field
for field in model_fields:
# We're looking for field names ending in _description
split_name = field.split("_")
# If the last element in our split name isn't description, skip it
if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
continue
source_field = "_".join(split_name[:-1])
destination_field = field
# This is the map name, prefixed by model name for when there are non-unique description fields
source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field
model_map_name = "{}.{}_map".format(model.__name__, source_field)
map_name = "{}_map".format(source_field)
# This stores a direct reference to the enumeration mapping
code_map = None
# Validate we have the source field
if source_field not in model_fields:
logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
format(destination_field, model.__name__, source_field))
continue
# Validate we have a map
# Prefer model_map_name over map_name
if model_map_name in daims_maps.keys():
code_map = daims_maps[model_map_name]
elif map_name in daims_maps.keys():
code_map = daims_maps[map_name]
else:
logger.warn("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
format(destination_field, model.__name__, model_map_name, map_name))
continue
# Cases start from 1
case_number = 1
case_name = "case_1"
case_map = "case_1_map"
while case_name in code_map.keys():
case_object = create_case(code_map[case_map], source_field)
# Construct a Q filter for this case
case_filter = Q(**code_map[case_name])
# See if we already have a tuple for this filter
case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
if len(case_tuple) == 0:
# We don't, so create the tuple
temp_case_dict = {}
temp_case_dict[field] = case_object
model_filtered_update_case_map.append((case_filter, temp_case_dict))
else:
# We do, so just add our case object to that dictionary
case_tuple[0][1][field] = case_object
# Check for the next case
case_number += 1
case_name = "case_{}".format(case_number)
case_map = "case_{}_map".format(case_number)
# If our case number is still 1, then we didn't have any cases. Therefore, we perform the default
if case_number == 1:
case_object = create_case(code_map, source_field)
# Grab the first tuple, which has no filters
case_tuple = model_filtered_update_case_map[0]
# Add it to our dictionary
case_tuple[1][field] = case_object
for filter_tuple in model_filtered_update_case_map:
# For each filter tuple, check if the dictionary has any entries
if len(filter_tuple[1].keys()) > 0:
print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
try:
model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
except django.db.utils.ProgrammingError as e:
logger.warn(str(e))
logger.warn("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
def create_case(code_map, source_field):
when_list = []
default = None
for code in code_map.keys():
when_args = {}
when_args[source_field] = code
when_args["then"] = Value(code_map[code])
# If our code is blank, change the comparison to ""
if code == "_BLANK":
when_args[source_field] = Value("")
# We handle the default case later
if code == "_DEFAULT":
default = Value(code_map[code])
continue
# Append a new when to our when-list
when_list.append(When(**when_args))
return Case(*when_list, default=default)
| en | 0.866645 | Remove textual quirks from CSV values. Returns a dictionary with key = subtier agency code and value = agency id. # there's no unique constraint on subtier_code, so the order by below ensures that in the case of duplicate subtier # codes, the dictionary we return will reflect the most recently updated one # We don't have an exact match on the name or the code, so we need to # chain filter on the name # Country-specific adjustments # Apparently zip codes are optional... Return the fiscal quarter. Note: the reporting period being passed should already be in "federal fiscal format", where period 1 = Oct. and period 12 = Sept. For the specified CGAC (e.g., department/top-tier agency) and specified fiscal year and quarter, return the previous submission within the same fiscal year. This method searches through every model Django has registered, checks if it belongs to a list of apps we should update, and updates all fields with '_description' at the end with their relevant information. Dictionaries for DAIMS definitions should be stored in: usaspending_api/data/daims_maps.py Each map should be <field_name>_map for discoverability. If there are conflicting maps (i.e., two models use type_description, but different enumerations) prepend the map name with the model name and a dot. For examples of these situations, see the documentation in daims_maps.py # This is a list of apps whose models will be checked for description fields # This iterates over every model that Django has registered # This checks the app_label of the model, and thus we can skip it if it is not in one of our updatable_apps. # Thus, we'll skip any django admin apps, like auth, corsheaders, etc. # This supports multi-case DAIMS # We must filter on the model level rather than add them to the when clauses, because if there is a FK in the # when clause Django is not guaranteed to join on that table properly. # # This is an array of tuples of the following format # (Q object of filter, field_names -> case objects map for this filter) # # It is initialized with a blank filter and empty list, which is where default updates are stored # Loop through each of the models fields to construct a case for each applicable field # We're looking for field names ending in _description # If the last element in our split name isn't description, skip it # This is the map name, prefixed by model name for when there are non-unique description fields # This stores a direct reference to the enumeration mapping # Validate we have the source field # Validate we have a map # Prefer model_map_name over map_name # Cases start from 1 # Construct a Q filter for this case # See if we already have a tuple for this filter # We don't, so create the tuple # We do, so just add our case object to that dictionary # Check for the next case # If our case number is still 1, then we didn't have any cases. Therefore, we perform the default # Grab the first tuple, which has no filters # Add it to our dictionary # For each filter tuple, check if the dictionary has any entries # Utility method for update_model_description_fields, creates the Case object # If our code is blank, change the comparison to "" # We handle the default case later # Append a new when to our when-list | 2.034613 | 2 |
{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py | DevAerial/flask-api-template | 0 | 8615 | <gh_stars>0
from flask_marshmallow import Marshmallow{% if cookiecutter.use_celery == 'yes'%}
from celery import Celery
celery = Celery(){% endif %}
ma = Marshmallow()
| from flask_marshmallow import Marshmallow{% if cookiecutter.use_celery == 'yes'%}
from celery import Celery
celery = Celery(){% endif %}
ma = Marshmallow() | none | 1 | 2.036231 | 2 |
|
code/mapplot.py | young-astronomer/vlpy | 0 | 8616 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot a polarization map from a VLBI FITS image.
You should specify the input fits images by -i or --infile,
output file by -o or --outfile,
contour levs by -l or --levs
contour base by -c or --cmul
polarization parameters by -p or --pol: "icut pcut inc scale"
plot window by -w or --win
restore beam position by -b or --bpos
figsize by -f or --figsize
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set environment variables
Add the following line to ~/.bashrc
export PATH=$PATH:/home/username/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py -i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: <NAME>
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: <EMAIL>; <EMAIL>
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
def get_normalize(args, vmin=0.0, vmax=1.0):
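	# 'args' is the -n/--norm specification string, e.g. 'linear 0 1', 'power 0.5',
	# 'log 1e-4 1e-2', 'symlog 1e-3' or 'twoslope 0' (numeric values here are illustrative).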
if args == '':
norm = mcolors.Normalize(vmin, vmax)
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
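# Editor's sketch (not in the original file): calling mapplot() directly from Python,
# mirroring the first CLI example in the module docstring; the FITS file name and window
# values are the docstring's own example inputs and are assumed to exist locally.
def _example_mapplot_call():
    mapplot('cta102.fits', 1.8e-3, outfile='cta102-color.pdf',
            win=[18, -8, -20, 6], figsize=(7, 6), norm='power 0.5')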
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
            myhelp()
            sys.exit(0)
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:]) | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot polarization maps from VLBI FITS images.
You should specify the input fits images by -i or --infile,
output file by -o or --output,
contour levs by -l or --levs
contour base by -c or --cmul
polarization parameters by -p or --pol: "icut pcut inc scale"
plot window by -w or --win
restore beam position by -b or --bpos
figsize by -f or --figsize
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set environment parameters
Add the following line to ~/.bashrc
export PATH=$PATH:/home/usename/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py -i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: <NAME>
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: <EMAIL>; <EMAIL>
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
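    # Draw the restoring beam as a filled ellipse near a corner of the plot window;
    # header BMAJ/BMIN (assumed to be in degrees) are converted to mas here.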
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
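    # Drop the first N_cut of the 256 colormap samples so the lowest image values
    # are not mapped to the colormap's darkest colors.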
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
def get_normalize(args, vmin=0.0, vmax=1.0):
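    # Parse a normalization spec string such as 'linear vmin vmax', 'power gamma [vmin vmax]',
    # 'log vmin vmax', 'symlog linthresh [linscale [vmin vmax]]' or 'twoslope vcenter [vmin vmax]'
    # into the corresponding matplotlib Normalize object.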
if args == '':
norm = mcolors.Normalize(vmin, vmax)
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
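    # Convert plot-window limits given in relative milliarcseconds into pixel indices,
    # using the header reference pixel (crpix) and increment (cdelt); 3.6E6 converts degrees to mas.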
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
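    # Inverse of word2pix: convert pixel indices back into relative coordinates in milliarcseconds.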
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
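    # Contour-plus-colormap plot of a FITS image; cmul sets the base contour level, and if levs
    # is not given it defaults to cmul times [-1, 1, 2, 4, ..., 4096].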
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
            myhelp()
            sys.exit(0)
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:]) | en | 0.428234 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Wed Oct 21 11:11:56 2020 This program is use to plot polarization map from vlbi fits image. You should specify the input fits images by -i or --infile, output file by -o or --output, contour levs by -l or --levs contour base by -c or --cmul polarization parameters by -p or --pol: "icut pcut inc scale" plot window by -w or --win restore beam position by -b or --bpos figsize by -f or --figsize Installation: 1. copy file chmod a+x mapplot.py cp mapplot.py ~/myapp 2. set envioment parameters Add the following line to ~/.bashrc export PATH=$PATH:/home/usename/myapp source ~/.bashrc Running like this: mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul> mapplot.py i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol> Examples: 1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5' 2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3 https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize @author: <NAME> Shanghai Astronomical Observatory, Chinese Academy of Sciences E-mail: <EMAIL>; <EMAIL> # ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction') # ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction') # cmap = mcolors.Colormap(cmap) # img = hdul[0].data[0, 0, :, :] # print(win) # cbar.ax.minorticks_off() # infile = r'3c66a-calib/circe-beam.fits' | 2.555305 | 3 |
umbrella/api/v1/router.py | pizhi/umbrella | 1 | 8617 | <gh_stars>1-10
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umbrella.api.v1 import api
from umbrella.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v1 API requests."""
def __init__(self, mapper):
api_resource = api.create_resource()
mapper.connect("/",
controller=api_resource,
action="index")
mapper.connect("/images",
controller=api_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect("/images/{id}",
controller=api_resource,
action="show",
conditions=dict(method=["GET"]))
mapper.connect("/net/{instance_uuid}",
controller=api_resource,
action="get_net_sample",
conditions=dict(method=["GET"]))
mapper.connect("/cpu/{instance_uuid}",
controller=api_resource,
action="get_cpu_sample",
conditions=dict(method=["GET"]))
mapper.connect("/disk/{instance_uuid}",
controller=api_resource,
action="get_disk_sample",
conditions=dict(method=["GET"]))
mapper.connect("/mem/{instance_uuid}",
controller=api_resource,
action="get_mem_sample",
conditions=dict(method=["GET"]))
super(API, self).__init__(mapper)
| # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umbrella.api.v1 import api
from umbrella.common import wsgi
class API(wsgi.Router):
"""WSGI router for Glance v1 API requests."""
def __init__(self, mapper):
api_resource = api.create_resource()
mapper.connect("/",
controller=api_resource,
action="index")
mapper.connect("/images",
controller=api_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect("/images/{id}",
controller=api_resource,
action="show",
conditions=dict(method=["GET"]))
mapper.connect("/net/{instance_uuid}",
controller=api_resource,
action="get_net_sample",
conditions=dict(method=["GET"]))
mapper.connect("/cpu/{instance_uuid}",
controller=api_resource,
action="get_cpu_sample",
conditions=dict(method=["GET"]))
mapper.connect("/disk/{instance_uuid}",
controller=api_resource,
action="get_disk_sample",
conditions=dict(method=["GET"]))
mapper.connect("/mem/{instance_uuid}",
controller=api_resource,
action="get_mem_sample",
conditions=dict(method=["GET"]))
super(API, self).__init__(mapper) | en | 0.844686 | # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. WSGI router for Glance v1 API requests. | 2.006021 | 2 |
exemples/test_thomson_simu.py | butala/TomograPy | 7 | 8618 | <filename>exemples/test_thomson_simu.py
#!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
| <filename>exemples/test_thomson_simu.py
#!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
| en | 0.279742 | #!/usr/bin/env python # object # data # model # projection # data # backprojection # inversion using scipy.sparse.linalg | 2.463807 | 2 |
geopy/geocoders/google.py | ulope/geopy | 1 | 8619 | import logging
from urllib import urlencode
from urllib2 import urlopen
import simplejson
import xml
from xml.parsers.expat import ExpatError
from geopy.geocoders.base import Geocoder
from geopy import Point, Location, util
class Google(Geocoder):
"""Geocoder using the Google Maps API."""
def __init__(self, api_key=None, domain='maps.google.com',
resource='maps/geo', format_string='%s', output_format='kml'):
"""Initialize a customized Google geocoder with location-specific
address information and your Google Maps API key.
``api_key`` should be a valid Google Maps API key. It is required for
the 'maps/geo' resource to work.
``domain`` should be a the Google Maps domain to connect to. The default
is 'maps.google.com', but if you're geocoding address in the UK (for
example), you may want to set it to 'maps.google.co.uk'.
``resource`` is the HTTP resource to give the query parameter.
'maps/geo' is the HTTP geocoder and is a documented API resource.
'maps' is the actual Google Maps interface and its use for just
geocoding is undocumented. Anything else probably won't work.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can be 'json', 'xml', 'kml', 'csv', or 'js' and will
control the output format of Google's response. The default is 'kml'
since it is supported by both the 'maps' and 'maps/geo' resources. The
'js' format is the most likely to break since it parses Google's
JavaScript, which could change. However, it currently returns the best
results for restricted geocoder areas such as the UK.
"""
self.api_key = api_key
self.domain = domain
self.resource = resource
self.format_string = format_string
self.output_format = output_format
@property
def url(self):
domain = self.domain.strip('/')
resource = self.resource.strip('/')
return "http://%(domain)s/%(resource)s?%%s" % locals()
def geocode(self, string, exactly_one=True, language_code=None,
sensor=False, viewport_center=None, viewport_span=None):
params = {'q': self.format_string % string,
'output': self.output_format.lower(),
'sensor': str(sensor).lower(),
}
if language_code:
params.update({'gl': language_code})
if viewport_center and viewport_span:
params.update({
'll': viewport_center,
'spn': viewport_span,
})
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def reverse(self, coord, exactly_one=True):
(lat,lng) = coord
params = {'q': self.format_string % lat+','+self.format_string % lng,
'output': self.output_format.lower()
}
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one, reverse=True)
def geocode_url(self, url, exactly_one=True, reverse=False):
logging.getLogger().info("Fetching %s..." % url)
page = urlopen(url)
dispatch = getattr(self, 'parse_' + self.output_format)
return dispatch(page, exactly_one, reverse)
def parse_xml(self, page, exactly_one=True, reverse=False):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
try:
doc = xml.dom.minidom.parseString(page)
except ExpatError:
places = []
else:
places = doc.getElementsByTagName('Placemark')
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = util.get_first_text(place, ['address', 'name']) or None
points = place.getElementsByTagName('Point')
point = points and points[0] or None
coords = util.get_first_text(point, 'coordinates') or None
if coords:
longitude, latitude = [float(f) for f in coords.split(',')[:2]]
else:
latitude = longitude = None
_, (latitude, longitude) = self.geocode(location)
return (location, (latitude, longitude))
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_csv(self, page, exactly_one=True, reverse=False):
raise NotImplementedError
def parse_kml(self, page, exactly_one=True, reverse=False):
return self.parse_xml(page, exactly_one, reverse)
def parse_json(self, page, exactly_one=True, reverse=False):
if not isinstance(page, basestring):
page = util.decode_page(page)
json = simplejson.loads(page)
places = json.get('Placemark', [])
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = place.get('address')
longitude, latitude = place['Point']['coordinates'][:2]
# Add support for pulling out the canonical name
locality = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('Locality',{}).get('LocalityName')
administrative = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('AdministrativeAreaName')
return util.RichResult((location, (latitude, longitude)), locality=locality, administrative=administrative)
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_js(self, page, exactly_one=True, reverse=False):
"""This parses JavaScript returned by queries the actual Google Maps
interface and could thus break easily. However, this is desirable if
the HTTP geocoder doesn't work for addresses in your country (the
UK, for example).
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
LATITUDE = r"[\s,]lat:\s*(?P<latitude>-?\d+\.\d+)"
LONGITUDE = r"[\s,]lng:\s*(?P<longitude>-?\d+\.\d+)"
LOCATION = r"[\s,]laddr:\s*'(?P<location>.*?)(?<!\\)',"
ADDRESS = r"(?P<address>.*?)(?:(?: \(.*?@)|$)"
MARKER = '.*?'.join([LATITUDE, LONGITUDE, LOCATION])
MARKERS = r"{markers: (?P<markers>\[.*?\]),\s*polylines:"
def parse_marker(marker):
latitude, longitude, location = marker
location = re.match(ADDRESS, location).group('address')
latitude, longitude = float(latitude), float(longitude)
return (location, (latitude, longitude))
match = re.search(MARKERS, page)
markers = match and match.group('markers') or ''
markers = re.findall(MARKER, markers)
if exactly_one:
if len(markers) != 1 and (not reverse):
raise ValueError("Didn't find exactly one marker! " \
"(Found %d.)" % len(markers))
marker = markers[0]
return parse_marker(marker)
else:
return (parse_marker(marker) for marker in markers)
import logging
import re
from urllib import urlencode
from urllib2 import urlopen
import simplejson
import xml.dom.minidom
from xml.parsers.expat import ExpatError
from geopy.geocoders.base import Geocoder
from geopy import Point, Location, util
class Google(Geocoder):
"""Geocoder using the Google Maps API."""
def __init__(self, api_key=None, domain='maps.google.com',
resource='maps/geo', format_string='%s', output_format='kml'):
"""Initialize a customized Google geocoder with location-specific
address information and your Google Maps API key.
``api_key`` should be a valid Google Maps API key. It is required for
the 'maps/geo' resource to work.
``domain`` should be a the Google Maps domain to connect to. The default
is 'maps.google.com', but if you're geocoding address in the UK (for
example), you may want to set it to 'maps.google.co.uk'.
``resource`` is the HTTP resource to give the query parameter.
'maps/geo' is the HTTP geocoder and is a documented API resource.
'maps' is the actual Google Maps interface and its use for just
geocoding is undocumented. Anything else probably won't work.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can be 'json', 'xml', 'kml', 'csv', or 'js' and will
control the output format of Google's response. The default is 'kml'
since it is supported by both the 'maps' and 'maps/geo' resources. The
'js' format is the most likely to break since it parses Google's
JavaScript, which could change. However, it currently returns the best
results for restricted geocoder areas such as the UK.
"""
self.api_key = api_key
self.domain = domain
self.resource = resource
self.format_string = format_string
self.output_format = output_format
@property
def url(self):
domain = self.domain.strip('/')
resource = self.resource.strip('/')
return "http://%(domain)s/%(resource)s?%%s" % locals()
def geocode(self, string, exactly_one=True, language_code=None,
sensor=False, viewport_center=None, viewport_span=None):
params = {'q': self.format_string % string,
'output': self.output_format.lower(),
'sensor': str(sensor).lower(),
}
if language_code:
params.update({'gl': language_code})
if viewport_center and viewport_span:
params.update({
'll': viewport_center,
'spn': viewport_span,
})
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def reverse(self, coord, exactly_one=True):
(lat,lng) = coord
params = {'q': self.format_string % lat+','+self.format_string % lng,
'output': self.output_format.lower()
}
if self.resource.rstrip('/').endswith('geo'):
# An API key is only required for the HTTP geocoder.
params['key'] = self.api_key
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one, reverse=True)
def geocode_url(self, url, exactly_one=True, reverse=False):
logging.getLogger().info("Fetching %s..." % url)
page = urlopen(url)
dispatch = getattr(self, 'parse_' + self.output_format)
return dispatch(page, exactly_one, reverse)
def parse_xml(self, page, exactly_one=True, reverse=False):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
try:
doc = xml.dom.minidom.parseString(page)
except ExpatError:
places = []
else:
places = doc.getElementsByTagName('Placemark')
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = util.get_first_text(place, ['address', 'name']) or None
points = place.getElementsByTagName('Point')
point = points and points[0] or None
coords = util.get_first_text(point, 'coordinates') or None
if coords:
longitude, latitude = [float(f) for f in coords.split(',')[:2]]
else:
latitude = longitude = None
_, (latitude, longitude) = self.geocode(location)
return (location, (latitude, longitude))
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_csv(self, page, exactly_one=True, reverse=False):
raise NotImplementedError
def parse_kml(self, page, exactly_one=True, reverse=False):
return self.parse_xml(page, exactly_one, reverse)
def parse_json(self, page, exactly_one=True, reverse=False):
if not isinstance(page, basestring):
page = util.decode_page(page)
json = simplejson.loads(page)
places = json.get('Placemark', [])
if (exactly_one and len(places) != 1) and (not reverse):
raise ValueError("Didn't find exactly one placemark! " \
"(Found %d.)" % len(places))
def parse_place(place):
location = place.get('address')
longitude, latitude = place['Point']['coordinates'][:2]
# Add support for pulling out the canonical name
locality = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('Locality',{}).get('LocalityName')
administrative = place.get('AddressDetails',{}).get('Country',{}).get('AdministrativeArea',{}).get('AdministrativeAreaName')
return util.RichResult((location, (latitude, longitude)), locality=locality, administrative=administrative)
if exactly_one:
return parse_place(places[0])
else:
return (parse_place(place) for place in places)
def parse_js(self, page, exactly_one=True, reverse=False):
"""This parses JavaScript returned by queries the actual Google Maps
interface and could thus break easily. However, this is desirable if
the HTTP geocoder doesn't work for addresses in your country (the
UK, for example).
"""
if not isinstance(page, basestring):
page = util.decode_page(page)
LATITUDE = r"[\s,]lat:\s*(?P<latitude>-?\d+\.\d+)"
LONGITUDE = r"[\s,]lng:\s*(?P<longitude>-?\d+\.\d+)"
LOCATION = r"[\s,]laddr:\s*'(?P<location>.*?)(?<!\\)',"
ADDRESS = r"(?P<address>.*?)(?:(?: \(.*?@)|$)"
MARKER = '.*?'.join([LATITUDE, LONGITUDE, LOCATION])
MARKERS = r"{markers: (?P<markers>\[.*?\]),\s*polylines:"
def parse_marker(marker):
latitude, longitude, location = marker
location = re.match(ADDRESS, location).group('address')
latitude, longitude = float(latitude), float(longitude)
return (location, (latitude, longitude))
match = re.search(MARKERS, page)
markers = match and match.group('markers') or ''
markers = re.findall(MARKER, markers)
if exactly_one:
if len(markers) != 1 and (not reverse):
raise ValueError("Didn't find exactly one marker! " \
"(Found %d.)" % len(markers))
marker = markers[0]
return parse_marker(marker)
else:
return (parse_marker(marker) for marker in markers)
| en | 0.829806 | Geocoder using the Google Maps API. Initialize a customized Google geocoder with location-specific address information and your Google Maps API key. ``api_key`` should be a valid Google Maps API key. It is required for the 'maps/geo' resource to work. ``domain`` should be a the Google Maps domain to connect to. The default is 'maps.google.com', but if you're geocoding address in the UK (for example), you may want to set it to 'maps.google.co.uk'. ``resource`` is the HTTP resource to give the query parameter. 'maps/geo' is the HTTP geocoder and is a documented API resource. 'maps' is the actual Google Maps interface and its use for just geocoding is undocumented. Anything else probably won't work. ``format_string`` is a string containing '%s' where the string to geocode should be interpolated before querying the geocoder. For example: '%s, Mountain View, CA'. The default is just '%s'. ``output_format`` can be 'json', 'xml', 'kml', 'csv', or 'js' and will control the output format of Google's response. The default is 'kml' since it is supported by both the 'maps' and 'maps/geo' resources. The 'js' format is the most likely to break since it parses Google's JavaScript, which could change. However, it currently returns the best results for restricted geocoder areas such as the UK. # An API key is only required for the HTTP geocoder. # An API key is only required for the HTTP geocoder. Parse a location name, latitude, and longitude from an XML response. # Add support for pulling out the canonical name This parses JavaScript returned by queries the actual Google Maps interface and could thus break easily. However, this is desirable if the HTTP geocoder doesn't work for addresses in your country (the UK, for example). | 3.186395 | 3 |
interactive_grabcut/repo/drag2draw.py | hiankun/py_sandbox | 0 | 8620 | <reponame>hiankun/py_sandbox
# source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *
def paint(event):
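    # Draw a tiny red dot at the current cursor position; bound to <B1-Motion> below,
    # so dragging with the left mouse button leaves a painted trail.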
color = 'red'
x1, y1 = (event.x-1), (event.y-1)
x2, y2 = (event.x+1), (event.y+1)
c.create_oval(x1,y1,x2,y2,fill=color,outline=color)
master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)
master.mainloop()
| # source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *
def paint(event):
color = 'red'
x1, y1 = (event.x-1), (event.y-1)
x2, y2 = (event.x+1), (event.y+1)
c.create_oval(x1,y1,x2,y2,fill=color,outline=color)
master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)
master.mainloop() | en | 0.479014 | # source: https://www.youtube.com/watch?v=U0sVp1xLiyo | 3.303042 | 3 |
migrations/20220114_03_Heqaz-insert-default-serverinfo.py | lin483/Funny-Nations | 126 | 8621 | <gh_stars>100-1000
"""
insert default serverInfo
"""
from yoyo import step
__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}
steps = [
step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")
]
| """
insert default serverInfo
"""
from yoyo import step
__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}
steps = [
step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")
] | fa | 0.070448 | insert default serverInfo | 0.820231 | 1 |
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | slawqo/python-neutronclient | 120 | 8622 | <gh_stars>100-1000
# Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser, is_create=True):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description for the connection'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=nc_utils.str2dict_type(
optional_keys=['action', 'interval', 'timeout']),
help=vpn_utils.dpd_help("IPsec connection"))
parser.add_argument(
'--mtu',
help=_('MTU size for the connection'))
parser.add_argument(
'--initiator',
choices=['bi-directional', 'response-only'],
type=_convert_to_lowercase,
help=_('Initiator state'))
peer_group = parser.add_mutually_exclusive_group()
peer_group.add_argument(
'--peer-cidr',
dest='peer_cidrs',
help=_('Remote subnet(s) in CIDR format. '
'Cannot be specified when using endpoint groups. Only '
'applicable, if subnet provided for VPN service.')
)
peer_group.add_argument(
'--local-endpoint-group',
help=_('Local endpoint group (name or ID) with subnet(s) '
'for IPsec connection')
)
parser.add_argument(
'--peer-endpoint-group',
help=_('Peer endpoint group (name or ID) with CIDR(s) for '
'IPSec connection'))
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable IPSec site connection")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable IPSec site connection")
)
parser.add_argument(
'--local-id',
help=_('An ID to be used instead of the external IP '
'address for a virtual router'))
return parser
def _get_common_attrs(client_manager, parsed_args, is_create=True):
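    # Translate parsed CLI arguments into the attribute dict sent to the Neutron VPNaaS API;
    # endpoint group names are resolved to IDs through the Neutron client here.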
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = str(parsed_args.description)
if parsed_args.mtu:
attrs['mtu'] = parsed_args.mtu
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if parsed_args.initiator:
attrs['initiator'] = parsed_args.initiator
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
attrs['dpd'] = parsed_args.dpd
if parsed_args.local_endpoint_group:
_local_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.local_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['local_ep_group_id'] = _local_epg
if parsed_args.peer_endpoint_group:
_peer_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.peer_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['peer_ep_group_id'] = _peer_epg
if parsed_args.peer_cidrs:
attrs['peer_cidrs'] = parsed_args.peer_cidrs
if parsed_args.local_id:
attrs['local_id'] = parsed_args.local_id
return attrs
class CreateIPsecSiteConnection(command.ShowOne):
_description = _("Create an IPsec site connection")
def get_parser(self, prog_name):
parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
parser.add_argument(
'--vpnservice',
metavar='VPNSERVICE',
required=True,
help=_('VPN service instance associated with this '
'connection (name or ID)'))
parser.add_argument(
'--ikepolicy',
metavar='IKEPOLICY',
required=True,
help=_('IKE policy associated with this connection (name or ID)'))
parser.add_argument(
'--ipsecpolicy',
metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy associated with this connection '
'(name or ID)'))
parser.add_argument(
'name',
metavar='<name>',
help=_('Set friendly name for the connection'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.vpnservice:
_vpnservice_id = client.find_resource(
'vpnservice',
parsed_args.vpnservice,
cmd_resource='vpnservice')['id']
attrs['vpnservice_id'] = _vpnservice_id
if parsed_args.ikepolicy:
_ikepolicy_id = client.find_resource(
'ikepolicy',
parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
attrs['ikepolicy_id'] = _ikepolicy_id
if parsed_args.ipsecpolicy:
_ipsecpolicy_id = client.find_resource(
'ipsecpolicy',
parsed_args.ipsecpolicy,
cmd_resource='ipsecpolicy')['id']
attrs['ipsecpolicy_id'] = _ipsecpolicy_id
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.psk:
attrs['psk'] = parsed_args.psk
if parsed_args.name:
attrs['name'] = parsed_args.name
if (bool(parsed_args.local_endpoint_group) !=
bool(parsed_args.peer_endpoint_group)):
message = _("You must specify both local and peer endpoint "
"groups")
raise exceptions.CommandError(message)
if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group:
message = _("You must specify endpoint groups or peer CIDR(s)")
raise exceptions.CommandError(message)
obj = client.create_ipsec_site_connection(
{'ipsec_site_connection': attrs})['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return display_columns, data
class DeleteIPsecSiteConnection(command.Command):
_description = _("Delete IPsec site connection(s)")
def get_parser(self, prog_name):
parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
nargs='+',
help=_('IPsec site connection to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ipsec_conn in parsed_args.ipsec_site_connection:
try:
ipsec_con_id = client.find_resource(
'ipsec_site_connection',
ipsec_conn,
cmd_resource='ipsec_site_connection')['id']
client.delete_ipsec_site_connection(ipsec_con_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IPsec site connection with "
"name or ID '%(ipsec_site_conn)s': %(e)s"),
{'ipsec_site_conn': ipsec_conn, 'e': e})
if result > 0:
total = len(parsed_args.ipsec_site_connection)
msg = (_("%(result)s of %(total)s IPsec site connection failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIPsecSiteConnection(command.Lister):
_description = _("List IPsec site connections "
"that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ipsec_site_connections()['ipsec_site_connections']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(
s, columns, formatters=_formatters) for s in obj))
class SetIPsecSiteConnection(command.Command):
_description = _("Set IPsec site connection properties")
def get_parser(self, prog_name):
parser = super(SetIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set friendly name for the connection'))
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.name:
attrs['name'] = parsed_args.name
ipsec_conn_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
try:
client.update_ipsec_site_connection(
ipsec_conn_id,
{'ipsec_site_connection': attrs})
except Exception as e:
msg = (_("Failed to set IPsec site "
"connection '%(ipsec_conn)s': %(e)s")
% {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e})
raise exceptions.CommandError(msg)
class ShowIPsecSiteConnection(command.ShowOne):
_description = _("Show information of a given IPsec site connection")
def get_parser(self, prog_name):
parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ipsec_site_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
obj = client.show_ipsec_site_connection(
ipsec_site_id)['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
| # Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser, is_create=True):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description for the connection'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=nc_utils.str2dict_type(
optional_keys=['action', 'interval', 'timeout']),
help=vpn_utils.dpd_help("IPsec connection"))
parser.add_argument(
'--mtu',
help=_('MTU size for the connection'))
parser.add_argument(
'--initiator',
choices=['bi-directional', 'response-only'],
type=_convert_to_lowercase,
help=_('Initiator state'))
peer_group = parser.add_mutually_exclusive_group()
peer_group.add_argument(
'--peer-cidr',
dest='peer_cidrs',
help=_('Remote subnet(s) in CIDR format. '
'Cannot be specified when using endpoint groups. Only '
'applicable, if subnet provided for VPN service.')
)
peer_group.add_argument(
'--local-endpoint-group',
help=_('Local endpoint group (name or ID) with subnet(s) '
'for IPsec connection')
)
parser.add_argument(
'--peer-endpoint-group',
help=_('Peer endpoint group (name or ID) with CIDR(s) for '
'IPSec connection'))
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable IPSec site connection")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable IPSec site connection")
)
parser.add_argument(
'--local-id',
help=_('An ID to be used instead of the external IP '
'address for a virtual router'))
return parser
def _get_common_attrs(client_manager, parsed_args, is_create=True):
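    # Translate parsed CLI arguments into the attribute dict sent to the Neutron VPNaaS API;
    # endpoint group names are resolved to IDs through the Neutron client here.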
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = str(parsed_args.description)
if parsed_args.mtu:
attrs['mtu'] = parsed_args.mtu
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if parsed_args.initiator:
attrs['initiator'] = parsed_args.initiator
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
attrs['dpd'] = parsed_args.dpd
if parsed_args.local_endpoint_group:
_local_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.local_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['local_ep_group_id'] = _local_epg
if parsed_args.peer_endpoint_group:
_peer_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.peer_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['peer_ep_group_id'] = _peer_epg
if parsed_args.peer_cidrs:
attrs['peer_cidrs'] = parsed_args.peer_cidrs
if parsed_args.local_id:
attrs['local_id'] = parsed_args.local_id
return attrs
class CreateIPsecSiteConnection(command.ShowOne):
_description = _("Create an IPsec site connection")
def get_parser(self, prog_name):
parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
parser.add_argument(
'--vpnservice',
metavar='VPNSERVICE',
required=True,
help=_('VPN service instance associated with this '
'connection (name or ID)'))
parser.add_argument(
'--ikepolicy',
metavar='IKEPOLICY',
required=True,
help=_('IKE policy associated with this connection (name or ID)'))
parser.add_argument(
'--ipsecpolicy',
metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy associated with this connection '
'(name or ID)'))
parser.add_argument(
'name',
metavar='<name>',
help=_('Set friendly name for the connection'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.vpnservice:
_vpnservice_id = client.find_resource(
'vpnservice',
parsed_args.vpnservice,
cmd_resource='vpnservice')['id']
attrs['vpnservice_id'] = _vpnservice_id
if parsed_args.ikepolicy:
_ikepolicy_id = client.find_resource(
'ikepolicy',
parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
attrs['ikepolicy_id'] = _ikepolicy_id
if parsed_args.ipsecpolicy:
_ipsecpolicy_id = client.find_resource(
'ipsecpolicy',
parsed_args.ipsecpolicy,
cmd_resource='ipsecpolicy')['id']
attrs['ipsecpolicy_id'] = _ipsecpolicy_id
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.psk:
attrs['psk'] = parsed_args.psk
if parsed_args.name:
attrs['name'] = parsed_args.name
if (bool(parsed_args.local_endpoint_group) !=
bool(parsed_args.peer_endpoint_group)):
message = _("You must specify both local and peer endpoint "
"groups")
raise exceptions.CommandError(message)
if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group:
message = _("You must specify endpoint groups or peer CIDR(s)")
raise exceptions.CommandError(message)
obj = client.create_ipsec_site_connection(
{'ipsec_site_connection': attrs})['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return display_columns, data
class DeleteIPsecSiteConnection(command.Command):
_description = _("Delete IPsec site connection(s)")
def get_parser(self, prog_name):
parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
nargs='+',
help=_('IPsec site connection to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ipsec_conn in parsed_args.ipsec_site_connection:
try:
ipsec_con_id = client.find_resource(
'ipsec_site_connection',
ipsec_conn,
cmd_resource='ipsec_site_connection')['id']
client.delete_ipsec_site_connection(ipsec_con_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IPsec site connection with "
"name or ID '%(ipsec_site_conn)s': %(e)s"),
{'ipsec_site_conn': ipsec_conn, 'e': e})
if result > 0:
total = len(parsed_args.ipsec_site_connection)
msg = (_("%(result)s of %(total)s IPsec site connection failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIPsecSiteConnection(command.Lister):
_description = _("List IPsec site connections "
"that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ipsec_site_connections()['ipsec_site_connections']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(
s, columns, formatters=_formatters) for s in obj))
class SetIPsecSiteConnection(command.Command):
_description = _("Set IPsec site connection properties")
def get_parser(self, prog_name):
parser = super(SetIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set friendly name for the connection'))
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.name:
attrs['name'] = parsed_args.name
ipsec_conn_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
try:
client.update_ipsec_site_connection(
ipsec_conn_id,
{'ipsec_site_connection': attrs})
except Exception as e:
msg = (_("Failed to set IPsec site "
"connection '%(ipsec_conn)s': %(e)s")
% {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e})
raise exceptions.CommandError(msg)
class ShowIPsecSiteConnection(command.ShowOne):
_description = _("Show information of a given IPsec site connection")
def get_parser(self, prog_name):
parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ipsec_site_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
obj = client.show_ipsec_site_connection(
ipsec_site_id)['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
        return (display_columns, data)
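# Show, Set and Delete above share one pattern: resolve the user-supplied name
# or ID to a UUID with find_resource(), then call the matching client method.
# The helper below is an illustrative sketch of that pattern, not a command.
def _example_show_connection(client, name_or_id):
    """Mirror ShowIPsecSiteConnection.take_action() for a bare client object."""
    conn_id = client.find_resource(
        'ipsec_site_connection', name_or_id,
        cmd_resource='ipsec_site_connection')['id']
    return client.show_ipsec_site_connection(conn_id)['ipsec_site_connection']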
Arknights/flags.py | AlaricGilbert/ArknightsAutoHelper | 0 | 8623 | <gh_stars>0
TINY_WAIT = 1
SMALL_WAIT = 3
MEDIUM_WAIT = 5
BIG_WAIT = 10
SECURITY_WAIT = 15
BATTLE_FINISH_DETECT = 12
BATTLE_NONE_DETECT_TIME = 90
BATTLE_END_SIGNAL_MAX_EXECUTE_TIME = 15
# Offsets for key actions
FLAGS_START_BATTLE_BIAS = (50, 25)
FLAGS_ENSURE_TEAM_INFO_BIAS = (25, 50)
# Square click offsets
FLAGS_CLICK_BIAS_TINY = (3, 3)
FLAGS_CLICK_BIAS_SMALL = (5, 5)
FLAGS_CLICK_BIAS_MEDIUM = (10, 10)
FLAGS_CLICK_BIAS_BIG = (15, 15)
FLAGS_CLICK_BIAS_HUGE = (30, 30)
# Swipe offsets
# Offsets for left/right swipes, i.e. applied to the initial coordinate point
FLAGS_SWIPE_BIAS_TO_LEFT = ((1, 1), (1, 1))
FLAGS_SWIPE_BIAS_TO_RIGHT = ((1, 1), (1, 1))
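# The click-bias tuples above appear to define per-axis jitter ranges. A helper
# that consumes them might look like the sketch below; random_offset is a
# hypothetical name and is not defined elsewhere in this project.
def random_offset(point, bias=FLAGS_CLICK_BIAS_SMALL):
    """Return point jittered by up to +/- bias on each axis."""
    import random
    x, y = point
    bias_x, bias_y = bias
    return (x + random.randint(-bias_x, bias_x), y + random.randint(-bias_y, bias_y))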
elasticsearch/client/shutdown.py | Conky5/elasticsearch-py | 4 | 8624 | <filename>elasticsearch/client/shutdown.py
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
class ShutdownClient(NamespacedClient):
@query_params()
def delete_node(self, node_id, params=None, headers=None):
"""
Removes a node from the shutdown list
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg node_id: The node id of node to be removed from the
shutdown state
"""
if node_id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'node_id'.")
return self.transport.perform_request(
"DELETE",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
)
@query_params()
def get_node(self, node_id=None, params=None, headers=None):
"""
Retrieve status of a node or nodes that are currently marked as shutting down
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
        :arg node_id: The node for which to retrieve the shutdown
            status
"""
return self.transport.perform_request(
"GET",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
)
@query_params()
def put_node(self, node_id, body, params=None, headers=None):
"""
Adds a node to be shut down
`<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg node_id: The node id of node to be shut down
:arg body: The shutdown type definition to register
"""
for param in (node_id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT",
_make_path("_nodes", node_id, "shutdown"),
params=params,
headers=headers,
body=body,
)
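# A minimal usage sketch. How this namespace is reached from a client instance
# depends on the elasticsearch-py version; the attribute name `shutdown` and
# the body fields below are assumptions based on the node shutdown API.
def _example_shutdown_roundtrip(es, node_id):
    """Mark a node for restart, read back its status, then clear the marker."""
    es.shutdown.put_node(node_id, body={"type": "restart", "reason": "planned maintenance"})
    status = es.shutdown.get_node(node_id)
    es.shutdown.delete_node(node_id)
    return status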
sushichef.py | RechercheTech/sushi-chef-arvind-gupta-toys | 1 | 8625 | <reponame>RechercheTech/sushi-chef-arvind-gupta-toys
#!/usr/bin/env python
import os
import requests
import re
import shutil
from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from ricecooker.chefs import SushiChef
from ricecooker.classes.files import YouTubeVideoFile
from ricecooker.classes.licenses import get_license
from ricecooker.classes.nodes import VideoNode, TopicNode
ARVIND = "<NAME>"
ARVIND_URL = "http://www.arvindguptatoys.com/films.html"
ROOT_DIR_PATH = os.getcwd()
DOWNLOADS_PATH = os.path.join(ROOT_DIR_PATH, "downloads")
DOWNLOADS_VIDEOS_PATH = os.path.join(DOWNLOADS_PATH, "videos/")
SKIP_VIDEOS_PATH = os.path.join(ROOT_DIR_PATH, "skip_videos.txt")
# These are the languages whose video sections have no sub-topics.
SINGLE_TOPIC_LANGUAGES = [
"bhojpuri; bajpuri; bhojapuri", # actual lang_obj.name in le-utils
"bhojpuri", # future-proofing for upcoming lang_obj.name changes
"nepali",
"malayalam",
"telugu",
"bengali",
"odiya",
"punjabi",
"marwari; marwadi", # actual lang_obj.name in le-utils
"marwari", # future-proofing for upcoming lang_obj.name changes
"assamese",
"urdu",
"spanish",
"chinese",
"indonesian",
"sci_edu",
"science/educational",
]
# Language sections whose topic headings are themselves languages
MULTI_LANGUAGE_TOPIC = ["russian", "french",]
# Estimated total number of language sections on the Arvind Gupta Toys page
TOTAL_ARVIND_LANG = 23
SINGLE_TOPIC = "single"
STANDARD_TOPIC = "standard"
MULTI_LANGUAGE = "multi"
YOUTUBE_DOMAINS = ["youtu.be", "youtube.com"]
DEBUG_MODE = True  # Print extra debug info during the chef run (disable in prod)
def clean_video_title(title, lang_obj):
# Remove redundant and misleading words in the video title
clean_title = title
try:
if title != None:
clean_str = title.replace("-", " ").replace("MB", "").replace("|", "")
clean_uplang = clean_str.replace(lang_obj.name.upper(), "")
clean_lowlang = clean_uplang.replace(lang_obj.name.lower(), "")
clean_caplang = clean_lowlang.replace(lang_obj.name.capitalize() , "")
clean_format = clean_caplang.replace(".avi", "").replace(".wmv", "").strip()
clean_extra_spaces = re.sub(" +", " ",clean_format)
is_int = clean_extra_spaces[-2:]
if is_int.isdigit():
clean_extra_spaces = clean_extra_spaces.replace(is_int, "")
clean_title = clean_extra_spaces
print("Cleaned video title ====> ", clean_title)
except Exception as e:
print('Error cleaning this video title: ', clean_title)
return clean_title
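# Illustrative, hypothetical input: for a title such as
# "HINDI - Balloon Pump - 12MB.wmv" with a Hindi lang_obj, the cleaning above
# would be expected to strip the language name, the dashes, the "MB" marker,
# the extension and the trailing digits, leaving roughly "Balloon Pump".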
def include_video_topic(topic_node, video_data, lang_obj):
# Include video details to the parent topic node
video_id = video_data.uid
video_source_id = 'arvind-video-{0}'.format(video_id)
video_node = VideoNode(
source_id=video_source_id,
title=clean_video_title(video_data.title, lang_obj),
description=video_data.description,
author=ARVIND,
thumbnail=video_data.thumbnail,
license=get_license("CC BY-NC", copyright_holder=ARVIND),
files=[
YouTubeVideoFile(
youtube_id=video_id,
language=video_data.language,
high_resolution=False,
)
])
topic_node.add_child(video_node)
def save_skip_videos(video, topic, lang_obj):
# Compile skip videos into text file
if not os.path.exists(SKIP_VIDEOS_PATH):
open(SKIP_VIDEOS_PATH,"w+")
text_file = open(SKIP_VIDEOS_PATH, "a")
video_info = video.language + " - " + topic + " - " + video.url + " - " + video.license + "\n"
text_file.write(video_info)
text_file.close()
def download_video_topics(data, topic, topic_node, lang_obj):
"""
Scrape, collect, and download the videos and their thumbnails.
"""
video_source_ids = []
for vinfo in data[topic]:
try:
video = ArvindVideo(
url=vinfo['video_url'],
title=vinfo['video_title'],
language=lang_obj.code)
if video.download_info():
if video.license_common:
video_source_id = 'arvind-video-{0}'.format(video.uid)
if video_source_id not in video_source_ids:
include_video_topic(topic_node, video, lang_obj)
video_source_ids.append(video_source_id)
else:
print('Skipping duplicate video: ' + str(vinfo['video_url']))
else:
save_skip_videos(video, topic, lang_obj)
else:
save_skip_videos(video, topic, lang_obj)
except Exception as e:
print('Error downloading this video:', e)
def generate_child_topics(arvind_contents, main_topic, lang_obj, topic_type):
    # Create child topic nodes (or attach videos directly) under this language's topic
data = arvind_contents[lang_obj.name]
for topic_index in data:
topic_name = topic_index
if topic_type == STANDARD_TOPIC:
source_id = 'arvind-child-topic-{0}'.format(topic_name)
topic_node = TopicNode(title=topic_name, source_id=source_id)
download_video_topics(data, topic_name, topic_node, lang_obj)
main_topic.add_child(topic_node)
if topic_type == SINGLE_TOPIC:
download_video_topics(data, topic_name, main_topic, lang_obj)
return main_topic
def create_language_data(lang_data, lang_obj):
"""
Process the list of elements in `lang_data` to extract video links.
"""
topic_contents = {}
initial_topics = []
prev_topic = ""
first_count = 1
total_loop = len(lang_data)
lang_name = lang_obj.name.lower()
for item in lang_data:
total_loop -= 1
if isinstance(item, NavigableString) or item.name == 'br':
continue # skip whitespace and <br/> tags
try:
title = item.text.rstrip().strip()
video_link = ""
try:
video_a_tag = item.find('a')
if video_a_tag:
video_link = video_a_tag.get("href") # for videos
else:
video_link = "" # for headings
topic_details = {}
if any(ytd in video_link for ytd in YOUTUBE_DOMAINS):
if lang_name in MULTI_LANGUAGE_TOPIC:
current_lang = title.split()[0].lower()
if first_count == 1:
first_count = 0
prev_topic = current_lang
topic_details['video_url'] = video_link
topic_details['video_title'] = title
if lang_name in MULTI_LANGUAGE_TOPIC:
if prev_topic != current_lang:
topic_contents[prev_topic] = initial_topics
initial_topics = []
prev_topic = current_lang
initial_topics.append(topic_details)
except Exception as e:
print('>> passing on', e)
pass
if first_count == 1:
if ":" in title:
first_count = 0
prev_topic = title.replace(":", "").strip()
if video_link == "":
if ":" in title:
topic_contents[prev_topic] = initial_topics
prev_topic = title.replace(":", "").strip()
initial_topics = []
except Exception as e:
print('>>> passing on', e)
pass
# This wasn't working (last topic in each standard language was missing) ...
# if total_loop == 0:
# topic_contents[prev_topic] = initial_topics
# ... so changed to this:
topic_contents[prev_topic] = initial_topics
return topic_contents
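# Rough sketch of the return shape for a standard language section, with
# made-up headings and URLs:
#     {"Toys": [{"video_url": "https://youtu.be/...", "video_title": "..."}],
#      "Science": [...]}
# i.e. a mapping of topic heading to a list of video_url/video_title dicts.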
def scrape_arvind_page():
url = ARVIND_URL
response = requests.get(url)
page = BeautifulSoup(response.text, 'html5lib')
content_divs = page.body.div
list_divs = list(content_divs.children)
languages_div_start = 5
languages_list = list(list_divs[languages_div_start].children)
return languages_list
def get_language_details(lang_name):
video_lang = ArvindLanguage(name=lang_name)
if video_lang.get_lang_obj():
return video_lang
return None
def create_language_topic():
arvind_languages = scrape_arvind_page()
main_topic_list = []
if os.path.exists(SKIP_VIDEOS_PATH):
os.remove(SKIP_VIDEOS_PATH)
loop_max = TOTAL_ARVIND_LANG
language_next_int = 7
    loop_counter = 0
    while loop_counter != loop_max:
try:
lang_name = arvind_languages[language_next_int].get('id')
lang_obj = get_language_details(lang_name.lower())
if lang_obj != None:
lang_name = lang_obj.name
lang_name_lower = lang_name.lower()
print('== Processing ', lang_name, '='*60)
language_source_id = 'arvind-parent-topic-{0}'.format(lang_name_lower)
# print('language_source_id =', language_source_id)
get_language_data = list(arvind_languages[language_next_int])
# print('len(get_language_data) = ', len(get_language_data))
data_contents = { lang_name: create_language_data(get_language_data, lang_obj) }
# print('len(data_contents[lang_name])', len(data_contents[lang_name]))
language_topic = TopicNode(title=lang_name.capitalize(), source_id=language_source_id)
if lang_name_lower not in SINGLE_TOPIC_LANGUAGES and lang_name_lower not in MULTI_LANGUAGE_TOPIC:
print("=======> This Language is in standard format", lang_name)
topic_type = STANDARD_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in SINGLE_TOPIC_LANGUAGES:
print("=====> This Language is in single topic format ", lang_name)
topic_type = SINGLE_TOPIC
generate_child_topics(data_contents, language_topic, lang_obj, topic_type)
main_topic_list.append(language_topic)
print("=====>finished", lang_name)
if lang_name_lower in MULTI_LANGUAGE_TOPIC:
print("=====> This Language is in multiple langauage topic format ", lang_name)
lang_data = create_language_data(get_language_data, lang_obj)
for lang in lang_data:
current_lang = get_language_details(lang.lower())
if current_lang != None:
parent_source_id = 'arvind-parent-topic-{0}'.format(current_lang.name)
parent_topic = TopicNode(title=lang.capitalize(), source_id=parent_source_id)
data_dic = {current_lang.name: {"": lang_data[lang]}}
topic_type = SINGLE_TOPIC
generate_child_topics(data_dic, parent_topic, current_lang, topic_type)
main_topic_list.append(parent_topic)
print("=====>finished ", lang)
except Exception as e:
print("===> error getting language topics: ", e)
# raise(e)
language_next_int += 4
        loop_counter += 1
return main_topic_list
class ArvindChef(SushiChef):
channel_info = {
"CHANNEL_TITLE": "Arvind Gupta Toys",
"CHANNEL_SOURCE_DOMAIN": "arvindguptatoys.com",
"CHANNEL_SOURCE_ID": "toys-from-trash",
"CHANNEL_LANGUAGE": "mul",
"CHANNEL_THUMBNAIL": 'chefdata/arvind_gupta_thumbnail.png',
"CHANNEL_DESCRIPTION": "Math and science activities through low-cost " \
"materials all in the form of videos to provide various pathways for children to explore" \
" and deepen their understanding of concepts in low-resource contexts around the world." \
" Valuable resource library for teachers to incorporate in their lessons, for parents to" \
" work with children at home using readily available, simple, and low-cost materials.",
}
def pre_run(self, args, options):
"""This function will get called by ricecooker before the chef runs."""
if args['update']:
# delete video info .json files cached in chefdata/youtubecache/
print('Deleting vinfo .json files in {}'.format(YOUTUBE_CACHE_DIR))
if os.path.exists(YOUTUBE_CACHE_DIR):
shutil.rmtree(YOUTUBE_CACHE_DIR)
os.makedirs(YOUTUBE_CACHE_DIR)
def construct_channel(self, **kwargs):
channel = self.get_channel(**kwargs)
languages_topic = create_language_topic()
for lang_topic in languages_topic:
channel.add_child(lang_topic)
return channel
if __name__ == "__main__":
"""
Run this script on the command line using:
python sushichef.py -v --reset --token=YOURTOKENHERE9139139f3a23232
"""
chef = ArvindChef()
chef.main()
api/views/domain.py | lndba/apasa_backend | 1 | 8626 | from rest_framework.viewsets import ModelViewSet,GenericViewSet
from rest_framework.response import Response
from api.serializers.domain import *
from api.pagination.page import MyPageNumberPagination
from api.models import *
class MDomainListViewSet(ModelViewSet):
queryset = MasterDomainName.objects.all().order_by('id')
pagination_class = MyPageNumberPagination
serializer_class = MDomainListSerializers
class DnsListViewSet(GenericViewSet):
def list(self, request, *args, **kwargs):
res = {"count": 0, 'results': None}
domain_id = request.query_params.get('domain')
dns_list = Dns.objects.all().filter(master_domain_name=domain_id)
dns_count = Dns.objects.all().filter(master_domain_name=domain_id).count()
page = MyPageNumberPagination()
page_dns_list = page.paginate_queryset(dns_list,request,self)
ser = DnsListSerializers(instance=page_dns_list,many=True)
res['results'] = ser.data
res['count'] = dns_count
return Response(res)
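# Assuming a router exposes DnsListViewSet at /dns/ (the path is an assumption,
# routing is not shown here), a request like GET /dns/?domain=3&page=1 would
# return {"count": <total>, "results": [...]} with the page produced by
# MyPageNumberPagination and serialized by DnsListSerializers.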
class DnsUpdataViewSet(ModelViewSet):
queryset = Dns.objects.all().order_by('id')
serializer_class = DnsUpdataSerializers
90-subsets-ii.py | yuenliou/leetcode | 0 | 8627 | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
"""
        Solution write-up (in Chinese): https://leetcode-cn.com/problems/subsets/solution/c-zong-jie-liao-hui-su-wen-ti-lei-xing-dai-ni-gao-/
"""
def backtrack(start, path):
            # Termination condition: none; every path is recorded as a subset
res.append(path[:])
for i in range(start, len(nums)):
                # Skip values equal to the previous number at this level (avoids duplicate subsets)
                if i > start and nums[i] == nums[i - 1]: continue
                # Make a choice
                path.append(nums[i])
                # Recurse into the next level of decisions
                backtrack(i + 1, path)
                # Undo the choice (backtrack)
path.pop()
res = []
nums.sort()
        backtrack(0, [])
return res
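# Worked example for nums = [1, 2, 2] (already sorted): every recursion node
# records its current path, and the `i > start` duplicate check prunes any
# branch that would start a second identical value at the same level, so the
# traversal yields [], [1], [1, 2], [1, 2, 2], [2], [2, 2] and nothing else.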
def main():
param = [1,2,2]
solution = Solution()
ret = solution.subsetsWithDup(param)
print(ret)
'''90. Subsets II
Given an integer array nums that may contain duplicate elements, return all
possible subsets (the power set).
Note: the solution set must not contain duplicate subsets.
Example:
Input: [1,2,2]
Output:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
Source: LeetCode (leetcode-cn.com)
Link: https://leetcode-cn.com/problems/subsets-ii
Copyright belongs to LeetCode. For commercial reuse please contact the official
channel for authorization; for non-commercial reuse please credit the source.
'''
if __name__ == '__main__':
main()
tools/output_tool.py | climberwb/bert-pli | 5 | 8628 | import json
from .accuracy_tool import gen_micro_macro_result
def null_output_function(data, config, *args, **params):
return ""
def basic_output_function(data, config, *args, **params):
which = config.get("output", "output_value").replace(" ", "").split(",")
temp = gen_micro_macro_result(data)
result = {}
for name in which:
result[name] = temp[name]
return json.dumps(result, sort_keys=True)
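# Hypothetical configuration sketch: with an [output] section whose
# output_value is "micro_precision,macro_f1", basic_output_function() returns a
# JSON string holding just those two keys, provided gen_micro_macro_result()
# produces them; the metric names here are assumptions, not guaranteed keys.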
python/drydock_provisioner/ingester/plugins/deckhand.py | Vjrx/airship-drydock | 14 | 8629 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
self.load_schemas()
def get_name(self):
return "deckhand"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param content: String of valid Deckhand YAML
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
if 'content' in kwargs:
try:
# Hash the input to use as the cache key. This is not a security
# related hash, so use cheap and fast MD5
hv = hashlib.md5(kwargs.get('content', b'')).hexdigest()
local_cache = cache.get_cache('parsed_docs')
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
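    # Callers pass the raw document bytes, for example
    #     status, models = ingester.ingest_data(content=yaml_bytes)
    # Identical payloads are served from the beaker 'parsed_docs' cache, keyed
    # on the MD5 of the content, instead of being re-parsed.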
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
        Returns a tuple of an objects.Validation instance summarizing all
        document processing and a list of models yielded by successful processing.
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.Validation()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
try:
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
try:
doc_ref = objects.DocumentReference(
doc_type=hd_fields.DocumentType.Deckhand,
doc_schema=d.get('schema'),
doc_name=d.get('metadata', {}).get('name', 'Unknown'))
doc_errors = self.validate_drydock_document(d)
if len(doc_errors) > 0:
for e in doc_errors:
ps.add_detail_msg(
objects.ValidationMessage(
msg="%s:%s schema validation error: %s" %
(doc_kind, doc_version, e),
name="DD001",
docs=[doc_ref],
error=True,
level=hd_fields.MessageLevels.ERROR,
diagnostic=
"Invalid input file - see Drydock Troubleshooting Guide for DD001"
))
ps.set_status(hd_fields.ActionResult.Failure)
continue
model = self.process_drydock_document(d)
model.doc_ref = doc_ref
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Exception during document processing "
"- see Drydock Troubleshooting Guide "
"for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Unexpected exception during document "
"processing - see Drydock Troubleshooting "
"Guide for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
(schema_ns, kind, version) = doc.get('schema', '').split('/')
if version == 'v1':
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
else:
doc_processor = None
if doc_processor is None:
raise errors.IngesterError(
"Invalid document - Kind %s and Version %s" % (kind, version))
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('data', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
considered valid. Schema is chosen by the doc['kind'] field.
        Returns an empty list for valid documents; otherwise returns a list
        of all errors found.
:param doc: dictionary of the parsed document.
"""
schemaname = doc.get('schema', '')
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
errors_found = []
if doc_version == 'v1':
if schemaname in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(schemaname))
for error in validator.iter_errors(doc.get('data', [])):
errors_found.append(error.message)
return errors_found
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
repos = data.get('repositories', None)
if repos:
model.repositories = self.process_drydock_region_repo_list(repos)
return model
def process_drydock_region_repo_list(self, data):
"""Process a package repository list.
:param data: The data from the ``repositories`` key in a Region document
"""
model = objects.RepositoryList()
for k, v in data.items():
if k == 'remove_unlisted':
model.remove_unlisted = v
else:
model.append(objects.Repository(name=k, **v))
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
model.location = copy.deepcopy(data.get('location', {}))
model.local_networks = [n for n in data.get('local_networks', [])]
return model
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
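    # Illustrative shape of the `data` dict consumed above (values are
    # placeholders; the real enum values come from
    # drydock_provisioner.objects.fields):
    #     {'bonding': {'mode': '802.3ad', 'hash': 'layer3+4', 'peer_rate': 'fast'},
    #      'mtu': 9000, 'linkspeed': 'auto',
    #      'trunking': {'mode': '802.1q', 'default_network': 'mgmt'},
    #      'allowed_networks': ['mgmt', 'storage']}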
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
model.cpu_sets = data.get('cpu_sets', None) or dict()
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
model.hugepages_confs.append(conf)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
        :param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
for nf in nf.get('filter_set', []):
nf_model = objects.NodeFilter(**nf)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
self.log.error("Invalid address assignment %s on Node %s" %
(address, self.name))
return model
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
        Return a tuple of two lists: the first is a StorageDeviceList, the
        second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
                        part_model.volume_group = vv.get('volume_group')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
| # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
self.load_schemas()
def get_name(self):
return "deckhand"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param content: String of valid Deckhand YAML
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
if 'content' in kwargs:
try:
# Hash the input to use as the cache key. This is not a security
# related hash, so use cheap and fast MD5
hv = hashlib.md5(kwargs.get('content', b'')).hexdigest()
local_cache = cache.get_cache('parsed_docs')
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
        Returns a tuple of an objects.TaskStatus instance to summarize all
        document processing and a list of models yielded by successful processing.
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.Validation()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
try:
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
try:
doc_ref = objects.DocumentReference(
doc_type=hd_fields.DocumentType.Deckhand,
doc_schema=d.get('schema'),
doc_name=d.get('metadata', {}).get('name', 'Unknown'))
doc_errors = self.validate_drydock_document(d)
if len(doc_errors) > 0:
for e in doc_errors:
ps.add_detail_msg(
objects.ValidationMessage(
msg="%s:%s schema validation error: %s" %
(doc_kind, doc_version, e),
name="DD001",
docs=[doc_ref],
error=True,
level=hd_fields.MessageLevels.ERROR,
diagnostic=
"Invalid input file - see Drydock Troubleshooting Guide for DD001"
))
ps.set_status(hd_fields.ActionResult.Failure)
continue
model = self.process_drydock_document(d)
model.doc_ref = doc_ref
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Exception during document processing "
"- see Drydock Troubleshooting Guide "
"for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
ps.add_detail_msg(
objects.ValidationMessage(
msg=msg,
name="DD000",
error=True,
level=hd_fields.MessageLevels.ERROR,
docs=[doc_ref],
diagnostic="Unexpected exception during document "
"processing - see Drydock Troubleshooting "
"Guide for DD000"))
ps.set_status(hd_fields.ActionResult.Failure)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
(schema_ns, kind, version) = doc.get('schema', '').split('/')
if version == 'v1':
doc_processor = DeckhandIngester.v1_doc_handlers.get(kind, None)
else:
doc_processor = None
if doc_processor is None:
raise errors.IngesterError(
"Invalid document - Kind %s and Version %s" % (kind, version))
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('data', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
        considered valid. The schema is selected by the document's 'schema' field.
        Returns an empty list for valid documents; otherwise returns a list
        of all errors found.
:param doc: dictionary of the parsed document.
"""
schemaname = doc.get('schema', '')
(schema_ns, doc_kind, doc_version) = schemaname.split('/')
errors_found = []
if doc_version == 'v1':
if schemaname in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(schemaname))
for error in validator.iter_errors(doc.get('data', [])):
errors_found.append(error.message)
return errors_found
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
                    'tag_definition instance: %s' % (tag_model.type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
repos = data.get('repositories', None)
if repos:
model.repositories = self.process_drydock_region_repo_list(repos)
return model
def process_drydock_region_repo_list(self, data):
"""Process a package repository list.
:param data: The data from the ``repositories`` key in a Region document
"""
model = objects.RepositoryList()
for k, v in data.items():
if k == 'remove_unlisted':
model.remove_unlisted = v
else:
model.append(objects.Repository(name=k, **v))
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
model.location = copy.deepcopy(data.get('location', {}))
model.local_networks = [n for n in data.get('local_networks', [])]
return model
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
model.cpu_sets = data.get('cpu_sets', None) or dict()
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
model.hugepages_confs.append(conf)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
        :param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
for nf in nf.get('filter_set', []):
nf_model = objects.NodeFilter(**nf)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
self.log.error("Invalid address assignment %s on Node %s" %
(address, self.name))
return model
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
        Return a tuple of two lists: the first is a StorageDeviceList, the
        second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
                        part_model.volume_group = vv.get('volume_group')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
| en | 0.685509 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This data ingester will consume YAML site topology documents. Parse and save design data. :param content: String of valid Deckhand YAML :returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects # Hash the input to use as the cache key. This is not a security # related hash, so use cheap and fast MD5 Translate a YAML string into the internal Drydock model. Returns a tuple of a objects.TaskStatus instance to summarize all document processing and a list of models yielded by successful processing :param doc_blob: bytes representing a utf-8 encoded YAML string # tracking processing status to provide a complete summary of issues Process a parsed YAML document. :param doc: The dictionary from parsing the YAML document Validate a parsed document via jsonschema. If a schema for a document Kind is not available, the document is considered valid. Schema is chosen by the doc['kind'] field. Returns a empty list for valid documents, otherwise returns a list of all found errors :param doc: dictionary of the parsed document. Process the data/spec section of a Region document. :param name: the document name attribute :param data: the dictionary of the data/spec section # Need to add validation logic, we'll assume the input is # valid for now Process a package repository list. :param data: The data from the ``repositories`` key in a Region document Process the data/spec section of a Rack document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process the data/spec section of a NetworkLink document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process the data/spec section of a Network document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process the data/spec section of a HardwareProfile document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process the data/spec section of a HostProfile document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process the data/spec section of a BootAction document. :param name: the document name attribute :Param data: the dictionary of the parsed data/spec section Process a dictionary representing a BootAction Data Asset. :param asset_dict: dictionary representing the bootaction asset Process a dictionary representing a BootAction NodeFilter Set. :param nf: dictionary representing the bootaction nodefilter set. Process the data/spec section of a BaremetalNode document. :param name: the document name attribute :param data: the dictionary of the data/spec section Process fields common to the host-based documents. Update the provided model with the values of fields common to BaremetalNode and HostProfile documents. 
:param data: dictionary from YAML parsing of the document data/spec section :param model: instance of objects.HostProfile or objects.BaremetalNode to update # A null value indicates this interface should be removed # from any parent profiles Process the storage data for a node-based document. Return a tuple of of two lists the first is a StorageDeviceList, the second is a VolumeGroupList. :param storage: dictionary of the storage section of a document # Mapping of handlers for different document kinds | 1.913506 | 2 |
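A hedged usage sketch for the DeckhandIngester file above. The import path and the input file name are assumptions for illustration; parse_docs expects utf-8 encoded YAML bytes, so the file is read in binary mode.
# Illustrative only -- module path and file name are assumed, not taken from the record above.
from drydock_provisioner.ingester.plugins.deckhand import DeckhandIngester

def parse_site_definition(path='site_definition.yaml'):
    ingester = DeckhandIngester()
    with open(path, 'rb') as f:
        content = f.read()
    # ingest_data caches on an MD5 of the raw bytes and returns (validation status, [models])
    status, models = ingester.ingest_data(content=content)
    for m in models:
        print(type(m).__name__, getattr(m, 'name', None))
    return status, models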
porting_tools/package_xml_porter.py | nreplogle/ros2-migration-tools | 92 | 8630 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
""" Helper function to make creating an element with a text and tail easier """
if not attrib:
attrib = {}
element = etree.Element(tag, attrib=attrib)
element.text = text
element.tail = tail
return element
def tag_order(tag):
""" Returns integer to order tags """
if tag in PACKAGE_XML_ELEMENT_ORDER:
return PACKAGE_XML_ELEMENT_ORDER.index(tag)
return float("inf")
class PackageXMLPorter:
"""A class for porting a package.xml file from catkin to ament"""
@staticmethod
def port(tree, extra_rules=[]):
"""
Ports package.xml from catkin to ament
Arguments:
tree - the xml tree representing the package.xml file (output of etree.parse("package.xml"))
extra_rules - a list of functions to apply to the xml tree
Returns:
The new xml tree
"""
# Pulls out all methods in this class with name starting with "rule"
rules = get_functions_with(criteria=lambda name: name.startswith("rule"),
from_class=PackageXMLPorter)
package_root = tree.getroot()
for rule in rules + extra_rules:
rule(package_root)
# Make sure there's a final newline
package_root.tail = "\n"
# Reorder the elements
package_root[:] = sorted(list(package_root), key=lambda elem: tag_order(elem.tag))
# Correct indentation
PackageXMLPorter.indent_tree(elem=package_root, level=0)
#########################
# RULES #
#########################
@staticmethod
def rule_set_format(package_root):
# ROS 2 supports formats 2,3
package_root.set("format", "3")
@staticmethod
def rule_set_build_tool(package_root):
for elem in package_root.findall("buildtool_depend"):
if elem.text and elem.text.strip() == "catkin":
package_root.remove(elem)
package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))
@staticmethod
def rule_set_client_library(package_root):
for elem in list(package_root):
if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]
@staticmethod
def rule_add_export_build_type(package_root):
build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
export_elem = new_element(tag="export", text="\n ")
export_elem.append(build_elem)
package_root.append(export_elem)
@staticmethod
def rule_set_run_to_exec_depend(package_root):
for elem in package_root.findall("run_depend"):
elem.tag = "exec_depend"
@staticmethod
def rule_set_depend_to_run_exec(package_root):
for elem in package_root.findall("depend"):
elem.tag = "build_depend"
package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))
@staticmethod
def rule_update_message_gen_dependency(package_root):
message_generation_used = False
for elem in list(package_root):
            if elem.text in ("message_generation", "message_runtime"):
package_root.remove(elem)
message_generation_used = True
if message_generation_used:
package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))
#########################
# HELPERS #
#########################
@staticmethod
def indent_tree(elem, level):
if len(elem) > 0: # element has children
if elem.text is None or len(elem.text) == 0:
elem.text = "\n" + (" "*(level+1)) # sets the indent for the children
list(elem)[-1].tail = "\n" + " "*level
for child in list(elem)[:-1]:
child.tail = "\n" + (" "*(level+1))
PackageXMLPorter.indent_tree(elem=child, level=level+1)
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
| # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
""" Helper function to make creating an element with a text and tail easier """
if not attrib:
attrib = {}
element = etree.Element(tag, attrib=attrib)
element.text = text
element.tail = tail
return element
def tag_order(tag):
""" Returns integer to order tags """
if tag in PACKAGE_XML_ELEMENT_ORDER:
return PACKAGE_XML_ELEMENT_ORDER.index(tag)
return float("inf")
class PackageXMLPorter:
"""A class for porting a package.xml file from catkin to ament"""
@staticmethod
def port(tree, extra_rules=[]):
"""
Ports package.xml from catkin to ament
Arguments:
tree - the xml tree representing the package.xml file (output of etree.parse("package.xml"))
extra_rules - a list of functions to apply to the xml tree
Returns:
The new xml tree
"""
# Pulls out all methods in this class with name starting with "rule"
rules = get_functions_with(criteria=lambda name: name.startswith("rule"),
from_class=PackageXMLPorter)
package_root = tree.getroot()
for rule in rules + extra_rules:
rule(package_root)
# Make sure there's a final newline
package_root.tail = "\n"
# Reorder the elements
package_root[:] = sorted(list(package_root), key=lambda elem: tag_order(elem.tag))
# Correct indentation
PackageXMLPorter.indent_tree(elem=package_root, level=0)
#########################
# RULES #
#########################
@staticmethod
def rule_set_format(package_root):
# ROS 2 supports formats 2,3
package_root.set("format", "3")
@staticmethod
def rule_set_build_tool(package_root):
for elem in package_root.findall("buildtool_depend"):
if elem.text and elem.text.strip() == "catkin":
package_root.remove(elem)
package_root.append(new_element(tag="buildtool_depend", text="ament_cmake"))
@staticmethod
def rule_set_client_library(package_root):
for elem in list(package_root):
if elem.text and elem.text.strip() in CatkinToAmentMigration.CLIENT_CONVERSION:
elem.text = CatkinToAmentMigration.CLIENT_CONVERSION[elem.text.strip()]
@staticmethod
def rule_add_export_build_type(package_root):
build_elem = new_element(tag="build_type", text="ament_cmake", tail="\n ")
export_elem = new_element(tag="export", text="\n ")
export_elem.append(build_elem)
package_root.append(export_elem)
@staticmethod
def rule_set_run_to_exec_depend(package_root):
for elem in package_root.findall("run_depend"):
elem.tag = "exec_depend"
@staticmethod
def rule_set_depend_to_run_exec(package_root):
for elem in package_root.findall("depend"):
elem.tag = "build_depend"
package_root.append(new_element(tag="exec_depend", text=elem.text, attrib=elem.attrib))
@staticmethod
def rule_update_message_gen_dependency(package_root):
message_generation_used = False
for elem in list(package_root):
            if elem.text in ("message_generation", "message_runtime"):
package_root.remove(elem)
message_generation_used = True
if message_generation_used:
package_root.append(new_element(tag="buildtool_depend", text="rosidl_default_generators"))
package_root.append(new_element(tag="build_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="builtin_interfaces"))
package_root.append(new_element(tag="exec_depend", text="rosidl_default_runtime"))
package_root.append(new_element(tag="member_of_group", text="rosidl_interface_packages"))
#########################
# HELPERS #
#########################
@staticmethod
def indent_tree(elem, level):
if len(elem) > 0: # element has children
if elem.text is None or len(elem.text) == 0:
elem.text = "\n" + (" "*(level+1)) # sets the indent for the children
list(elem)[-1].tail = "\n" + " "*level
for child in list(elem)[:-1]:
child.tail = "\n" + (" "*(level+1))
PackageXMLPorter.indent_tree(elem=child, level=level+1)
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
| en | 0.775406 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. Contains a class and method for porting a package.xml file from catkin to ament Helper function to make creating an element with a text and tail easier Returns integer to order tags A class for porting a package.xml file from catkin to ament Ports package.xml from catkin to ament Arguments: tree - the xml tree representing the package.xml file (output of etree.parse("package.xml")) extra_rules - a list of functions to apply to the xml tree Returns: The new xml tree # Pulls out all methods in this class with name starting with "rule" # Make sure there's a final newline # Reorder the elements # Correct indentation ######################### # RULES # ######################### # ROS 2 supports formats 2,3 ######################### # HELPERS # ######################### # element has children # sets the indent for the children | 2.228052 | 2 |
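A sketch of the extra_rules hook documented in PackageXMLPorter.port above; the rule, the license value, and the file paths are illustrative, and the rule reuses the new_element helper defined in the same module.
import xml.etree.ElementTree as etree

def rule_add_license(package_root):
    # Hypothetical extra rule: ensure a <license> element exists (placeholder value).
    if package_root.find("license") is None:
        package_root.append(new_element(tag="license", text="Apache-2.0"))

tree = etree.parse("package.xml")  # assumed input path
PackageXMLPorter.port(tree=tree, extra_rules=[rule_add_license])
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)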
endpoints/api/permission_models_interface.py | giuseppe/quay | 2,027 | 8631 | import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class SaveException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(SaveException, self).__init__(str(other))
class DeleteException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(DeleteException, self).__init__(str(other))
class Role(namedtuple("Role", ["role_name"])):
def to_dict(self):
return {
"role": self.role_name,
}
class UserPermission(
namedtuple(
"UserPermission",
[
"role_name",
"username",
"is_robot",
"avatar",
"is_org_member",
"has_org",
],
)
):
def to_dict(self):
perm_dict = {
"role": self.role_name,
"name": self.username,
"is_robot": self.is_robot,
"avatar": self.avatar,
}
if self.has_org:
perm_dict["is_org_member"] = self.is_org_member
return perm_dict
class RobotPermission(
namedtuple(
"RobotPermission",
[
"role_name",
"username",
"is_robot",
"is_org_member",
],
)
):
def to_dict(self, user=None, team=None, org_members=None):
return {
"role": self.role_name,
"name": self.username,
"is_robot": True,
"is_org_member": self.is_org_member,
}
class TeamPermission(
namedtuple(
"TeamPermission",
[
"role_name",
"team_name",
"avatar",
],
)
):
def to_dict(self):
return {
"role": self.role_name,
"name": self.team_name,
"avatar": self.avatar,
}
@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
"""
Data interface used by permissions API.
"""
@abstractmethod
def get_repo_permissions_by_user(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(UserPermission)
"""
@abstractmethod
def get_repo_roles(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
list(Role) or None
"""
@abstractmethod
def get_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
UserPermission
"""
@abstractmethod
def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
role_name: string
Returns:
UserPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
void
Raises:
DeleteException
"""
@abstractmethod
def get_repo_permissions_by_team(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(TeamPermission)
"""
@abstractmethod
def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
Role
"""
@abstractmethod
def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
permission: string
Returns:
TeamPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
TeamPermission
Raises:
DeleteException
"""
| import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class SaveException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(SaveException, self).__init__(str(other))
class DeleteException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(DeleteException, self).__init__(str(other))
class Role(namedtuple("Role", ["role_name"])):
def to_dict(self):
return {
"role": self.role_name,
}
class UserPermission(
namedtuple(
"UserPermission",
[
"role_name",
"username",
"is_robot",
"avatar",
"is_org_member",
"has_org",
],
)
):
def to_dict(self):
perm_dict = {
"role": self.role_name,
"name": self.username,
"is_robot": self.is_robot,
"avatar": self.avatar,
}
if self.has_org:
perm_dict["is_org_member"] = self.is_org_member
return perm_dict
class RobotPermission(
namedtuple(
"RobotPermission",
[
"role_name",
"username",
"is_robot",
"is_org_member",
],
)
):
def to_dict(self, user=None, team=None, org_members=None):
return {
"role": self.role_name,
"name": self.username,
"is_robot": True,
"is_org_member": self.is_org_member,
}
class TeamPermission(
namedtuple(
"TeamPermission",
[
"role_name",
"team_name",
"avatar",
],
)
):
def to_dict(self):
return {
"role": self.role_name,
"name": self.team_name,
"avatar": self.avatar,
}
@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
"""
Data interface used by permissions API.
"""
@abstractmethod
def get_repo_permissions_by_user(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(UserPermission)
"""
@abstractmethod
def get_repo_roles(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
list(Role) or None
"""
@abstractmethod
def get_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
UserPermission
"""
@abstractmethod
def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
role_name: string
Returns:
UserPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
void
Raises:
DeleteException
"""
@abstractmethod
def get_repo_permissions_by_team(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(TeamPermission)
"""
@abstractmethod
def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
Role
"""
@abstractmethod
def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
permission: string
Returns:
TeamPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
TeamPermission
Raises:
DeleteException
""" | en | 0.29313 | Data interface used by permissions API. Args: namespace_name: string repository_name: string Returns: list(UserPermission) Args: username: string namespace_name: string repository_name: string Returns: list(Role) or None Args: username: string namespace_name: string repository_name: string Returns: UserPermission Args: username: string namespace_name: string repository_name: string role_name: string Returns: UserPermission Raises: SaveException Args: username: string namespace_name: string repository_name: string Returns: void Raises: DeleteException Args: namespace_name: string repository_name: string Returns: list(TeamPermission) Args: team_name: string namespace_name: string repository_name: string Returns: Role Args: team_name: string namespace_name: string repository_name: string permission: string Returns: TeamPermission Raises: SaveException Args: team_name: string namespace_name: string repository_name: string Returns: TeamPermission Raises: DeleteException | 3.16111 | 3 |
configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py | sht47/mmtracking | 12 | 8632 | _base_ = ['./tracktor_faster-rcnn_r50_fpn_4e_mot17-public-half.py']
model = dict(
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
))
data_root = 'data/MOT17/'
test_set = 'test'
data = dict(
train=dict(ann_file=data_root + 'annotations/train_cocoformat.json'),
val=dict(
ann_file=data_root + 'annotations/train_cocoformat.json',
detection_file=data_root + 'annotations/train_detections.pkl'),
test=dict(
ann_file=data_root + f'annotations/{test_set}_cocoformat.json',
img_prefix=data_root + test_set,
detection_file=data_root + f'annotations/{test_set}_detections.pkl'))
| _base_ = ['./tracktor_faster-rcnn_r50_fpn_4e_mot17-public-half.py']
model = dict(
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
))
data_root = 'data/MOT17/'
test_set = 'test'
data = dict(
train=dict(ann_file=data_root + 'annotations/train_cocoformat.json'),
val=dict(
ann_file=data_root + 'annotations/train_cocoformat.json',
detection_file=data_root + 'annotations/train_detections.pkl'),
test=dict(
ann_file=data_root + f'annotations/{test_set}_cocoformat.json',
img_prefix=data_root + test_set,
detection_file=data_root + f'annotations/{test_set}_detections.pkl')) | it | 0.33047 | # noqa: E251 # noqa: E501 | 1.60184 | 2 |
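A sketch of how a config like the one above is typically consumed; it assumes an mmcv 1.x environment with the mmtracking repository layout so the _base_ file resolves, and the path is illustrative.
from mmcv import Config

cfg = Config.fromfile(
    'configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py')
print(cfg.data.test.ann_file)        # data/MOT17/annotations/test_cocoformat.json
print(cfg.model.pretrains.detector)  # URL of the pretrained detector checkpoint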
browserstack/first_sample_build.py | Shaimyst/scrive_test | 0 | 8633 | from threading import Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# This array 'caps' defines the capabilities browser, device and OS combinations where the test will run
caps=[{
'os_version': '10',
'os': 'Windows',
'browser': 'ie',
'browser_version': '11.0',
'name': 'Parallel Test1', # test name
'build': 'browserstack-build-1' # Your tests will be organized within this build
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'chrome',
'browser_version': '95.0',
'name': 'Parallel Test2',
'build': 'browserstack-build-1'
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'firefox',
'browser_version': '93.0',
'name': 'Parallel Test3',
'build': 'browserstack-build-1'
}]
#run_session function searches for 'BrowserStack' on google.com
def run_session(desired_cap):
driver = webdriver.Remote(
command_executor='https://jessicasadler_RbBTVv:[email protected]/wd/hub',
desired_capabilities=desired_cap)
driver.get("https://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("BrowserStack")
elem.submit()
try:
WebDriverWait(driver, 5).until(EC.title_contains("BrowserStack"))
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"passed", "reason": "Title matched!"}}')
except TimeoutException:
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"failed", "reason": "Title not matched"}}')
print(driver.title)
driver.quit()
#The Thread function takes run_session function and each set of capability from the caps array as an argument to run each session parallelly
for cap in caps:
Thread(target=run_session, args=(cap,)).start() | from threading import Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# This array 'caps' defines the capabilities browser, device and OS combinations where the test will run
caps=[{
'os_version': '10',
'os': 'Windows',
'browser': 'ie',
'browser_version': '11.0',
'name': 'Parallel Test1', # test name
'build': 'browserstack-build-1' # Your tests will be organized within this build
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'chrome',
'browser_version': '95.0',
'name': 'Parallel Test2',
'build': 'browserstack-build-1'
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'firefox',
'browser_version': '93.0',
'name': 'Parallel Test3',
'build': 'browserstack-build-1'
}]
#run_session function searches for 'BrowserStack' on google.com
def run_session(desired_cap):
driver = webdriver.Remote(
command_executor='https://jessicasadler_RbBTVv:[email protected]/wd/hub',
desired_capabilities=desired_cap)
driver.get("https://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("BrowserStack")
elem.submit()
try:
WebDriverWait(driver, 5).until(EC.title_contains("BrowserStack"))
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"passed", "reason": "Title matched!"}}')
except TimeoutException:
driver.execute_script('browserstack_executor: {"action": "setSessionStatus", "arguments": {"status":"failed", "reason": "Title not matched"}}')
print(driver.title)
driver.quit()
#The Thread function takes run_session function and each set of capability from the caps array as an argument to run each session parallelly
for cap in caps:
Thread(target=run_session, args=(cap,)).start() | en | 0.696045 | # This array 'caps' defines the capabilities browser, device and OS combinations where the test will run # test name # Your tests will be organized within this build #run_session function searches for 'BrowserStack' on google.com #The Thread function takes run_session function and each set of capability from the caps array as an argument to run each session parallelly | 2.594955 | 3 |
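A variant sketch of the launch loop above that keeps the thread handles and joins them, so the script waits for every parallel session to finish before exiting; purely illustrative.
threads = [Thread(target=run_session, args=(cap,)) for cap in caps]
for t in threads:
    t.start()
for t in threads:
    t.join()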
sanitizers/mvj.py | suutari-ai/mvj | 1 | 8634 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
from random import choice
from string import digits
from faker import Faker
fake = Faker("fi_FI")
def sanitize_address(value):
return fake.address()
def sanitize_address_if_exist(value):
if value:
return sanitize_address(value)
def sanitize_business_id(value):
return fake.pystr_format(string_format="#######-#", letters="0123456789")
def sanitize_business_id_if_exist(value):
if value:
return sanitize_business_id(value)
def sanitize_city(value):
return fake.city()
def sanitize_city_if_exist(value):
if value:
return sanitize_city(value)
def sanitize_company(value):
return fake.company()
def sanitize_company_if_exist(value):
if value:
return sanitize_company(value)
def sanitize_email(value):
return fake.email()
def sanitize_email_if_exist(value):
if value:
return sanitize_email(value)
def sanitize_first_name(value):
return fake.first_name()
def sanitize_first_name_if_exist(value):
if value:
return sanitize_first_name(value)
def sanitize_generate_random_numbers(value):
return "".join([choice(digits) for i in range(random.randint(0, 10))])
def sanitize_generate_random_numbers_if_exist(value):
if value:
return sanitize_generate_random_numbers(value)
def sanitize_last_name(value):
    return fake.last_name()
def sanitize_last_name_if_exist(value):
if value:
return sanitize_last_name(value)
def sanitize_national_identification_number(value):
return fake.pystr_format(string_format="######-####", letters="0123456789")
def sanitize_national_identification_number_if_exist(value):
if value:
return sanitize_national_identification_number(value)
def sanitize_name(value):
return fake.name()
def sanitize_paragraph(value):
return fake.paragraph()
def sanitize_paragraph_if_exist(value):
if value:
return sanitize_paragraph(value)
def sanitize_phone_number(value):
return fake.phone_number()
def sanitize_phone_number_if_exist(value):
if value:
return sanitize_phone_number(value)
def sanitize_postcode(value):
return fake.postcode()
def sanitize_postcode_if_exist(value):
if value:
return sanitize_postcode(value)
def sanitize_url(value):
return fake.url()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
from random import choice
from string import digits
from faker import Faker
fake = Faker("fi_FI")
def sanitize_address(value):
return fake.address()
def sanitize_address_if_exist(value):
if value:
return sanitize_address(value)
def sanitize_business_id(value):
return fake.pystr_format(string_format="#######-#", letters="0123456789")
def sanitize_business_id_if_exist(value):
if value:
return sanitize_business_id(value)
def sanitize_city(value):
return fake.city()
def sanitize_city_if_exist(value):
if value:
return sanitize_city(value)
def sanitize_company(value):
return fake.company()
def sanitize_company_if_exist(value):
if value:
return sanitize_company(value)
def sanitize_email(value):
return fake.email()
def sanitize_email_if_exist(value):
if value:
return sanitize_email(value)
def sanitize_first_name(value):
return fake.first_name()
def sanitize_first_name_if_exist(value):
if value:
return sanitize_first_name(value)
def sanitize_generate_random_numbers(value):
return "".join([choice(digits) for i in range(random.randint(0, 10))])
def sanitize_generate_random_numbers_if_exist(value):
if value:
return sanitize_generate_random_numbers(value)
def sanitize_last_name(value):
    return fake.last_name()
def sanitize_last_name_if_exist(value):
if value:
return sanitize_last_name(value)
def sanitize_national_identification_number(value):
return fake.pystr_format(string_format="######-####", letters="0123456789")
def sanitize_national_identification_number_if_exist(value):
if value:
return sanitize_national_identification_number(value)
def sanitize_name(value):
return fake.name()
def sanitize_paragraph(value):
return fake.paragraph()
def sanitize_paragraph_if_exist(value):
if value:
return sanitize_paragraph(value)
def sanitize_phone_number(value):
return fake.phone_number()
def sanitize_phone_number_if_exist(value):
if value:
return sanitize_phone_number(value)
def sanitize_postcode(value):
return fake.postcode()
def sanitize_postcode_if_exist(value):
if value:
return sanitize_postcode(value)
def sanitize_url(value):
return fake.url()
| en | 0.230499 | # -*- coding: utf-8 -*- ######-#", letters="0123456789") #####-####", letters="0123456789") | 3.079708 | 3 |
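A usage sketch for the sanitizers above; seeding Faker and random as shown is only an assumption to make a local test run reproducible, and the input strings are fake data.
import random
from faker import Faker

Faker.seed(1234)
random.seed(1234)

print(sanitize_name("Example Person"))      # a fake Finnish name
print(sanitize_business_id("1234567-8"))    # a fake '#######-#' style business id
print(sanitize_phone_number_if_exist(""))   # falsy input -> returns None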
ISM_catalog_profile/scripts/ISM/ISM.py | rhmdnd/compliance-trestle-demos | 10 | 8635 | #!/usr/bin/env python3
# # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# limitations under the License.
"""Create ISM catalogs.
This script is used to convert the Australian Government Information Security Manual (ISM) into OSCAL formats.
The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a
similar set OSCAL documents to what NIST and FedRAMP are currently publishing.
It does this via pulling the ISM xml doc and creating:
1 Catalog for all the controls
4 profiles (Official, protected, secret, TS)
Ideally this would be a cron job based script, however, as ACSC publish revisions
with specific names this would need to be discovered by crawling. This will be a potential future enhancement.
This script pulls down the controls in a 'dumb' way from the xml to get the actual controls. A full featured catalog
will need to parse appropriate word / xml documents to provide groups /guidance.
"""
import io
import json
import logging
import pathlib
import sys
import urllib.request
import zipfile
from datetime import datetime
from uuid import uuid4
from ilcli import Command
import trestle.oscal.catalog as catalog
import trestle.oscal.common as common
import trestle.oscal.profile as profile
import xmltodict
# Globally define logging behaviour.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
remarks_tuple = '\n'.join(
[
'This is not an official version of the Australian Government Information Security Manual.',
'',
'Find the official versions here: https://www.cyber.gov.au/acsc/view-all-content/ism',
'This content was generated using scripts/ISM/ISM.py'
]
)
class ISMManager():
"""ISMManager a class to manage conversion of ISM artifacts into OSCAL."""
def __init__(self):
"""Initialize ISM manager. No required parameters."""
self._profile_controls = {'OFFICIAL': [], 'PROTECTED': [], 'SECRET': [], 'TOP_SECRET': []}
self._profiles = {}
def fetch_ism(self, url):
"""Fetch an Australian government ISM and covert to a dict."""
logger.debug('Fetching ISM from: ' + url)
request_url = urllib.request.urlopen(url)
document = request_url.read()
zipfile_content = zipfile.ZipFile(io.BytesIO(document))
content_list = zipfile_content.namelist()
xml_files = [x for x in content_list if '.xml' in x]
assert len(xml_files) == 1
self.ism_xml = xmltodict.parse(zipfile_content.open(xml_files[0]).read())
def _populate_control_list(self, control, raw_id):
"""Populate control lists based on a dict from the xml version of the ISM."""
# TODO: Really not pythonic but anyway.
control_id = 'control-' + raw_id
for security_level in self._profile_controls.keys():
# Dealing with schema changes 'Yes' and 'true' appear to both be valid options.
if control[security_level].lower() == 'yes' or control[security_level].lower() == 'true':
self._profile_controls[security_level].append(control_id)
def _probe_for_keys(self, ism_control):
"""Probe for the appropriate keys for l2 groups based on whether or not section exists."""
l2_group_key = 'Section'
if l2_group_key not in ism_control.keys():
l2_group_key = 'Topic'
return l2_group_key
def _name_clean(self, name: str) -> str:
"""Normalize string to ncname format."""
return name.strip().lower().replace(' ', '_').replace('/', '-')
def create_ism_catalog(self, version: str) -> None:
"""Parse ISM object and create a catalog."""
m = common.Metadata(
**{
'title': 'Australian Government Information Security manual',
'last-modified': datetime.now().astimezone(),
'version': version,
'oscal-version': '1.0.0',
'remarks': remarks_tuple
}
)
ism_catalog = catalog.Catalog(metadata=m, uuid=str(uuid4()))
# Create basic metadata:
ism_controls = self.ism_xml['ISM']['Control']
l2_group_key = self._probe_for_keys(ism_controls[0])
"""
Approach:
- Two levels of groups - no sub controls.
- below this will be parts
"""
# Get list of top level controls
tl_group_titles = set(map(lambda x: x['Guideline'], ism_controls))
groups = []
for tl_group_name in tl_group_titles:
group = catalog.Group(id=self._name_clean(tl_group_name), title=tl_group_name)
# now add l2 groups
control_subset = list(filter(lambda x: x['Guideline'] == tl_group_name, ism_controls))
# get set l2 group names.
l2_group_titles = set(map(lambda x: x[l2_group_key], control_subset))
l2_groups = []
for l2_group_name in l2_group_titles:
clean_id = self._name_clean(l2_group_name)
l2_group = catalog.Group(id=clean_id, title=l2_group_name)
# Now identify and add the controls
oscal_controls = []
l2_control_subset = list(filter(lambda x: x[l2_group_key] == l2_group_name, control_subset))
# now we can create and add controls.
# TODO: Make more pythonic
for ism_control in l2_control_subset:
raw_id = ism_control['Identifier']
description = ism_control['Description']
topic = ism_control['Topic']
# make description the part statement
statement_part = common.Part(id='control-' + raw_id + '-stmt', name='statement', prose=description)
# this is very minimal
oscal_control = catalog.Control(id='control-' + raw_id, title=topic, parts=[statement_part])
self._populate_control_list(ism_control, raw_id)
oscal_controls.append(oscal_control)
l2_group.controls = oscal_controls
l2_groups.append(l2_group)
group.groups = l2_groups
groups.append(group)
ism_catalog.groups = groups
self._ism_catalog = ism_catalog
def create_ism_profiles(self, revision_date, uri='./ISM_catalog.yaml'):
"""Create profile for each ISM environment."""
for security_level in self._profile_controls.keys():
ism_profile = profile.Profile(
uuid=str(uuid4()),
metadata=common.Metadata(
**{
'title': 'Australian Government Information Security Manual profile for ' + security_level,
'version': revision_date,
'oscal-version': '1.0.0',
'last-modified': datetime.now().astimezone(),
'remarks': remarks_tuple
}
),
imports=[profile.Import(href=uri)]
)
controls_list = self._profile_controls[security_level]
ism_profile.imports[0].include_controls = self._populate_import_include(controls_list)
self._profiles[security_level] = ism_profile
def _populate_import_include(self, control_list):
include_controls = []
selector = profile.SelectControlById()
selector.with_ids = control_list
include_controls.append(selector)
return include_controls
def write_catalog(self, catalogs_path, ism_name):
"""Wrap and write oscal catalog object."""
ism_dir_path = catalogs_path / ism_name
ism_dir_path.mkdir(exist_ok=True)
ism_file_path = ism_dir_path / 'catalog.json'
self._ism_catalog.oscal_write(ism_file_path)
def write_profiles(self, profiles_dir, ism_name):
"""Write out all profiles."""
for security_level in self._profiles.keys():
profile_dir = profiles_dir / (ism_name + '_' + security_level)
profile_dir.mkdir(exist_ok=True)
profile_path = profile_dir / 'profile.json'
self._profiles[security_level].oscal_write(profile_path)
class ISM(Command):
"""
Convert the Australian Government Information Security Manual (in its various versions) into catalogs and profiles.
This CLI makes presumptions about the resource structures that are returned.
Please note that this project currently presumes information about the project structure.
"""
def _init_arguments(self):
self.add_argument('-r', '--root-dir', help='Trestle project root.', default='./')
def _run(self, args):
# little test
root_dir = pathlib.Path(args.root_dir).resolve()
catalogs_dir = root_dir.joinpath('catalogs').resolve()
profiles_dir = root_dir.joinpath('profiles').resolve()
ism_json_file = root_dir.joinpath('scripts/ISM/ism_editions.json').resolve()
if not root_dir.exists():
logger.error('Root trestle project does not exist')
return 1
if not catalogs_dir.exists():
logger.error('Catalogs directory does not exist.')
return 1
if not profiles_dir.exists():
logger.error('Profiles directory does not exist.')
return 1
ism_versions = json.load(ism_json_file.open())
for ism_file in ism_versions['isms']:
# ISM file format: 'ISM - List of Security Controls (August 2019).xml'
logger.info(ism_file)
url = ism_file['version_url']
ism_manager = ISMManager()
ism_manager.fetch_ism(url)
revision_date = ism_file['version_name'].split()
revision_string = revision_date[0] + '_' + revision_date[1]
logger.info(f'Revision date: {revision_date}')
logger.info(f'Revision string: {revision_string}')
logger.info(revision_string)
ism_name = 'ISM_' + revision_string
ism_manager.create_ism_catalog(revision_string)
# This is presumed to be relative for now to the catalog repo based on this
ism_manager.write_catalog(catalogs_dir, ism_name)
ism_manager.create_ism_profiles(revision_string, 'trestle://' + ism_name + '/catalog.json')
ism_manager.write_profiles(profiles_dir, ism_name)
if __name__ == '__main__':
sys.exit(ISM().run())
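# A hedged invocation sketch (paths and edition naming are assumptions): run from a
# trestle project root that already contains catalogs/, profiles/ and
# scripts/ISM/ism_editions.json, e.g.
#
#   python scripts/ISM/ISM.py -r .
#
# For each edition listed in ism_editions.json this writes
# catalogs/ISM_<revision>/catalog.json plus
# profiles/ISM_<revision>_<LEVEL>/profile.json for OFFICIAL, PROTECTED, SECRET
# and TOP_SECRET.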
| #!/usr/bin/env python3
# # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create ISM catalogs.
This script is used to convert Australian Government Information Security Manual (ISM) into OSCAL formats.
The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a
similar set OSCAL documents to what NIST and FedRAMP are currently publishing.
It does this via pulling the ISM xml doc and creating:
1 Catalog for all the controls
4 profiles (Official, protected, secret, TS)
Ideally this would be a cron job based script, however, as ACSC publish revisions
with specific names this would need to be discovered by crawling. This will be a potential future enhancement.
This script pulls down the controls in a 'dumb' way from the xml to get the actual controls. A full featured catalog
will need to parse appropriate word / xml documents to provide groups /guidance.
"""
import io
import json
import logging
import pathlib
import sys
import urllib.request
import zipfile
from datetime import datetime
from uuid import uuid4
from ilcli import Command
import trestle.oscal.catalog as catalog
import trestle.oscal.common as common
import trestle.oscal.profile as profile
import xmltodict
# Globally define logging behaviour.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
remarks_tuple = '\n'.join(
[
'This is not an official version of the Australian Government Information Security Manual.',
'',
'Find the official versions here: https://www.cyber.gov.au/acsc/view-all-content/ism',
'This content was generated using scripts/ISM/ISM.py'
]
)
class ISMManager():
"""ISMManager a class to manage conversion of ISM artifacts into OSCAL."""
def __init__(self):
"""Initialize ISM manager. No required parameters."""
self._profile_controls = {'OFFICIAL': [], 'PROTECTED': [], 'SECRET': [], 'TOP_SECRET': []}
self._profiles = {}
def fetch_ism(self, url):
"""Fetch an Australian government ISM and covert to a dict."""
logger.debug('Fetching ISM from: ' + url)
request_url = urllib.request.urlopen(url)
document = request_url.read()
zipfile_content = zipfile.ZipFile(io.BytesIO(document))
content_list = zipfile_content.namelist()
xml_files = [x for x in content_list if '.xml' in x]
assert len(xml_files) == 1
self.ism_xml = xmltodict.parse(zipfile_content.open(xml_files[0]).read())
def _populate_control_list(self, control, raw_id):
"""Populate control lists based on a dict from the xml version of the ISM."""
# TODO: Really not pythonic but anyway.
control_id = 'control-' + raw_id
for security_level in self._profile_controls.keys():
# Dealing with schema changes 'Yes' and 'true' appear to both be valid options.
if control[security_level].lower() == 'yes' or control[security_level].lower() == 'true':
self._profile_controls[security_level].append(control_id)
def _probe_for_keys(self, ism_control):
"""Probe for the appropriate keys for l2 groups based on whether or not section exists."""
l2_group_key = 'Section'
if l2_group_key not in ism_control.keys():
l2_group_key = 'Topic'
return l2_group_key
def _name_clean(self, name: str) -> str:
"""Normalize string to ncname format."""
return name.strip().lower().replace(' ', '_').replace('/', '-')
def create_ism_catalog(self, version: str) -> None:
"""Parse ISM object and create a catalog."""
m = common.Metadata(
**{
'title': 'Australian Government Information Security manual',
'last-modified': datetime.now().astimezone(),
'version': version,
'oscal-version': '1.0.0',
'remarks': remarks_tuple
}
)
ism_catalog = catalog.Catalog(metadata=m, uuid=str(uuid4()))
# Create basic metadata:
ism_controls = self.ism_xml['ISM']['Control']
l2_group_key = self._probe_for_keys(ism_controls[0])
"""
Approach:
- Two levels of groups - no sub controls.
- below this will be parts
"""
# Get list of top level controls
tl_group_titles = set(map(lambda x: x['Guideline'], ism_controls))
groups = []
for tl_group_name in tl_group_titles:
group = catalog.Group(id=self._name_clean(tl_group_name), title=tl_group_name)
# now add l2 groups
control_subset = list(filter(lambda x: x['Guideline'] == tl_group_name, ism_controls))
# get set l2 group names.
l2_group_titles = set(map(lambda x: x[l2_group_key], control_subset))
l2_groups = []
for l2_group_name in l2_group_titles:
clean_id = self._name_clean(l2_group_name)
l2_group = catalog.Group(id=clean_id, title=l2_group_name)
# Now identify and add the controls
oscal_controls = []
l2_control_subset = list(filter(lambda x: x[l2_group_key] == l2_group_name, control_subset))
# now we can create and add controls.
# TODO: Make more pythonic
for ism_control in l2_control_subset:
raw_id = ism_control['Identifier']
description = ism_control['Description']
topic = ism_control['Topic']
# make description the part statement
statement_part = common.Part(id='control-' + raw_id + '-stmt', name='statement', prose=description)
# this is very minimal
oscal_control = catalog.Control(id='control-' + raw_id, title=topic, parts=[statement_part])
self._populate_control_list(ism_control, raw_id)
oscal_controls.append(oscal_control)
l2_group.controls = oscal_controls
l2_groups.append(l2_group)
group.groups = l2_groups
groups.append(group)
ism_catalog.groups = groups
self._ism_catalog = ism_catalog
def create_ism_profiles(self, revision_date, uri='./ISM_catalog.yaml'):
"""Create profile for each ISM environment."""
for security_level in self._profile_controls.keys():
ism_profile = profile.Profile(
uuid=str(uuid4()),
metadata=common.Metadata(
**{
'title': 'Australian Government Information Security Manual profile for ' + security_level,
'version': revision_date,
'oscal-version': '1.0.0',
'last-modified': datetime.now().astimezone(),
'remarks': remarks_tuple
}
),
imports=[profile.Import(href=uri)]
)
controls_list = self._profile_controls[security_level]
ism_profile.imports[0].include_controls = self._populate_import_include(controls_list)
self._profiles[security_level] = ism_profile
def _populate_import_include(self, control_list):
include_controls = []
selector = profile.SelectControlById()
selector.with_ids = control_list
include_controls.append(selector)
return include_controls
def write_catalog(self, catalogs_path, ism_name):
"""Wrap and write oscal catalog object."""
ism_dir_path = catalogs_path / ism_name
ism_dir_path.mkdir(exist_ok=True)
ism_file_path = ism_dir_path / 'catalog.json'
self._ism_catalog.oscal_write(ism_file_path)
def write_profiles(self, profiles_dir, ism_name):
"""Write out all profiles."""
for security_level in self._profiles.keys():
profile_dir = profiles_dir / (ism_name + '_' + security_level)
profile_dir.mkdir(exist_ok=True)
profile_path = profile_dir / 'profile.json'
self._profiles[security_level].oscal_write(profile_path)
class ISM(Command):
"""
Convert the Australian Government Information Security Manual (in its various versions) into catalogs and profiles.
This CLI makes presumptions about the resource structures that are returned.
Please note that this project currently presumes information about the project structure.
"""
def _init_arguments(self):
self.add_argument('-r', '--root-dir', help='Trestle project root.', default='./')
def _run(self, args):
# little test
root_dir = pathlib.Path(args.root_dir).resolve()
catalogs_dir = root_dir.joinpath('catalogs').resolve()
profiles_dir = root_dir.joinpath('profiles').resolve()
ism_json_file = root_dir.joinpath('scripts/ISM/ism_editions.json').resolve()
if not root_dir.exists():
logger.error('Root trestle project does not exist')
return 1
if not catalogs_dir.exists():
logger.error('Catalogs directory does not exist.')
return 1
if not profiles_dir.exists():
logger.error('Profiles directory does not exist.')
return 1
ism_versions = json.load(ism_json_file.open())
for ism_file in ism_versions['isms']:
# ISM file format: 'ISM - List of Security Controls (August 2019).xml'
logger.info(ism_file)
url = ism_file['version_url']
ism_manager = ISMManager()
ism_manager.fetch_ism(url)
revision_date = ism_file['version_name'].split()
revision_string = revision_date[0] + '_' + revision_date[1]
logger.info(f'Revision date: {revision_date}')
logger.info(f'Revision string: {revision_string}')
logger.info(revision_string)
ism_name = 'ISM_' + revision_string
ism_manager.create_ism_catalog(revision_string)
# This is presumed to be relative for now to the catalog repo based on this
ism_manager.write_catalog(catalogs_dir, ism_name)
ism_manager.create_ism_profiles(revision_string, 'trestle://' + ism_name + '/catalog.json')
ism_manager.write_profiles(profiles_dir, ism_name)
if __name__ == '__main__':
sys.exit(ISM().run()) | en | 0.861458 | #!/usr/bin/env python3 # # -*- mode:python; coding:utf-8 -*- # Copyright (c) 2020 IBM Corp. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # limitations under the License. Create ISM catalogs. This script is used to convert Australian Government Information Security Manual (ISM) into OSCAL formats. The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a similar set OSCAL documents to what NIST and FedRAMP are currently publishing. It does this via pulling the ISM xml doc and creating: 1 Catalog for all the controls 4 profiles (Official, protected, secret, TS) Ideally this would be a cron job based script, however, as ACSC publish revisions with specific names this would need to be discovered by crawling. This will be a potential future enhancement. This script pulls down the controls in a 'dumb' way from the xml to get the actual controls. A full featured catalog will need to parse appropriate word / xml documents to provide groups /guidance. # Globally define logging behaviour. ISMManager a class to manage conversion of ISM artifacts into OSCAL. Initialize ISM manager. No required parameters. Fetch an Australian government ISM and covert to a dict. Populate control lists based on a dict from the xml version of the ISM. # TODO: Really not pythonic but anyway. # Dealing with schema changes 'Yes' and 'true' appear to both be valid options. Probe for the appropriate keys for l2 groups based on whether or not section exists. Normalize string to ncname format. Parse ISM object and create a catalog. # Create basic metadata: Approach: - Two levels of groups - no sub controls. - below this will be parts # Get list of top level controls # now add l2 groups # get set l2 group names. # Now identify and add the controls # now we can create and add controls. # TODO: Make more pythonic # make description the part statement # this is very minimial Create profile for each ISM environment. Wrap and write oscal catalog object. Write out all profiles. Convert the Australian goverment information security manual (in various versions) into catalogs and profiles. This CLI has presumptions on resource structures that are returned. Please note that this project current presumes information about the project structure. # little test # ISM file format: 'ISM - List of Security Controls (August 2019).xml' # This is presumed to be relative for now to the catalog repo based on this | 1.821655 | 2 |
logistics/permissions.py | geo2tag-logistics/main | 0 | 8636 | from rest_framework import permissions
def is_owner(user):
return user.groups.filter(name='OWNER').exists()
def is_driver(user):
return user.groups.filter(name='DRIVER').exists()
class IsOwnerPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_owner(request.user)
class IsDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user)
class IsOwnerOrDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user) or is_owner(request.user)
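# A minimal usage sketch (the viewset, model and serializer below are hypothetical,
# not part of this module):
#
#   from rest_framework import viewsets
#
#   class FleetViewSet(viewsets.ModelViewSet):
#       queryset = Fleet.objects.all()
#       serializer_class = FleetSerializer
#       permission_classes = (IsOwnerPermission,)
#
# DRF calls has_permission() once per request, so only users in the OWNER group
# reach the view; IsOwnerOrDriverPermission relaxes that to either group.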
| from rest_framework import permissions
def is_owner(user):
return user.groups.filter(name='OWNER').exists()
def is_driver(user):
return user.groups.filter(name='DRIVER').exists()
class IsOwnerPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_owner(request.user)
class IsDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user)
class IsOwnerOrDriverPermission(permissions.BasePermission):
def has_permission(self, request, view):
return is_driver(request.user) or is_owner(request.user)
| none | 1 | 2.338171 | 2 |
|
src/python/reduce_fps_parallel.py | blancKaty/alignmentFralework_and_classif | 0 | 8637 | <filename>src/python/reduce_fps_parallel.py
import os
import shutil
import sys
import multiprocessing
import glob
def copy(source, dest):
shutil.copyfile(source, dest)
def main():
input_folder = sys.argv[1]
output_folder = sys.argv[2]
print 'input reduce fps : ' , sys.argv
fps = int(sys.argv[3]);
final_length=float(sys.argv[4]) ;
max_length=final_length * fps ;
print 'normalisation param : ' , fps , final_length , max_length
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
pool = multiprocessing.Pool(multiprocessing.cpu_count())
print "Using a Pool of", multiprocessing.cpu_count(), "processes"
X = sorted(next(os.walk(input_folder))[1])
print X
for x in X:
folder = os.path.join(output_folder, x)
os.mkdir(folder)
#Y = os.listdir(os.path.join(input_folder, x))
#print input_folder , x
Y = glob.glob(input_folder+"/"+x+"/*.jpg")
Y.sort()
sizeV=len(Y)
#print sizeV
if (sizeV > max_length) :
Y=Y[int(sizeV/2)-int(max_length/2): int(sizeV/2)+int(max_length/2)]
for idx, i in enumerate(range(0, len(Y), fps)):
y = Y[i]
source = y
#print y , "image_{:05d}.jpg".format(idx + 1)
y = "image_{:05d}.jpg".format(idx + 1)
dest = os.path.join(folder, y)
#print source , dest
pool.apply_async(copy, (source, dest))
pool.close()
pool.join()
if __name__ == '__main__':
main()
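# A hedged invocation sketch (Python 2, paths are assumptions):
#
#   python reduce_fps_parallel.py ./frames_in ./frames_out 5 12
#
# For each video folder under ./frames_in this keeps at most the middle
# 12 s * 5 fps = 60 source frames, then copies every 5th remaining frame to
# ./frames_out/<video>/image_00001.jpg, image_00002.jpg, ... using a process pool.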
| <filename>src/python/reduce_fps_parallel.py
import os
import shutil
import sys
import multiprocessing
import glob
def copy(source, dest):
shutil.copyfile(source, dest)
def main():
input_folder = sys.argv[1]
output_folder = sys.argv[2]
print 'input reduce fps : ' , sys.argv
fps = int(sys.argv[3]);
final_length=float(sys.argv[4]) ;
max_length=final_length * fps ;
print 'normalisation param : ' , fps , final_length , max_length
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
pool = multiprocessing.Pool(multiprocessing.cpu_count())
print "Using a Pool of", multiprocessing.cpu_count(), "processes"
X = sorted(next(os.walk(input_folder))[1])
print X
for x in X:
folder = os.path.join(output_folder, x)
os.mkdir(folder)
#Y = os.listdir(os.path.join(input_folder, x))
#print input_folder , x
Y = glob.glob(input_folder+"/"+x+"/*.jpg")
Y.sort()
sizeV=len(Y)
#print sizeV
if (sizeV > max_length) :
Y=Y[int(sizeV/2)-int(max_length/2): int(sizeV/2)+int(max_length/2)]
for idx, i in enumerate(range(0, len(Y), fps)):
y = Y[i]
source = y
#print y , "image_{:05d}.jpg".format(idx + 1)
y = "image_{:05d}.jpg".format(idx + 1)
dest = os.path.join(folder, y)
#print source , dest
pool.apply_async(copy, (source, dest))
pool.close()
pool.join()
if __name__ == '__main__':
main()
| ru | 0.080666 | #Y = os.listdir(os.path.join(input_folder, x)) #print input_folder , x #print sizeV #print y , "image_{:05d}.jpg".format(idx + 1) #print source , dest | 3.220473 | 3 |
ansible/utils/module_docs_fragments/docker.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 1 | 8638 | <gh_stars>1-10
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = '''
options:
docker_host:
description:
- "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
the module will automatically replace 'tcp' in the connection URL with 'https'."
required: false
default: "unix://var/run/docker.sock"
aliases:
- docker_url
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
default: localhost
required: false
api_version:
description:
- The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
supported by docker-py.
required: false
default: default provided by docker-py
aliases:
- docker_api_version
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
required: false
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
required: false
default: null
aliases:
- tls_ca_cert
cert_path:
description:
- Path to the client's TLS certificate file.
required: false
default: null
aliases:
- tls_client_cert
key_path:
description:
- Path to the client's TLS key file.
required: false
default: null
aliases:
- tls_client_key
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
required: false
default: "1.0"
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
default: false
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
https://docker-py.readthedocs.org/en/stable/machine/ for more details.
'''
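# A hedged usage sketch (the module name and values are assumptions) of a playbook
# task that relies on the shared options documented above:
#
#   - name: Talk to a TLS-protected Docker daemon
#     docker_container:
#       name: web
#       image: nginx
#       docker_host: tcp://192.0.2.23:2376
#       tls_verify: yes
#       cacert_path: /etc/docker/ca.pem
#       cert_path: /etc/docker/cert.pem
#       key_path: /etc/docker/key.pem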
| # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = '''
options:
docker_host:
description:
- "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
the module will automatically replace 'tcp' in the connection URL with 'https'."
required: false
default: "unix://var/run/docker.sock"
aliases:
- docker_url
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
default: localhost
required: false
api_version:
description:
- The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
supported by docker-py.
required: false
default: default provided by docker-py
aliases:
- docker_api_version
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
required: false
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
required: false
default: null
aliases:
- tls_ca_cert
cert_path:
description:
- Path to the client's TLS certificate file.
required: false
default: null
aliases:
- tls_client_cert
key_path:
description:
- Path to the client's TLS key file.
required: false
default: null
aliases:
- tls_client_key
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
required: false
default: "1.0"
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
default: false
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
https://docker-py.readthedocs.org/en/stable/machine/ for more details.
''' | en | 0.760021 | # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Docker doc fragment options: docker_host: description: - "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection, the module will automatically replace 'tcp' in the connection URL with 'https'." required: false default: "unix://var/run/docker.sock" aliases: - docker_url tls_hostname: description: - When verifying the authenticity of the Docker Host server, provide the expected name of the server. default: localhost required: false api_version: description: - The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by docker-py. required: false default: default provided by docker-py aliases: - docker_api_version timeout: description: - The maximum amount of time in seconds to wait on a response from the API. required: false default: 60 cacert_path: description: - Use a CA certificate when performing server verification by providing the path to a CA certificate file. required: false default: null aliases: - tls_ca_cert cert_path: description: - Path to the client's TLS certificate file. required: false default: null aliases: - tls_client_cert key_path: description: - Path to the client's TLS key file. required: false default: null aliases: - tls_client_key ssl_version: description: - Provide a valid SSL version number. Default value determined by docker-py, currently 1.0. required: false default: "1.0" tls: description: - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. default: false tls_verify: description: - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. default: false notes: - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION, DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped with the product that sets up the environment. It will set these variables for you. See https://docker-py.readthedocs.org/en/stable/machine/ for more details. | 1.494627 | 1 |
setup.py | cyberjunky/python-garminconnect-aio | 11 | 8639 | <reponame>cyberjunky/python-garminconnect-aio
#!/usr/bin/env python
from setuptools import setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
description="Asynchronous Garmin Connect Python 3 API wrapper",
name="garminconnect_aio",
keywords=["garmin connect", "api", "client"],
license="MIT license",
install_requires=["aiohttp >= 3.6", "yarl", "brotlipy"],
long_description_content_type="text/markdown",
long_description=readme,
url="https://github.com/cyberjunky/python-garminconnect-aio",
packages=["garminconnect_aio"],
version="0.1.4",
)
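# Hedged install sketch (the PyPI project name is assumed to be the `name` above
# with the underscore normalised to a dash):
#
#   pip install garminconnect-aio
#
# or, from a checkout of this repository:
#
#   pip install .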
| #!/usr/bin/env python
from setuptools import setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
description="Asynchronous Garmin Connect Python 3 API wrapper",
name="garminconnect_aio",
keywords=["garmin connect", "api", "client"],
license="MIT license",
install_requires=["aiohttp >= 3.6", "yarl", "brotlipy"],
long_description_content_type="text/markdown",
long_description=readme,
url="https://github.com/cyberjunky/python-garminconnect-aio",
packages=["garminconnect_aio"],
version="0.1.4",
) | ru | 0.26433 | #!/usr/bin/env python | 1.524663 | 2 |
nova/tests/unit/virt/libvirt/fake_imagebackend.py | ChameleonCloud/nova | 1 | 8640 | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
# Mock template functions passed to cache
self.mock_fetch_image = mock.create_autospec(libvirt_utils.fetch_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_image', self.mock_fetch_image))
self.mock_fetch_raw_image = \
mock.create_autospec(libvirt_utils.fetch_raw_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_raw_image',
self.mock_fetch_raw_image))
self.mock_create_ephemeral = \
mock.create_autospec(driver.LibvirtDriver._create_ephemeral)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral',
self.mock_create_ephemeral))
self.mock_create_swap = \
mock.create_autospec(driver.LibvirtDriver._create_swap)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_swap',
self.mock_create_swap))
# Backend.backend creates all Image objects
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in self.disks.items()
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
# NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
# instead of the getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
# Ditto for the 'is_shared_block_storage' function and
# 'is_file_in_instance_path'
def is_shared_block_storage():
return False
def is_file_in_instance_path():
return False
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
setattr(image_init, 'is_file_in_instance_path',
is_file_in_instance_path)
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# Execute the template function so we can test the arguments it was
# called with.
fetch_func(target=filename, *args, **kwargs)
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = disk_info['type']
info.target_bus = disk_info['bus']
info.target_dev = disk_info['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
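# A hedged usage sketch (test body and expectations are assumptions) of how a
# libvirt driver test might consume this fixture:
#
#   image_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())
#   ...  # exercise LibvirtDriver code that creates instance disks
#   self.assertIn('disk', image_backend.created_disks)
#   self.assertTrue(image_backend.disks['disk'].cache.called)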
| # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
class ImageBackendFixture(fixtures.Fixture):
def __init__(self, got_files=None, imported_files=None, exists=None):
"""This fixture mocks imagebackend.Backend.backend, which is the
only entry point to libvirt.imagebackend from libvirt.driver.
:param got_files: A list of {'filename': path, 'size': size} for every
file which was created.
:param imported_files: A list of (local_filename, remote_filename) for
every invocation of import_file().
:param exists: An optional lambda which takes the disk name as an
argument, and returns True if the disk exists,
False otherwise.
"""
self.got_files = got_files
self.imported_files = imported_files
self.disks = collections.defaultdict(self._mock_disk)
"""A dict of name -> Mock image object. This is a defaultdict,
so tests may access it directly before a disk has been created."""
self._exists = exists
def setUp(self):
super(ImageBackendFixture, self).setUp()
# Mock template functions passed to cache
self.mock_fetch_image = mock.create_autospec(libvirt_utils.fetch_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_image', self.mock_fetch_image))
self.mock_fetch_raw_image = \
mock.create_autospec(libvirt_utils.fetch_raw_image)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.utils.fetch_raw_image',
self.mock_fetch_raw_image))
self.mock_create_ephemeral = \
mock.create_autospec(driver.LibvirtDriver._create_ephemeral)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_ephemeral',
self.mock_create_ephemeral))
self.mock_create_swap = \
mock.create_autospec(driver.LibvirtDriver._create_swap)
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._create_swap',
self.mock_create_swap))
# Backend.backend creates all Image objects
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.Backend.backend',
self._mock_backend))
@property
def created_disks(self):
"""disks, filtered to contain only disks which were actually created
by calling a relevant method.
"""
# A disk was created iff either cache() or import_file() was called.
return {name: disk for name, disk in self.disks.items()
if any([disk.cache.called, disk.import_file.called])}
def _mock_disk(self):
# This is the generator passed to the disks defaultdict. It returns
# a mocked Image object, but note that the returned object has not
# yet been 'constructed'. We don't know at this stage what arguments
# will be passed to the constructor, so we don't know, eg, its type
# or path.
#
# The reason for this 2 phase construction is to allow tests to
# manipulate mocks for disks before they have been created. eg a
# test can do the following before executing the method under test:
#
# disks['disk'].cache.side_effect = ImageNotFound...
#
# When the 'constructor' (image_init in _mock_backend) later runs,
# it will return the same object we created here, and when the
# caller calls cache() it will raise the requested exception.
disk = mock.create_autospec(imagebackend.Image)
# NOTE(mdbooth): fake_cache and fake_import_file are for compatibility
# with existing tests which test got_files and imported_files. They
# should be removed when they have no remaining users.
disk.cache.side_effect = self._fake_cache
disk.import_file.side_effect = self._fake_import_file
# NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional
# output
disk.libvirt_info.side_effect = \
functools.partial(self._fake_libvirt_info, disk)
return disk
def _mock_backend(self, backend_self, image_type=None):
# This method mocks Backend.backend, which returns a subclass of Image
# (it returns a class, not an instance). This mocked method doesn't
# return a class; it returns a function which returns a Mock. IOW,
# instead of the getting a QCow2, the caller gets image_init,
# so instead of:
#
# QCow2(instance, disk_name='disk')
#
# the caller effectively does:
#
# image_init(instance, disk_name='disk')
#
# Therefore image_init() must have the same signature as an Image
# subclass constructor, and return a mocked Image object.
#
# The returned mocked Image object has the following additional
# properties which are useful for testing:
#
# * Calls with the same disk_name return the same object from
# self.disks. This means tests can assert on multiple calls for
# the same disk without worrying about whether they were also on
# the same object.
#
# * Mocked objects have an additional image_type attribute set to
# the image_type originally passed to Backend.backend() during
# their construction. Tests can use this to assert that disks were
# created of the expected type.
def image_init(instance=None, disk_name=None, path=None):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
path = os.path.join(
libvirt_utils.get_instance_path(instance), disk_name)
else:
disk_name = os.path.basename(path)
disk = self.disks[disk_name]
# Used directly by callers. These would have been set if called
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
# Used by tests to manipulate which disks exist.
if self._exists is not None:
# We don't just cache the return value here because the
# caller may want, eg, a test where the disk initially does not
# exist and later exists.
disk.exists.side_effect = lambda: self._exists(disk_name)
else:
disk.exists.return_value = True
return disk
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
# Ditto for the 'is_shared_block_storage' function and
# 'is_file_in_instance_path'
def is_shared_block_storage():
return False
def is_file_in_instance_path():
return False
setattr(image_init, 'is_shared_block_storage', is_shared_block_storage)
setattr(image_init, 'is_file_in_instance_path',
is_file_in_instance_path)
return image_init
def _fake_cache(self, fetch_func, filename, size=None, *args, **kwargs):
# Execute the template function so we can test the arguments it was
# called with.
fetch_func(target=filename, *args, **kwargs)
# For legacy tests which use got_files
if self.got_files is not None:
self.got_files.append({'filename': filename, 'size': size})
def _fake_import_file(self, instance, local_filename, remote_filename):
# For legacy tests which use imported_files
if self.imported_files is not None:
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = disk_info['type']
info.target_bus = disk_info['bus']
info.target_dev = disk_info['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
return info
| en | 0.890861 | # Copyright 2012 Grid Dynamics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. This fixture mocks imagebackend.Backend.backend, which is the only entry point to libvirt.imagebackend from libvirt.driver. :param got_files: A list of {'filename': path, 'size': size} for every file which was created. :param imported_files: A list of (local_filename, remote_filename) for every invocation of import_file(). :param exists: An optional lambda which takes the disk name as an argument, and returns True if the disk exists, False otherwise. A dict of name -> Mock image object. This is a defaultdict, so tests may access it directly before a disk has been created. # Mock template functions passed to cache # Backend.backend creates all Image objects disks, filtered to contain only disks which were actually created by calling a relevant method. # A disk was created iff either cache() or import_file() was called. # This is the generator passed to the disks defaultdict. It returns # a mocked Image object, but note that the returned object has not # yet been 'constructed'. We don't know at this stage what arguments # will be passed to the constructor, so we don't know, eg, its type # or path. # # The reason for this 2 phase construction is to allow tests to # manipulate mocks for disks before they have been created. eg a # test can do the following before executing the method under test: # # disks['disk'].cache.side_effect = ImageNotFound... # # When the 'constructor' (image_init in _mock_backend) later runs, # it will return the same object we created here, and when the # caller calls cache() it will raise the requested exception. # NOTE(mdbooth): fake_cache and fake_import_file are for compatibility # with existing tests which test got_files and imported_files. They # should be removed when they have no remaining users. # NOTE(mdbooth): test_virt_drivers assumes libvirt_info has functional # output # This method mocks Backend.backend, which returns a subclass of Image # (it returns a class, not an instance). This mocked method doesn't # return a class; it returns a function which returns a Mock. IOW, # instead of the getting a QCow2, the caller gets image_init, # so instead of: # # QCow2(instance, disk_name='disk') # # the caller effectively does: # # image_init(instance, disk_name='disk') # # Therefore image_init() must have the same signature as an Image # subclass constructor, and return a mocked Image object. # # The returned mocked Image object has the following additional # properties which are useful for testing: # # * Calls with the same disk_name return the same object from # self.disks. This means tests can assert on multiple calls for # the same disk without worrying about whether they were also on # the same object. # # * Mocked objects have an additional image_type attribute set to # the image_type originally passed to Backend.backend() during # their construction. Tests can use this to assert that disks were # created of the expected type. 
# There's nothing special about this path except that it's # predictable and unique for (instance, disk). # Used directly by callers. These would have been set if called # the real constructor. # Used by tests. Note that image_init is a closure over image_type. # Used by tests to manipulate which disks exist. # We don't just cache the return value here because the # caller may want, eg, a test where the disk initially does not # exist and later exists. # Set the SUPPORTS_CLONE member variable to mimic the Image base # class. # Ditto for the 'is_shared_block_storage' function and # 'is_file_in_instance_path' # Execute the template function so we can test the arguments it was # called with. # For legacy tests which use got_files # For legacy tests which use imported_files # For tests in test_virt_drivers which expect libvirt_info to be # functional | 2.142486 | 2 |
tests/test_email_subscriptions.py | coolboi567/dnstwister | 0 | 8641 | <filename>tests/test_email_subscriptions.py
"""Tests of the email subscription mechanism."""
import binascii
import flask_webtest
import mock
import pytest
import webtest.app
import dnstwister
import dnstwister.tools
import patches
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_bad_domains_fail(webapp):
"""Test the email views check domain validity."""
with pytest.raises(webtest.app.AppError) as err:
webapp.get('/email/subscribe/3234jskdnfsdf7y34')
assert '400 BAD REQUEST' in err.value.message
with pytest.raises(webtest.app.AppError) as err:
webapp.post('/email/pending_verify/3234jskdnfsdf7y34')
assert '400 BAD REQUEST' in err.value.message
def test_bad_error_codes(webapp):
"""Test the email error codes being weird doesn't break the page."""
normal_html = webapp.get('/email/subscribe/7777772e6578616d706c652e636f6d').html
assert webapp.get(
'/email/subscribe/7777772e6578616d706c652e636f6d/9',
expect_errors=True
).html == normal_html
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_verification_with_bad_id(webapp):
"""Test that verifying with a dud subscription id just redirects to root.
"""
response = webapp.get('/email/verify/1234', expect_errors=True)
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/'
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_with_no_subscriptions():
repository = dnstwister.repository
assert list(repository.isubscriptions()) == []
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_during_subscription():
repository = dnstwister.repository
domain = 'www.example.com'
email = '<EMAIL>'
sub_id = '1234'
repository.subscribe_email(sub_id, email, domain, False)
subs = list(repository.isubscriptions())
assert len(subs) == 1
assert sorted(subs[0][1].keys()) == [
'domain', 'email_address', 'hide_noisy'
]
assert subs[0][1]['domain'] == domain
assert subs[0][1]['email_address'] == email
assert subs[0][1]['hide_noisy'] == False
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_email_address_required():
app = flask_webtest.TestApp(dnstwister.app)
domain = 'a.com'
hexdomain = binascii.hexlify(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
subscribe_page = app.get(subscribe_path)
assert 'Email address is required' not in subscribe_page.body
subscribe_page.form['email_address'] = ' '
response = subscribe_page.form.submit()
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/email/subscribe/{}/0?hide_noisy=False'.format(hexdomain)
assert 'Email address is required' in response.follow().body
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_email_address_validation_remembers_hide_noisy_flag():
app = flask_webtest.TestApp(dnstwister.app)
domain = 'a.com'
hexdomain = binascii.hexlify(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
subscribe_page = app.get(subscribe_path)
subscribe_page.form['email_address'] = ' '
subscribe_page.form['hide_noisy'] = 'true'
response = subscribe_page.form.submit()
assert response.status_code == 302
assert response.headers['location'] == 'http://localhost/email/subscribe/{}/0?hide_noisy=True'.format(hexdomain)
assert 'Email address is required' in response.follow().body
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_link():
app = flask_webtest.TestApp(dnstwister.app)
emailer = dnstwister.views.www.email.emailer
repository = dnstwister.repository
assert emailer.sent_emails == []
domain = 'a.com'
hexdomain = dnstwister.tools.encode_domain(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
search_page = app.get('/search/{}'.format(hexdomain))
assert subscribe_path in search_page.body
subscribe_page = app.get(subscribe_path)
subscribe_page.form['email_address'] = '<EMAIL>'
subscribe_page.form.submit()
assert list(repository.isubscriptions()) == []
verify_code = repository.db.data.items()[0][0].split(
'email_sub_pending:'
)[1]
verify_path = '/email/verify/{}'.format(
verify_code
)
verify_url = 'http://localhost{}'.format(verify_path)
assert len(emailer.sent_emails) == 1
sent_email = emailer.sent_emails[0][:2]
assert sent_email == (
'<EMAIL>', 'Please verify your subscription'
)
assert verify_url in emailer.sent_emails[0][2]
subscribed_page = app.get(verify_path)
assert 'You are now subscribed' in subscribed_page.body
assert len(list(repository.isubscriptions())) == 1
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unsubscribe():
"""Test can unsubscribe."""
app = flask_webtest.TestApp(dnstwister.app)
repository = dnstwister.repository
domain = 'www.example.com'
email = '<EMAIL>'
sub_id = '1234'
assert len(list(repository.isubscriptions())) == 0
repository.subscribe_email(sub_id, email, domain, False)
assert len(list(repository.isubscriptions())) == 1
app.get('/email/unsubscribe/{}'.format(sub_id))
assert len(list(repository.isubscriptions())) == 0
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_isubscriptions_link_unicode():
app = flask_webtest.TestApp(dnstwister.app)
emailer = dnstwister.views.www.email.emailer
repository = dnstwister.repository
assert emailer.sent_emails == []
domain = u'\u0454a.com' # ea.com, but with a funny 'e'
hexdomain = dnstwister.tools.encode_domain(domain)
subscribe_path = '/email/subscribe/{}'.format(hexdomain)
search_page = app.get('/search/{}'.format(hexdomain))
assert subscribe_path in search_page.body
subscribe_page = app.get(subscribe_path)
assert '\xd1\x94a.com (xn--a-9ub.com)' in subscribe_page.body
subscribe_page.form['email_address'] = '<EMAIL>'
pending_page = subscribe_page.form.submit()
assert pending_page.request.url.endswith('pending_verify/786e2d2d612d3975622e636f6d')
assert '\xd1\x94a.com (xn--a-9ub.com)' in pending_page.body
assert list(repository.isubscriptions()) == []
verify_code = repository.db.data.items()[0][0].split(
'email_sub_pending:'
)[1]
verify_path = '/email/verify/{}'.format(
verify_code
)
verify_url = 'http://localhost{}'.format(verify_path)
assert len(emailer.sent_emails) == 1
sent_email = emailer.sent_emails[0][:2]
assert sent_email == (
'<EMAIL>', 'Please verify your subscription'
)
assert verify_url in emailer.sent_emails[0][2]
subscribed_page = app.get(verify_path)
assert 'You are now subscribed' in subscribed_page.body
assert '\xd1\x94a.com (xn--a-9ub.com)' in subscribed_page.body
assert len(list(repository.isubscriptions())) == 1
@mock.patch('dnstwister.views.www.email.emailer', patches.NoEmailer())
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unsubscribe_unicode():
"""Test can unsubscribe."""
app = flask_webtest.TestApp(dnstwister.app)
repository = dnstwister.repository
domain = u'www.\u0454xample.com'
email = '<EMAIL>'
sub_id = '1234'
assert len(list(repository.isubscriptions())) == 0
repository.subscribe_email(sub_id, email, domain, False)
assert len(list(repository.isubscriptions())) == 1
app.get('/email/unsubscribe/{}'.format(sub_id))
assert len(list(repository.isubscriptions())) == 0
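# Note on the opaque hex segments used in the URLs above: they are simply the
# hex-encoded domain names. A minimal illustration (standard library only; the
# project itself wraps this in dnstwister.tools.encode_domain):
#
#   >>> import binascii
#   >>> binascii.hexlify(b'www.example.com')
#   '7777772e6578616d706c652e636f6d'    # bytes in/out on Python 3
#
# which is exactly the segment exercised by test_bad_error_codes above.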
ershoufang/crawler_v2.py | zlikun/python-crawler-lianjia | 2 | 8642 | """
Version 2: a multi-process crawler for second-hand housing listings.
1. The crawler is split into a download job and a parse job (it could be split further, but that adds little here); each runs in its own child process and the two communicate through a data pipe.
2. The download job does not use a queue internally; task management and communication go through a task pipe (queues are awkward once a main process, child processes and process pools inside those children are all involved).
3. The parse job receives data from the pipe it shares with the download job, parses it and saves the result.
Open question: once the target has been fully crawled, how should the crawler be stopped?
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# There is no elegant way to share the set of already-processed URLs; a plain set plus a lock is used here to make it safe across processes
seen_urls = set()
lock = mp.Lock()
# Number of retries after a failed download
retries = 3
# Current date
today = datetime.date.today()
# Regular expressions for list-page and detail-page URLs
list_page_pattern = '^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = '^{}/\d+.html$'.format(base_url)
# Path of the output data file
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
    Download job.
    :param data_writer: data pipe (write end)
    :param init_tasks: initial set of tasks
:return:
"""
    # Build a process pool sized by CPU count: use 4 workers on machines with fewer than 4 cores, otherwise one per core
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
pool = mp.Pool(pool_size)
    # Tasks go through a pipe rather than a queue (queues run into all sorts of problems when child processes and process pools are combined)
(task_reader, task_writer) = mp.Pipe(duplex=False)
    # To keep the code simple, the initial tasks are sent through the task pipe and then received below
    # Alternatively this could be handled inside the loop: read from the task pipe only once the initial task set is exhausted
task_writer.send(init_tasks)
    # Keep reading task data from the task pipe and processing it
while True:
        # A task is a set of URLs
urls = task_reader.recv()
        # Download each URL through the pool; (content, url) tuples are sent out over the data pipe
for url in urls:
            # Skip URLs that have already been seen
with lock:
if url in seen_urls:
continue
else:
seen_urls.add(url)
            # Schedule the download task
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
    Download a page, retrying up to 3 times.
    :param url: URL to download
    :param task_writer: task pipe (write end)
    :param data_writer: data pipe (write end)
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
            # Extract the links contained in a list page
if is_list_page(url):
links = parse_list_page(content, url)
                # Send the detail-page links out through the task pipe
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
return
except RequestException:
            # Sleep for 2 seconds before retrying after an error
time.sleep(2)
    # Log an error once the retry limit has been exceeded
logging.error('重试{}次下载仍失败:{}'.format(retries, url))
    # Put the failed url back onto the task pipe
task_writer.send(set([url]))
def is_list_page(url):
"""
    Check whether the URL is a list page.
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
    Parser for list pages.
    :param content:
    :param url:
    :return: set of detail-page links
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
    Parser for detail pages.
    :param content:
    :param url:
    :return: the parsed listing data
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('元/平米', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('举报', '').strip()})
def start_parse_job(data_reader):
"""
    Parse job.
    :param data_reader: data pipe (read end)
:return:
"""
    # Build a process pool sized by CPU count: use 4 workers on machines with fewer than 4 cores, otherwise one per core
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
    # The parse job only uses half the pool size of the download job (adjust as needed; parsing is currently much faster than downloading, and this also avoids spawning too many processes)
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
    Parse a page.
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
        # Parse the detail page and return its data
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
    Process the parsed data.
:param data:
:return:
"""
if data is None:
return
    # Basic data clean-up
    # Fix incomplete housing-estate links
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
    # Data transformation
    # Extract the number of rooms from the house type
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('室')[0], data['house_type'])
    # Persist the data (appended to a CSV file named by date)
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
    # Initial set of tasks
init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
    # Create the pipe used for communication between the jobs (processes)
(data_reader, data_writer) = mp.Pipe(duplex=False)
    # Start the download job (write end)
mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
    # Start the parse job (read end)
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
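# One possible answer to the shutdown question raised in the module docstring
# (a sketch only, not wired into this script): push a sentinel such as None
# through the task and data pipes once no new URLs have been produced, and have
# each receiving loop treat it as a stop signal, e.g.
#
#   task = task_reader.recv()
#   if task is None:          # sentinel -> stop accepting work
#       break
#
# so that the pool.close()/pool.join() calls at the end of each job can finally run.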
| """
第二版:多进程二手房信息爬虫
1. 将爬虫分解为下载任务和解析任务(可以继续分解,但在本案中意义不大)两部分,两部分各使用一个子进程,相互通过数据管道通信
2. 下载任务内部不使用队列,使用任务管道实现(在多进程:主进程、子进程、子进程内部进程池等场景下,队列并不好用)任务管理和通信
3. 解析任务从与下载任务间的管道中获取数据,解析并保存
问题:当目标被爬完后,怎样让爬虫停止?
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# 已处理URL集合没有很好的表示方法,这里使用普通集合+锁来实现多进程场景下应用
seen_urls = set()
lock = mp.Lock()
# 下载失败重试次数
retries = 3
# 当前日期
today = datetime.date.today()
# 列表页、明细页URL正则表达式
list_page_pattern = '^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = '^{}/\d+.html$'.format(base_url)
# 数据存储路径
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
# 日志配置
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
下载任务(作业)
:param data_writer: 数据管道(写)
:param init_tasks: 初始任务集合
:return:
"""
# 构造进程池,按CPU核数初始化进程池大小,小于4核以4为准,否则以CPU核数为准
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
pool = mp.Pool(pool_size)
# 任务不使用队列(在这种进程中使用子进程和进程池的应用中,队列会遇到各种问题),使用管道实现
(task_reader, task_writer) = mp.Pipe(duplex=False)
# 为了简化代码,初始任务直接通过任务管道发送出去,再接收
# 也可以直接在循环代码中实现,当初始任务集合为空时,再使用任务管道接收任务
task_writer.send(init_tasks)
# 循环从任务管道中读取任务数据,并进行处理
while True:
# 任务是一组URL
urls = task_reader.recv()
# 使用进程池,分别下载这些URL,将下载后的文档内容和url构成的元组通过管道发出
for url in urls:
# 判断任务是否重复
with lock:
if url in seen_urls:
continue
else:
seen_urls.add(url)
# 执行下载任务
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
下载网页,最多重试3次
:param url: 下载url地址
:param task_writer: 任务管道(写)
:param data_writer: 数据管道(写)
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
# 抽取列表页的中链接列表
if is_list_page(url):
links = parse_list_page(content, url)
# 将详情页链接列表通过管道发出去
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
return
except RequestException:
# 异常时休眠2秒
time.sleep(2)
# 超过重试次数则打印错误消息
logging.error('重试{}次下载仍失败:{}'.format(retries, url))
# 将失败url重新加入任务队列
task_writer.send(set([url]))
def is_list_page(url):
"""
判断是否列表页
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
列表网页解析器
:param content:
:param url:
:return: 详情页链接集合
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
详情页解析器
:param content:
:param url:
:return: 返回详情数据
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('元/平米', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('举报', '').strip()})
def start_parse_job(data_reader):
"""
解析任务(作业)
:param data_reader: 数据管道(读)
:return:
"""
# 构造进程池,按CPU核数初始化进程池大小,小于4核以4为准,否则以CPU核数为准
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
# 解析任务只使用下载任务进程池规模的一半(视情况而定,目前其处理速度要远大于下载任务,也避免进程过多)
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
解析网页
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
# 解析详情页,返回数据
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
处理数据
:param data:
:return:
"""
if data is None:
return
# 数据基本处理
# 处理小区链接不完整问题
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
# 数据转换
# 提取户型中的室数
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('室')[0], data['house_type'])
# 数据存储(写入CSV文件,文件按日期生成)
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
# 初始任务集合
init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
# 创建管道,用于任务(进程)间通信
(data_reader, data_writer) = mp.Pipe(duplex=False)
# 启动下载任务(写端)
mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
# 启动解析任务(读端)
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
| zh | 0.977877 | 第二版:多进程二手房信息爬虫 1. 将爬虫分解为下载任务和解析任务(可以继续分解,但在本案中意义不大)两部分,两部分各使用一个子进程,相互通过数据管道通信 2. 下载任务内部不使用队列,使用任务管道实现(在多进程:主进程、子进程、子进程内部进程池等场景下,队列并不好用)任务管理和通信 3. 解析任务从与下载任务间的管道中获取数据,解析并保存 问题:当目标被爬完后,怎样让爬虫停止? # 已处理URL集合没有很好的表示方法,这里使用普通集合+锁来实现多进程场景下应用 # 下载失败重试次数 # 当前日期 # 列表页、明细页URL正则表达式 # 数据存储路径 # 日志配置 下载任务(作业) :param data_writer: 数据管道(写) :param init_tasks: 初始任务集合 :return: # 构造进程池,按CPU核数初始化进程池大小,小于4核以4为准,否则以CPU核数为准 # 任务不使用队列(在这种进程中使用子进程和进程池的应用中,队列会遇到各种问题),使用管道实现 # 为了简化代码,初始任务直接通过任务管道发送出去,再接收 # 也可以直接在循环代码中实现,当初始任务集合为空时,再使用任务管道接收任务 # 循环从任务管道中读取任务数据,并进行处理 # 任务是一组URL # 使用进程池,分别下载这些URL,将下载后的文档内容和url构成的元组通过管道发出 # 判断任务是否重复 # 执行下载任务 下载网页,最多重试3次 :param url: 下载url地址 :param task_writer: 任务管道(写) :param data_writer: 数据管道(写) :return: # 抽取列表页的中链接列表 # 将详情页链接列表通过管道发出去 # 异常时休眠2秒 # 超过重试次数则打印错误消息 # 将失败url重新加入任务队列 判断是否列表页 :param url: :return: 列表网页解析器 :param content: :param url: :return: 详情页链接集合 详情页解析器 :param content: :param url: :return: 返回详情数据 解析任务(作业) :param data_reader: 数据管道(读) :return: # 构造进程池,按CPU核数初始化进程池大小,小于4核以4为准,否则以CPU核数为准 # 解析任务只使用下载任务进程池规模的一半(视情况而定,目前其处理速度要远大于下载任务,也避免进程过多) 解析网页 :param content: :param url: :return: # 解析详情页,返回数据 处理数据 :param data: :return: # 数据基本处理 # 处理小区链接不完整问题 # 数据转换 # 提取户型中的室数 # 数据存储(写入CSV文件,文件按日期生成) # 初始任务集合 # 创建管道,用于任务(进程)间通信 # 启动下载任务(写端) # 启动解析任务(读端) | 2.702075 | 3 |
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | yetsun/hue | 5,079 | 8643 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
"""Offloads U2F signing to a pluggable command-line tool.
Offloads U2F signing to a signing plugin which takes the form of a
command-line tool. The command-line tool is configurable via the
SK_SIGNING_PLUGIN environment variable.
The signing plugin should implement the following interface:
Communication occurs over stdin/stdout, and messages are both sent and
received in the form:
[4 bytes - payload size (little-endian)][variable bytes - json payload]
Signing Request JSON
{
"type": "sign_helper_request",
"signData": [{
"keyHandle": <url-safe base64-encoded key handle>,
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"version": U2F protocol version (usually "U2F_V2")
},...],
"timeoutSeconds": <security key touch timeout>
}
Signing Response JSON
{
"type": "sign_helper_reply",
"code": <result code>.
"errorDetail": <text description of error>,
"responseData": {
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"keyHandle": <url-safe base64-encoded key handle>,
"version": <U2F protocol version>,
"signatureData": <url-safe base64-encoded signature>
}
}
Possible response error codes are:
NoError = 0
UnknownError = -127
TouchRequired = 0x6985
WrongData = 0x6a80
"""
def __init__(self, origin):
self.origin = origin
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# Ensure environment variable is present
plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
if plugin_cmd is None:
raise errors.PluginError('{} env var is not set'
.format(SK_SIGNING_PLUGIN_ENV_VAR))
# Prepare input to signer
client_data_map, signing_input = self._BuildPluginRequest(
app_id, challenge_data, self.origin)
# Call plugin
print_callback('Please insert and touch your security key\n')
response = self._CallPlugin([plugin_cmd], signing_input)
# Handle response
key_challenge_pair = (response['keyHandle'], response['challengeHash'])
client_data_json = client_data_map[key_challenge_pair]
client_data = client_data_json.encode()
return self._BuildAuthenticatorResponse(app_id, client_data, response)
def IsAvailable(self):
"""See base class."""
return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None
def _BuildPluginRequest(self, app_id, challenge_data, origin):
"""Builds a JSON request in the form that the plugin expects."""
client_data_map = {}
encoded_challenges = []
app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
for challenge_item in challenge_data:
key = challenge_item['key']
key_handle_encoded = self._Base64Encode(key.key_handle)
raw_challenge = challenge_item['challenge']
client_data_json = model.ClientData(
model.ClientData.TYP_AUTHENTICATION,
raw_challenge,
origin).GetJson()
challenge_hash_encoded = self._Base64Encode(
self._SHA256(client_data_json))
# Populate challenges list
encoded_challenges.append({
'appIdHash': app_id_hash_encoded,
'challengeHash': challenge_hash_encoded,
'keyHandle': key_handle_encoded,
'version': key.version,
})
# Populate ClientData map
key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
client_data_map[key_challenge_pair] = client_data_json
signing_request = {
'type': 'sign_helper_request',
'signData': encoded_challenges,
'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
'localAlways': True
}
return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
      raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data
def _SHA256(self, string):
"""Helper method to perform SHA256."""
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
def _Base64Encode(self, bytes_data):
"""Helper method to base64 encode, strip padding, and return str
result."""
return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
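# A minimal sketch (not part of pyu2f) of the plugin side of the wire format
# documented on CustomAuthenticator: a 4-byte little-endian length prefix
# followed by a JSON payload on stdin/stdout. The helper names below are
# hypothetical and only illustrate the framing; they reuse the json, struct
# and sys imports already made at the top of this module.
def _example_read_request(stream=None):
  """Reads one length-prefixed JSON request from a binary stream (sketch)."""
  stream = stream or sys.stdin.buffer
  length = struct.unpack('<I', stream.read(4))[0]
  return json.loads(stream.read(length).decode())


def _example_write_reply(reply, stream=None):
  """Writes one length-prefixed JSON reply to a binary stream (sketch)."""
  stream = stream or sys.stdout.buffer
  payload = json.dumps(reply).encode()
  stream.write(struct.pack('<I', len(payload)) + payload)
  stream.flush()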
kive/portal/management/commands/graph_kive.py | dmacmillan/Kive | 1 | 8644 | import itertools
import os
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Generates class diagrams.'
def handle(self, *args, **options):
if 'django_extensions' not in settings.INSTALLED_APPS:
exit('django_extensions not found, try using --setting kive.UML_settings')
docs_path = os.path.join(os.path.pardir, 'docs', 'models')
apps = [app for app in settings.INSTALLED_APPS
if not (app.startswith('django') or app == 'rest_framework')]
apps.sort()
for app in apps:
print(app)
exclude_models = ['User', 'Group']
if app != 'metadata':
exclude_models.append('AccessControl')
call_command("graph_models",
app,
pygraphviz=True,
group_models=True,
outputfile=os.path.join(docs_path, app+'.png'),
exclude_models=','.join(exclude_models))
readme_path = os.path.join(docs_path, 'README.md')
with open(readme_path, 'rU+') as f:
models_section = '### Models ###\n'
header = itertools.takewhile(lambda line: line != models_section,
f.readlines())
f.seek(0)
for line in header:
f.write(line)
f.write(models_section)
for app in apps:
f.write('#### {} ####\n'.format(app))
f.write('\n\n'.format(app, app))
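# Typical invocation (illustrative; the settings module name comes from the
# error message above, and --settings is the standard Django flag):
#
#   ./manage.py graph_kive --settings=kive.UML_settings
#
# This regenerates docs/models/<app>.png for every non-Django app and rewrites
# the "### Models ###" section of docs/models/README.md.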
summary.py | rpls/openlane_summary | 0 | 8645 | <reponame>rpls/openlane_summary
#!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
def is_tool(name):
return which(name) is not None
def check_path(path):
paths = glob.glob(path)
if len(paths) == 0:
exit("file not found: %s" % path)
if len(paths) > 1:
print("warning: glob pattern found too many files, using first one: %s" % paths[0])
return paths[0]
def openlane_date_sort(e):
datestamp = os.path.basename(e)
if re.match(r'^\d+\-\d+\_\d+\-\d+$',datestamp):
timestamp = datetime.datetime.strptime(datestamp, '%d-%m_%H-%M')
return timestamp.timestamp()
return datestamp
def summary_report(summary_file):
# print short summary of the csv file
status = None
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
if "violation" in key or "error" in key:
print("%30s : %20s" % (key, value))
if "AREA" in key:
area = float(value)
if "flow_status" in key:
status = value
print("area %d um^2" % (1e6 * area))
if status is not None: # newer OpenLANE has status, older ones don't
print("flow status: %s" % status)
def full_summary_report(summary_file):
# print short summary of the csv file
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
print("%30s : %20s" % (key, value))
def drc_report(drc_file):
last_drc = None
drc_count = 0
with open(drc_file) as drc:
for line in drc.readlines():
drc_count += 1
if '(' in line:
if last_drc is not None:
print("* %s (%d)" % (last_drc, drc_count/4))
last_drc = line.strip()
drc_count = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
    # either choose the design and iteration
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
if not args.top:
args.top = args.design
if not 'OPENLANE_ROOT' in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
        if not os.environ.get('PDK_ROOT'):
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
        path = os.path.join(run_path, 'logs', 'magic', 'magic.drc')  # don't use check_path() here: if DRC is clean the file is never created
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path))
| #!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
def is_tool(name):
return which(name) is not None
def check_path(path):
paths = glob.glob(path)
if len(paths) == 0:
exit("file not found: %s" % path)
if len(paths) > 1:
print("warning: glob pattern found too many files, using first one: %s" % paths[0])
return paths[0]
def openlane_date_sort(e):
datestamp = os.path.basename(e)
if re.match(r'^\d+\-\d+\_\d+\-\d+$',datestamp):
timestamp = datetime.datetime.strptime(datestamp, '%d-%m_%H-%M')
return timestamp.timestamp()
return datestamp
def summary_report(summary_file):
# print short summary of the csv file
status = None
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
if "violation" in key or "error" in key:
print("%30s : %20s" % (key, value))
if "AREA" in key:
area = float(value)
if "flow_status" in key:
status = value
print("area %d um^2" % (1e6 * area))
if status is not None: # newer OpenLANE has status, older ones don't
print("flow status: %s" % status)
def full_summary_report(summary_file):
# print short summary of the csv file
with open(summary_file) as fh:
summary = csv.DictReader(fh)
for row in summary:
for key, value in row.items():
print("%30s : %20s" % (key, value))
def drc_report(drc_file):
last_drc = None
drc_count = 0
with open(drc_file) as drc:
for line in drc.readlines():
drc_count += 1
if '(' in line:
if last_drc is not None:
print("* %s (%d)" % (last_drc, drc_count/4))
last_drc = line.strip()
drc_count = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
# either choose the design and interation
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
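    # Typical invocations, shown as a hedged illustration only -- the script
    # filename "summary.py" is an assumption, not taken from this file:
    #   python3 summary.py --design my_design --summary
    #   python3 summary.py --design my_design --drc
    #   python3 summary.py --design my_design --gds --run 2
    #   python3 summary.py --show-sky130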
if not args.top:
args.top = args.design
if not 'OPENLANE_ROOT' in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
        if not 'PDK_ROOT' in os.environ:
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
path = os.path.join(run_path, 'logs', 'magic', 'magic.drc') # don't check path because if DRC is clean, don't get the file
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path)) | en | 0.870178 | #!/usr/bin/env python3 # print short summary of the csv file # newer OpenLANE has status, older ones don't # print short summary of the csv file # either choose the design and interation # or show standard cells # optionally choose different name for top module and which run to use (default latest) # what to show # klayout for intermediate files # GDS3D for 3d view # if showing off the sky130 cells # otherwise need to know where openlane and the designs are # what run to show? # default is to use the latest # UI for asking for which run to use # use the given run # don't check path because if DRC is clean, don't get the file # post_techmap is created by https://github.com/efabless/openlane/pull/282 | 2.922829 | 3 |
var/spack/repos/builtin/packages/py-cupy/package.py | player1537-forks/spack | 11 | 8646 | <filename>var/spack/repos/builtin/packages/py-cupy/package.py<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCupy(PythonPackage):
"""CuPy is an open-source array library accelerated with
NVIDIA CUDA. CuPy provides GPU accelerated computing with
Python. CuPy uses CUDA-related libraries including cuBLAS,
cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make
full use of the GPU architecture."""
homepage = "https://cupy.dev/"
pypi = "cupy/cupy-8.0.0.tar.gz"
version('8.0.0', sha256='d1dcba5070dfa754445d010cdc952ff6b646d5f9bdcd7a63e8246e2472c3ddb8')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('cuda')
depends_on('nccl')
depends_on('cudnn')
| <filename>var/spack/repos/builtin/packages/py-cupy/package.py<gh_stars>10-100
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCupy(PythonPackage):
"""CuPy is an open-source array library accelerated with
NVIDIA CUDA. CuPy provides GPU accelerated computing with
Python. CuPy uses CUDA-related libraries including cuBLAS,
cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make
full use of the GPU architecture."""
homepage = "https://cupy.dev/"
pypi = "cupy/cupy-8.0.0.tar.gz"
version('8.0.0', sha256='d1dcba5070dfa754445d010cdc952ff6b646d5f9bdcd7a63e8246e2472c3ddb8')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('cuda')
depends_on('nccl')
depends_on('cudnn')
| en | 0.804218 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) CuPy is an open-source array library accelerated with NVIDIA CUDA. CuPy provides GPU accelerated computing with Python. CuPy uses CUDA-related libraries including cuBLAS, cuDNN, cuRand, cuSolver, cuSPARSE, cuFFT and NCCL to make full use of the GPU architecture. | 1.496661 | 1 |
simple_rest_client/decorators.py | cfytrok/python-simple-rest-client | 0 | 8647 | <filename>simple_rest_client/decorators.py
import logging
from functools import wraps
import status
from httpx import exceptions
from .exceptions import AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
logger = logging.getLogger(__name__)
def validate_response(response):
error_suffix = " response={!r}".format(response)
if response.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
raise AuthError("operation=auth_error," + error_suffix, response)
if response.status_code == status.HTTP_404_NOT_FOUND:
raise NotFoundError("operation=not_found_error," + error_suffix, response)
if status.is_client_error(code=response.status_code):
raise ClientError("operation=client_error," + error_suffix, response)
if status.is_server_error(code=response.status_code):
raise ServerError("operation=server_error," + error_suffix, response)
def handle_request_error(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (
exceptions.Timeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
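# Minimal usage sketch (illustrative only): `fetch` and the URL below are
# assumptions, not part of this module; any callable returning an httpx
# response object is handled the same way.
#
#   @handle_request_error
#   def fetch(url):
#       return httpx.get(url)
#
#   fetch("https://example.com/missing")  # a 404 response raises NotFoundError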
def handle_async_request_error(f):
    @wraps(f)
    async def wrapper(*args, **kwargs):
try:
response = await f(*args, **kwargs)
except (
            exceptions.ReadTimeout,
exceptions.WriteTimeout,
exceptions.PoolTimeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
| <filename>simple_rest_client/decorators.py
import logging
from functools import wraps
import status
from httpx import exceptions
from .exceptions import AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
logger = logging.getLogger(__name__)
def validate_response(response):
error_suffix = " response={!r}".format(response)
if response.status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
raise AuthError("operation=auth_error," + error_suffix, response)
if response.status_code == status.HTTP_404_NOT_FOUND:
raise NotFoundError("operation=not_found_error," + error_suffix, response)
if status.is_client_error(code=response.status_code):
raise ClientError("operation=client_error," + error_suffix, response)
if status.is_server_error(code=response.status_code):
raise ServerError("operation=server_error," + error_suffix, response)
def handle_request_error(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (
exceptions.Timeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
def handle_async_request_error(f):
    @wraps(f)
    async def wrapper(*args, **kwargs):
try:
response = await f(*args, **kwargs)
except (
            exceptions.ReadTimeout,
exceptions.WriteTimeout,
exceptions.PoolTimeout,
) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
| none | 1 | 2.428805 | 2 |
|
HPOBenchExperimentUtils/resource_manager/__init__.py | PhMueller/TrajectoryParser | 0 | 8648 | from HPOBenchExperimentUtils.resource_manager.file_resource_manager import FileBasedResourceManager
| from HPOBenchExperimentUtils.resource_manager.file_resource_manager import FileBasedResourceManager
| none | 1 | 1.189482 | 1 |
|
tools/mirrors.bzl | kkiningh/slime | 0 | 8649 | <reponame>kkiningh/slime<filename>tools/mirrors.bzl<gh_stars>0
DEFAULT_MIRRORS = {
"bitbucket": [
"https://bitbucket.org/{repository}/get/{commit}.tar.gz",
],
"buildifier": [
"https://github.com/bazelbuild/buildtools/releases/download/{version}/{filename}",
],
"github": [
"https://github.com/{repository}/archive/{commit}.tar.gz",
],
"pypi": [
"https://files.pythonhosted.org/packages/source/{p}/{package}/{package}-{version}.tar.gz",
],
}
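# Illustrative expansion of one of the templates above (a sketch; the repository
# name and commit hash are made-up placeholders, not real targets):
#   DEFAULT_MIRRORS["github"][0].format(repository = "someorg/somerepo", commit = "abc123")
#   -> "https://github.com/someorg/somerepo/archive/abc123.tar.gz"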
| DEFAULT_MIRRORS = {
"bitbucket": [
"https://bitbucket.org/{repository}/get/{commit}.tar.gz",
],
"buildifier": [
"https://github.com/bazelbuild/buildtools/releases/download/{version}/{filename}",
],
"github": [
"https://github.com/{repository}/archive/{commit}.tar.gz",
],
"pypi": [
"https://files.pythonhosted.org/packages/source/{p}/{package}/{package}-{version}.tar.gz",
],
} | none | 1 | 1.053172 | 1 |
|
201805_ChIP_ATAC/codes_old/read_txt.py | ScrippsPipkinLab/GenomeTracks | 0 | 8650 | <reponame>ScrippsPipkinLab/GenomeTracks
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 21:15:23 2017
@author: yolandatiao
"""
import csv
import glob
import os
from astropy.io import ascii # For using ascii table to open csv
from astropy.table import Table, Column # For using astropy table functions
os.chdir("/Volumes/Huitian/GSE88987/codes")
import fc_basic_astropy_subprocess as fc
os.chdir("/Volumes/Huitian/Genombrower/codes/txt")
flist=[]
for fname in glob.glob("*.txt"):
flist.append(fname)
nlist=[]
fnflist=[]
print len(flist)
for i in flist:
fnflist.append(i[:-4])
with open(i, "r") as fin:
rfin=csv.reader(fin, delimiter=",")
nlist.append(int(next(rfin)[0]))
#print nlist
outab=Table()
outab["filename_nf"]=fnflist
outab["bdgaccu"]=nlist
ascii.write(outab, "meta.csv", format="csv", overwrite=True)
metab=ascii.read("meta_write_bash.csv")
metab=fc.setcolnames(metab)
with open("bdgnorm.sh","r") as fin:
rfin=csv.reader(fin, delimiter=",")
inrow=next(rfin)[0]
print inrow
for x in xrange(0, len(metab)):
xshname="%s.sh"%x
with open(xshname, "w") as fout:
wfout=csv.writer(fout, delimiter="\t")
wfout.writerow(["cd /gpfs/home/hdiao/Geombrowser"])
outrow=inrow
osfactor=str(metab["1000000000_scalingfactor"][x])
ofname=str(metab["filename_nf"][x])
outrow=outrow.replace("sfactor", osfactor)
outrow=outrow.replace("inputfile", ofname)
fout.writelines(outrow)
with open("qsub.sh", "w") as fout:
for x in xrange(0, 66):
fout.writelines("qsub %s.sh"%x)
fout.writelines("\n")
os.chdir("/Volumes/Huitian/Genombrower/codes/rename")
meta=ascii.read("rename_meta.csv")
with open("rename.sh", "w") as fout:
for x in xrange(0, len(meta)):
fout.writelines("mv ")
fout.writelines(meta["oldname"][x])
fout.writelines(" ")
fout.writelines(meta["newnamenf"][x])
fout.writelines(".bdg")
fout.writelines("\n")
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 21:15:23 2017
@author: yolandatiao
"""
import csv
import glob
import os
from astropy.io import ascii # For using ascii table to open csv
from astropy.table import Table, Column # For using astropy table functions
os.chdir("/Volumes/Huitian/GSE88987/codes")
import fc_basic_astropy_subprocess as fc
os.chdir("/Volumes/Huitian/Genombrower/codes/txt")
flist=[]
for fname in glob.glob("*.txt"):
flist.append(fname)
nlist=[]
fnflist=[]
print len(flist)
for i in flist:
fnflist.append(i[:-4])
with open(i, "r") as fin:
rfin=csv.reader(fin, delimiter=",")
nlist.append(int(next(rfin)[0]))
#print nlist
outab=Table()
outab["filename_nf"]=fnflist
outab["bdgaccu"]=nlist
ascii.write(outab, "meta.csv", format="csv", overwrite=True)
metab=ascii.read("meta_write_bash.csv")
metab=fc.setcolnames(metab)
with open("bdgnorm.sh","r") as fin:
rfin=csv.reader(fin, delimiter=",")
inrow=next(rfin)[0]
print inrow
for x in xrange(0, len(metab)):
xshname="%s.sh"%x
with open(xshname, "w") as fout:
wfout=csv.writer(fout, delimiter="\t")
wfout.writerow(["cd /gpfs/home/hdiao/Geombrowser"])
outrow=inrow
osfactor=str(metab["1000000000_scalingfactor"][x])
ofname=str(metab["filename_nf"][x])
outrow=outrow.replace("sfactor", osfactor)
outrow=outrow.replace("inputfile", ofname)
fout.writelines(outrow)
with open("qsub.sh", "w") as fout:
for x in xrange(0, 66):
fout.writelines("qsub %s.sh"%x)
fout.writelines("\n")
os.chdir("/Volumes/Huitian/Genombrower/codes/rename")
meta=ascii.read("rename_meta.csv")
with open("rename.sh", "w") as fout:
for x in xrange(0, len(meta)):
fout.writelines("mv ")
fout.writelines(meta["oldname"][x])
fout.writelines(" ")
fout.writelines(meta["newnamenf"][x])
fout.writelines(".bdg")
fout.writelines("\n") | en | 0.450162 | #!/usr/bin/env python2 # -*- coding: utf-8 -*- Created on Tue Jun 6 21:15:23 2017 @author: yolandatiao # For using ascii table to open csv # For using astropy table functions #print nlist | 2.218445 | 2 |
tests/test_vendcrawler.py | josetaas/vendcrawler | 0 | 8651 | <reponame>josetaas/vendcrawler
import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
class TestVendCrawlerMethods(unittest.TestCase):
def test_get_links(self):
links = VendCrawler('a', 'b', 'c').get_links(2)
self.assertEqual(links,
['https://sarahserver.net/?module=vendor&p=1',
'https://sarahserver.net/?module=vendor&p=2'])
def test_get_page_count(self):
with open('test_vendcrawler.html', 'r') as f:
data = f.read()
page_count = VendCrawler('a', 'b', 'c').get_page_count(str(data))
self.assertEqual(int(page_count), 84)
if __name__ == '__main__':
unittest.main()
| import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
class TestVendCrawlerMethods(unittest.TestCase):
def test_get_links(self):
links = VendCrawler('a', 'b', 'c').get_links(2)
self.assertEqual(links,
['https://sarahserver.net/?module=vendor&p=1',
'https://sarahserver.net/?module=vendor&p=2'])
def test_get_page_count(self):
with open('test_vendcrawler.html', 'r') as f:
data = f.read()
page_count = VendCrawler('a', 'b', 'c').get_page_count(str(data))
self.assertEqual(int(page_count), 84)
if __name__ == '__main__':
unittest.main() | none | 1 | 2.926129 | 3 |
|
services/spotify-service.py | thk4711/mediamanager | 0 | 8652 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import json
import os
import sys
import urllib
import socket
import argparse
import requests
import lib.common as common
base_url = 'http://localhost:24879/player/'
#------------------------------------------------------------------------------#
# do something on startup #
#------------------------------------------------------------------------------#
def init():
global port
check_port()
script_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_path)
parser = argparse.ArgumentParser(description='media manager spotify connect service')
parser.add_argument('-p', '--port', type=int, help='WEB server port', required=True)
args = parser.parse_args()
port = args.port
#------------------------------------------------------------------------------#
# check if librespot-java is running #
#------------------------------------------------------------------------------#
def check_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('localhost', 24879))
if result == 0:
sock.close()
return
print("Please check if SpoCon is configured correctly and running", file = sys.stderr )
sock.close()
exit(1)
#------------------------------------------------------------------------------#
# get metadata from spotify #
#------------------------------------------------------------------------------#
def get_metadata():
meta_data = {}
global current_cover
try:
current_track = get_player()
album = current_track['item']['album']
current_cover = album['images'][0]['url']
tmp_cover = current_cover
tmp_cover=tmp_cover.replace('https://i.scdn.co/image/','')
meta_data['track'] = current_track['item']['name']
meta_data['album'] = album['name']
meta_data['artist'] = album['artists'][0]['name']
meta_data['cover'] = 'external_' + tmp_cover
meta_data['playstatus'] = get_play_status()
if meta_data['playstatus'] == False:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
return(bytes(json.dumps(meta_data), 'utf-8'))
except:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
meta_data['playstatus'] = False
return(bytes(json.dumps(meta_data), 'utf-8'))
#------------------------------------------------------------------------------#
# get play status #
#------------------------------------------------------------------------------#
def get_play_status(mode=False):
playing = False
ret_val = False
ret_str = 'NO'
try:
current_track = get_player()
playing = current_track['is_playing']
except:
pass
if playing == True:
try:
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
data = ret.json()
if 'current' in data:
ret_str = 'YES'
ret_val = True
get_player()
except:
pass
if mode:
return(bytes(ret_str, 'utf-8'))
return(ret_val)
#------------------------------------------------------------------------------#
# get whats currently playing #
#------------------------------------------------------------------------------#
def get_current():
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# get player data from API #
#------------------------------------------------------------------------------#
def get_player():
path = 'http://localhost:24879/web-api/v1/me/player'
ret = requests.get(url = path)
return ret.json()
#------------------------------------------------------------------------------#
#                  read cover image from spotify connect web                  #
#------------------------------------------------------------------------------#
def read_cover_image():
webURL = urllib.request.urlopen(current_cover)
data = webURL.read()
return(data)
#------------------------------------------------------------------------------#
# play next song #
#------------------------------------------------------------------------------#
def next():
requests.post(url = base_url + 'next')
#------------------------------------------------------------------------------#
#                              play previous song                             #
#------------------------------------------------------------------------------#
def prev():
requests.post(url = base_url + 'prev')
#------------------------------------------------------------------------------#
# start playing #
#------------------------------------------------------------------------------#
def play():
requests.post(url = base_url + 'resume')
#------------------------------------------------------------------------------#
# stop playing #
#------------------------------------------------------------------------------#
def pause():
requests.post(url = base_url + 'pause')
#------------------------------------------------------------------------------#
# handle http get request #
#------------------------------------------------------------------------------#
def respond_to_get_request(data):
if 'action' not in data:
return(bytes('failed', 'utf-8'))
if data['action'] == 'play':
play()
elif data['action'] == 'pause':
pause()
elif data['action'] == 'prev':
get_metadata()
prev()
elif data['action'] == 'next':
get_metadata()
next()
elif data['action'] == 'metadata':
return(get_metadata())
elif data['action'] == 'coverimage':
return(read_cover_image())
elif data['action'] == 'getplaystatus':
return(get_play_status(True))
return(bytes('OK', 'utf-8'))
#------------------------------------------------------------------------------#
# main program #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)
while True:
time.sleep(2000)
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import json
import os
import sys
import urllib
import socket
import argparse
import requests
import lib.common as common
base_url = 'http://localhost:24879/player/'
#------------------------------------------------------------------------------#
# do something on startup #
#------------------------------------------------------------------------------#
def init():
global port
check_port()
script_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_path)
parser = argparse.ArgumentParser(description='media manager spotify connect service')
parser.add_argument('-p', '--port', type=int, help='WEB server port', required=True)
args = parser.parse_args()
port = args.port
#------------------------------------------------------------------------------#
# check if librespot-java is running #
#------------------------------------------------------------------------------#
def check_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('localhost', 24879))
if result == 0:
sock.close()
return
print("Please check if SpoCon is configured correctly and running", file = sys.stderr )
sock.close()
exit(1)
#------------------------------------------------------------------------------#
# get metadata from spotify #
#------------------------------------------------------------------------------#
def get_metadata():
meta_data = {}
global current_cover
try:
current_track = get_player()
album = current_track['item']['album']
current_cover = album['images'][0]['url']
tmp_cover = current_cover
tmp_cover=tmp_cover.replace('https://i.scdn.co/image/','')
meta_data['track'] = current_track['item']['name']
meta_data['album'] = album['name']
meta_data['artist'] = album['artists'][0]['name']
meta_data['cover'] = 'external_' + tmp_cover
meta_data['playstatus'] = get_play_status()
if meta_data['playstatus'] == False:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
return(bytes(json.dumps(meta_data), 'utf-8'))
except:
meta_data['track'] = ''
meta_data['album'] = ''
meta_data['artist'] = ''
meta_data['cover'] = 'images/pause.png'
meta_data['playstatus'] = False
return(bytes(json.dumps(meta_data), 'utf-8'))
#------------------------------------------------------------------------------#
# get play status #
#------------------------------------------------------------------------------#
def get_play_status(mode=False):
playing = False
ret_val = False
ret_str = 'NO'
try:
current_track = get_player()
playing = current_track['is_playing']
except:
pass
if playing == True:
try:
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
data = ret.json()
if 'current' in data:
ret_str = 'YES'
ret_val = True
get_player()
except:
pass
if mode:
return(bytes(ret_str, 'utf-8'))
return(ret_val)
#------------------------------------------------------------------------------#
# get whats currently playing #
#------------------------------------------------------------------------------#
def get_current():
path = 'http://localhost:24879/player/current/'
ret = requests.post(url = path)
return ret.json()
#------------------------------------------------------------------------------#
# get player data from API #
#------------------------------------------------------------------------------#
def get_player():
path = 'http://localhost:24879/web-api/v1/me/player'
ret = requests.get(url = path)
return ret.json()
#------------------------------------------------------------------------------#
#                  read cover image from spotify connect web                  #
#------------------------------------------------------------------------------#
def read_cover_image():
webURL = urllib.request.urlopen(current_cover)
data = webURL.read()
return(data)
#------------------------------------------------------------------------------#
# play next song #
#------------------------------------------------------------------------------#
def next():
requests.post(url = base_url + 'next')
#------------------------------------------------------------------------------#
#                              play previous song                             #
#------------------------------------------------------------------------------#
def prev():
requests.post(url = base_url + 'prev')
#------------------------------------------------------------------------------#
# start playing #
#------------------------------------------------------------------------------#
def play():
requests.post(url = base_url + 'resume')
#------------------------------------------------------------------------------#
# stop playing #
#------------------------------------------------------------------------------#
def pause():
requests.post(url = base_url + 'pause')
#------------------------------------------------------------------------------#
# handle http get request #
#------------------------------------------------------------------------------#
def respond_to_get_request(data):
if 'action' not in data:
return(bytes('failed', 'utf-8'))
if data['action'] == 'play':
play()
elif data['action'] == 'pause':
pause()
elif data['action'] == 'prev':
get_metadata()
prev()
elif data['action'] == 'next':
get_metadata()
next()
elif data['action'] == 'metadata':
return(get_metadata())
elif data['action'] == 'coverimage':
return(read_cover_image())
elif data['action'] == 'getplaystatus':
return(get_play_status(True))
return(bytes('OK', 'utf-8'))
#------------------------------------------------------------------------------#
# main program #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)
while True:
time.sleep(2000)
| en | 0.105235 | #!/usr/bin/python3 # -*- coding: utf-8 -*- #------------------------------------------------------------------------------# # do something on startup # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # check if librespot-java is running # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # get metadata from spotify # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # get play status # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # get whats currently playing # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # get player data from API # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # read cover image fom spotify connect web # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # play next song # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # play previuous song # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # start playing # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # stop playing # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # handle http get request # #------------------------------------------------------------------------------# #------------------------------------------------------------------------------# # main program # #------------------------------------------------------------------------------# | 2.451741 | 2 |
Segment/models/other/fcn.py | YuHe0108/cvmodule | 0 | 8653 | # from tensorflow.keras import Model, Input
# from tensorflow.keras.applications import vgg16, resnet50
# from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
# from tensorflow.keras import layers
# import tensorflow as tf
#
# """
# FCN-8特点:
# 1、不含全连接层(fc)的全卷积(fully conv)网络。可适应任意尺寸输入。
# 2、增大数据尺寸的反卷积(deconv)层。能够输出精细的结果。
# 3、结合不同深度层结果的跳级(skip)结构。同时确保鲁棒性和精确性。
# 4、使用 skip 结构融合多层(3层)输出,底层网络可以预测更多的位置信息,因为感受野小可以看到小的 pixels
# 上采样 lower-resolution layers 时,如果采样后的图因为 padding 等原因和前面的图大小不同,使用 crop,
# 当裁剪成大小相同的,spatially aligned ,使用 concat 操作融合两个层。
#
# FCN-8、FCN-16、FCN-32的区别与联系: 最后上采样的过程中,放大的倍数,
# 1、区别: FCN模型会输出三种尺寸的特征图: [b, 16, 16, filters], 这时候直接上采样32倍,可以得到 [b, 16*32, 16*32, n_classes],
# 如果直接上采样 32 倍预测输出,被称为 FCN-32。
# FCN-16 和 FCN-8 则是融合了不同阶段的特征图,最终输出的时候,上采样16倍和8倍得到。
# """
#
#
# def fcn8_helper(input_shape, num_classes, backbone):
# assert input_shape[0] % 32 == 0
# assert input_shape[1] % 32 == 0
#
# inputs = Input(input_shape)
# if backbone == 'vgg16':
# base_model = vgg16.VGG16(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=100)
# elif backbone == 'resnet50':
# base_model = resnet50.ResNet50(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=1000)
# assert isinstance(base_model, Model)
# base_model.trainable = False # 是否固定特征提取单元
#
# out = Conv2D(
# filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
# kernel_initializer="he_normal", name="score_fr")(out)
#
# # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
# out = Conv2DTranspose(
# filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
#
# fcn8 = Model(inputs=inputs, outputs=out)
# return fcn8
#
#
# def fcn8_model(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16')
#
# # "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, filters]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# def fcn8_model_resnet50(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50')
#
# # "block4_pool" shape: [B, 16, 16, 1024] 跳跃连接融合低级特征:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, 512]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# if __name__ == '__main__':
# # m = FCN8(15, 320, 320)
# # from keras.utils import plot_model
# #
# # plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# # print(len(m.layers))
# model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1)
# model_1.summary()
# # inputs = tf.keras.Input((256, 256, 3))
# # base_model = resnet50.ResNet50(input_tensor=inputs,
# # include_top=False,
# # weights='imagenet',
# # pooling=None,
# # classes=1000)
# # base_model.summary()
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
"""
FCN-8 key points:
    1. A fully convolutional (fully conv) network without fully connected (fc) layers, so it can handle inputs of arbitrary size.
    2. Deconvolution (deconv) layers that enlarge the data size and produce fine-grained outputs.
    3. A skip structure that combines results from layers of different depths, giving both robustness and precision.
    4. The skip structure fuses the outputs of multiple (3) layers; shallow layers predict more positional information because their small receptive fields see small pixels.
    When upsampling lower-resolution layers, if the upsampled map differs in size from the earlier map (because of padding, etc.), use crop;
    once both are the same size and spatially aligned, use concat to fuse the two layers.
Differences and relations between FCN-8, FCN-16 and FCN-32: the magnification factor used in the final upsampling step.
    1. Difference: the FCN backbone yields feature maps at three scales, e.g. [b, 16, 16, filters]; upsampling this by 32x directly gives [b, 16*32, 16*32, n_classes],
        and predicting by directly upsampling 32x is called FCN-32.
        FCN-16 and FCN-8 instead fuse feature maps from different stages and obtain the final output by upsampling 16x and 8x.
"""
def fcn8_helper(input_shape, num_classes, weight_name='imagenet'):
assert input_shape[0] % 32 == 0
assert input_shape[1] % 32 == 0
inputs = Input(input_shape)
base_model = vgg16.VGG16(input_tensor=inputs,
include_top=False,
weights=weight_name,
pooling=None,
classes=100)
assert isinstance(base_model, Model)
    # base_model.trainable = False  # whether to freeze the feature-extraction layers
out = Conv2D(
filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
kernel_initializer="he_normal", name="score_fr")(out)
# [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
out = Conv2DTranspose(
filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
fcn8 = Model(inputs=inputs, outputs=out)
return fcn8
def fcn8_model(input_shape, num_classes):
fcn8 = fcn8_helper(input_shape, num_classes)
# "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
skip_con1 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
Summed = add(inputs=[skip_con1, fcn8.output])
# [B, 32, 32, num_classes]
x = Conv2DTranspose(
num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
# block3_pool: [B, 32, 32, filters]
skip_con2 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
Summed2 = add(inputs=[skip_con2, x])
    # upsample by 8x, going directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
outputs = Conv2DTranspose(
num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
activation='sigmoid', name="upsample")(Summed2)
if num_classes == 1:
outputs = layers.Activation('sigmoid')(outputs)
else:
outputs = layers.Softmax()(outputs)
fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# for layer_ in fcn_model.layers[:]:
# layer_.trainable = True
return fcn_model
if __name__ == '__main__':
# m = FCN8(15, 320, 320)
# from keras.utils import plot_model
#
# plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# print(len(m.layers))
model_1 = fcn8_model(input_shape=(256, 256, 3), num_classes=1)
model_1.summary()
| # from tensorflow.keras import Model, Input
# from tensorflow.keras.applications import vgg16, resnet50
# from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
# from tensorflow.keras import layers
# import tensorflow as tf
#
# """
# FCN-8特点:
# 1、不含全连接层(fc)的全卷积(fully conv)网络。可适应任意尺寸输入。
# 2、增大数据尺寸的反卷积(deconv)层。能够输出精细的结果。
# 3、结合不同深度层结果的跳级(skip)结构。同时确保鲁棒性和精确性。
# 4、使用 skip 结构融合多层(3层)输出,底层网络可以预测更多的位置信息,因为感受野小可以看到小的 pixels
# 上采样 lower-resolution layers 时,如果采样后的图因为 padding 等原因和前面的图大小不同,使用 crop,
# 当裁剪成大小相同的,spatially aligned ,使用 concat 操作融合两个层。
#
# FCN-8、FCN-16、FCN-32的区别与联系: 最后上采样的过程中,放大的倍数,
# 1、区别: FCN模型会输出三种尺寸的特征图: [b, 16, 16, filters], 这时候直接上采样32倍,可以得到 [b, 16*32, 16*32, n_classes],
# 如果直接上采样 32 倍预测输出,被称为 FCN-32。
# FCN-16 和 FCN-8 则是融合了不同阶段的特征图,最终输出的时候,上采样16倍和8倍得到。
# """
#
#
# def fcn8_helper(input_shape, num_classes, backbone):
# assert input_shape[0] % 32 == 0
# assert input_shape[1] % 32 == 0
#
# inputs = Input(input_shape)
# if backbone == 'vgg16':
# base_model = vgg16.VGG16(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=100)
# elif backbone == 'resnet50':
# base_model = resnet50.ResNet50(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=1000)
# assert isinstance(base_model, Model)
# base_model.trainable = False # 是否固定特征提取单元
#
# out = Conv2D(
# filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
# kernel_initializer="he_normal", name="score_fr")(out)
#
# # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
# out = Conv2DTranspose(
# filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
#
# fcn8 = Model(inputs=inputs, outputs=out)
# return fcn8
#
#
# def fcn8_model(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16')
#
# # "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, filters]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# def fcn8_model_resnet50(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50')
#
# # "block4_pool" shape: [B, 16, 16, 1024] 跳跃连接融合低级特征:
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, 512]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# if __name__ == '__main__':
# # m = FCN8(15, 320, 320)
# # from keras.utils import plot_model
# #
# # plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# # print(len(m.layers))
# model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1)
# model_1.summary()
# # inputs = tf.keras.Input((256, 256, 3))
# # base_model = resnet50.ResNet50(input_tensor=inputs,
# # include_top=False,
# # weights='imagenet',
# # pooling=None,
# # classes=1000)
# # base_model.summary()
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
"""
FCN-8 key points:
    1. A fully convolutional (fully conv) network without fully connected (fc) layers, so it can handle inputs of arbitrary size.
    2. Deconvolution (deconv) layers that enlarge the data size and produce fine-grained outputs.
    3. A skip structure that combines results from layers of different depths, giving both robustness and precision.
    4. The skip structure fuses the outputs of multiple (3) layers; shallow layers predict more positional information because their small receptive fields see small pixels.
    When upsampling lower-resolution layers, if the upsampled map differs in size from the earlier map (because of padding, etc.), use crop;
    once both are the same size and spatially aligned, use concat to fuse the two layers.
Differences and relations between FCN-8, FCN-16 and FCN-32: the magnification factor used in the final upsampling step.
    1. Difference: the FCN backbone yields feature maps at three scales, e.g. [b, 16, 16, filters]; upsampling this by 32x directly gives [b, 16*32, 16*32, n_classes],
        and predicting by directly upsampling 32x is called FCN-32.
        FCN-16 and FCN-8 instead fuse feature maps from different stages and obtain the final output by upsampling 16x and 8x.
"""
def fcn8_helper(input_shape, num_classes, weight_name='imagenet'):
assert input_shape[0] % 32 == 0
assert input_shape[1] % 32 == 0
inputs = Input(input_shape)
base_model = vgg16.VGG16(input_tensor=inputs,
include_top=False,
weights=weight_name,
pooling=None,
classes=100)
assert isinstance(base_model, Model)
    # base_model.trainable = False  # whether to freeze the feature-extraction layers
out = Conv2D(
filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
out = Dropout(rate=0.5)(out)
out = Conv2D(
filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
kernel_initializer="he_normal", name="score_fr")(out)
# [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
out = Conv2DTranspose(
filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
fcn8 = Model(inputs=inputs, outputs=out)
return fcn8
def fcn8_model(input_shape, num_classes):
fcn8 = fcn8_helper(input_shape, num_classes)
# "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征:
skip_con1 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
Summed = add(inputs=[skip_con1, fcn8.output])
# [B, 32, 32, num_classes]
x = Conv2DTranspose(
num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
# block3_pool: [B, 32, 32, filters]
skip_con2 = Conv2D(
num_classes, kernel_size=(1, 1), padding="same", activation=None,
kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
Summed2 = add(inputs=[skip_con2, x])
    # upsample by 8x, going directly from [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
outputs = Conv2DTranspose(
num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
activation='sigmoid', name="upsample")(Summed2)
if num_classes == 1:
outputs = layers.Activation('sigmoid')(outputs)
else:
outputs = layers.Softmax()(outputs)
fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# for layer_ in fcn_model.layers[:]:
# layer_.trainable = True
return fcn_model
if __name__ == '__main__':
# m = FCN8(15, 320, 320)
# from keras.utils import plot_model
#
# plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# print(len(m.layers))
model_1 = fcn8_model(input_shape=(256, 256, 3), num_classes=1)
model_1.summary()
| en | 0.279609 | # from tensorflow.keras import Model, Input # from tensorflow.keras.applications import vgg16, resnet50 # from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation) # from tensorflow.keras import layers # import tensorflow as tf # # """ # FCN-8特点: # 1、不含全连接层(fc)的全卷积(fully conv)网络。可适应任意尺寸输入。 # 2、增大数据尺寸的反卷积(deconv)层。能够输出精细的结果。 # 3、结合不同深度层结果的跳级(skip)结构。同时确保鲁棒性和精确性。 # 4、使用 skip 结构融合多层(3层)输出,底层网络可以预测更多的位置信息,因为感受野小可以看到小的 pixels # 上采样 lower-resolution layers 时,如果采样后的图因为 padding 等原因和前面的图大小不同,使用 crop, # 当裁剪成大小相同的,spatially aligned ,使用 concat 操作融合两个层。 # # FCN-8、FCN-16、FCN-32的区别与联系: 最后上采样的过程中,放大的倍数, # 1、区别: FCN模型会输出三种尺寸的特征图: [b, 16, 16, filters], 这时候直接上采样32倍,可以得到 [b, 16*32, 16*32, n_classes], # 如果直接上采样 32 倍预测输出,被称为 FCN-32。 # FCN-16 和 FCN-8 则是融合了不同阶段的特征图,最终输出的时候,上采样16倍和8倍得到。 # """ # # # def fcn8_helper(input_shape, num_classes, backbone): # assert input_shape[0] % 32 == 0 # assert input_shape[1] % 32 == 0 # # inputs = Input(input_shape) # if backbone == 'vgg16': # base_model = vgg16.VGG16(input_tensor=inputs, # include_top=False, # weights='imagenet', # pooling=None, # classes=100) # elif backbone == 'resnet50': # base_model = resnet50.ResNet50(input_tensor=inputs, # include_top=False, # weights='imagenet', # pooling=None, # classes=1000) # assert isinstance(base_model, Model) # base_model.trainable = False # 是否固定特征提取单元 # # out = Conv2D( # filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output) # out = Dropout(rate=0.5)(out) # out = Conv2D( # filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out) # out = Dropout(rate=0.5)(out) # out = Conv2D( # filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu", # kernel_initializer="he_normal", name="score_fr")(out) # # # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes] # out = Conv2DTranspose( # filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out) # # fcn8 = Model(inputs=inputs, outputs=out) # return fcn8 # # # def fcn8_model(input_shape, num_classes): # fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16') # # # "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征: # skip_con1 = Conv2D( # num_classes, kernel_size=(1, 1), padding="same", activation=None, # kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output) # Summed = add(inputs=[skip_con1, fcn8.output]) # # # [B, 32, 32, num_classes] # x = Conv2DTranspose( # num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed) # # # block3_pool: [B, 32, 32, filters] # skip_con2 = Conv2D( # num_classes, kernel_size=(1, 1), padding="same", activation=None, # kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output) # Summed2 = add(inputs=[skip_con2, x]) # # # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes] # outputs = Conv2DTranspose( # num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid", # activation='sigmoid', name="upsample")(Summed2) # # if num_classes == 1: # outputs = layers.Activation('sigmoid')(outputs) # else: # outputs = layers.Softmax()(outputs) # # fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s') # return fcn_model # # # def fcn8_model_resnet50(input_shape, num_classes): # fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50') # # # "block4_pool" shape: [B, 16, 16, 1024] 跳跃连接融合低级特征: # skip_con1 = Conv2D( # num_classes, 
kernel_size=(1, 1), padding="same", activation=None, # kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output) # Summed = add(inputs=[skip_con1, fcn8.output]) # # # [B, 32, 32, num_classes] # x = Conv2DTranspose( # num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed) # # # block3_pool: [B, 32, 32, 512] # skip_con2 = Conv2D( # num_classes, kernel_size=(1, 1), padding="same", activation=None, # kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output) # Summed2 = add(inputs=[skip_con2, x]) # # # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes] # outputs = Conv2DTranspose( # num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid", # activation='sigmoid', name="upsample")(Summed2) # # if num_classes == 1: # outputs = layers.Activation('sigmoid')(outputs) # else: # outputs = layers.Softmax()(outputs) # # fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s') # return fcn_model # # # if __name__ == '__main__': # # m = FCN8(15, 320, 320) # # from keras.utils import plot_model # # # # plot_model(m, show_shapes=True, to_file='model_fcn8.png') # # print(len(m.layers)) # model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1) # model_1.summary() # # inputs = tf.keras.Input((256, 256, 3)) # # base_model = resnet50.ResNet50(input_tensor=inputs, # # include_top=False, # # weights='imagenet', # # pooling=None, # # classes=1000) # # base_model.summary() FCN-8特点:
1、不含全连接层(fc)的全卷积(fully conv)网络。可适应任意尺寸输入。
2、增大数据尺寸的反卷积(deconv)层。能够输出精细的结果。
3、结合不同深度层结果的跳级(skip)结构。同时确保鲁棒性和精确性。
4、使用 skip 结构融合多层(3层)输出,底层网络可以预测更多的位置信息,因为感受野小可以看到小的 pixels
上采样 lower-resolution layers 时,如果采样后的图因为 padding 等原因和前面的图大小不同,使用 crop,
当裁剪成大小相同的,spatially aligned ,使用 concat 操作融合两个层。
FCN-8、FCN-16、FCN-32的区别与联系: 最后上采样的过程中,放大的倍数,
1、区别: FCN模型会输出三种尺寸的特征图: [b, 16, 16, filters], 这时候直接上采样32倍,可以得到 [b, 16*32, 16*32, n_classes],
如果直接上采样 32 倍预测输出,被称为 FCN-32。
FCN-16 和 FCN-8 则是融合了不同阶段的特征图,最终输出的时候,上采样16倍和8倍得到。 # base_model.trainable = False # 是否固定特征提取单元 # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes] # "block4_pool" shape: [B, 16, 16, 512] 跳跃连接融合低级特征: # [B, 32, 32, num_classes] # block3_pool: [B, 32, 32, filters] # 上采样8倍, 直接由 [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes] # for layer_ in fcn_model.layers[:]: # layer_.trainable = True # m = FCN8(15, 320, 320) # from keras.utils import plot_model # # plot_model(m, show_shapes=True, to_file='model_fcn8.png') # print(len(m.layers)) | 2.593547 | 3 |
tests/Python/test_all_configs_output.py | lopippo/IsoSpec | 27 | 8654 | <filename>tests/Python/test_all_configs_output.py
def binom(n, k):
"""Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient"""
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def max_confs_cnt(formula=""):
"""Get the maximal number of configurations for a given chemical formula."""
from IsoSpecPy import IsoParamsFromFormula
f = IsoParamsFromFormula(formula)
if f.atomCount:
N = 1
for n, p in zip(f.atomCount, f.prob):
N *= binom(n+len(p)-1, n)
return N
else:
return 0
def test_max_confs_cnt():
assert max_confs_cnt("O100") == 5151
assert max_confs_cnt("O100N10S6") == 4759524
test_formulas = [ 'O100',
'O100N10S6',
'C100H202',
'S10H20' ]
def test_all_configs_output_cnt():
"""Test if IsoSpecPy output correctly all configurations."""
from IsoSpecPy import IsoThreshold
global test_formulas
for f in test_formulas:
I = IsoThreshold(formula=f, threshold=0.0, absolute=True)
assert len(I) == max_confs_cnt(f)
print("Seems OK!")
if __name__ == "__main__":
test_all_configs_output_cnt()
| <filename>tests/Python/test_all_configs_output.py
def binom(n, k):
"""Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient"""
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def max_confs_cnt(formula=""):
"""Get the maximal number of configurations for a given chemical formula."""
from IsoSpecPy import IsoParamsFromFormula
f = IsoParamsFromFormula(formula)
if f.atomCount:
N = 1
for n, p in zip(f.atomCount, f.prob):
N *= binom(n+len(p)-1, n)
return N
else:
return 0
def test_max_confs_cnt():
assert max_confs_cnt("O100") == 5151
assert max_confs_cnt("O100N10S6") == 4759524
test_formulas = [ 'O100',
'O100N10S6',
'C100H202',
'S10H20' ]
def test_all_configs_output_cnt():
"""Test if IsoSpecPy output correctly all configurations."""
from IsoSpecPy import IsoThreshold
global test_formulas
for f in test_formulas:
I = IsoThreshold(formula=f, threshold=0.0, absolute=True)
assert len(I) == max_confs_cnt(f)
print("Seems OK!")
if __name__ == "__main__":
test_all_configs_output_cnt()
| en | 0.675289 | Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient Get the maximal number of configurations for a given chemical formula. Test if IsoSpecPy output correctly all configurations. | 3.005553 | 3 |
tractseg/models/UNet_Pytorch_Regression.py | soichih/TractSeg | 0 | 8655 | # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.MetricUtils import MetricUtils
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
class UNet_Pytorch_Regression(torch.nn.Module):
def __init__(self, n_input_channels=3, n_classes=7, n_filt=64, batchnorm=False, dropout=False):
super(UNet_Pytorch_Regression, self).__init__()
self.in_channel = n_input_channels
self.n_classes = n_classes
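        # Contracting path: four blocks of two convolutions, each followed by 2x2 max pooling; the filter count doubles at every level.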
self.contr_1_1 = conv2d(n_input_channels, n_filt)
self.contr_1_2 = conv2d(n_filt, n_filt)
self.pool_1 = nn.MaxPool2d((2, 2))
self.contr_2_1 = conv2d(n_filt, n_filt * 2)
self.contr_2_2 = conv2d(n_filt * 2, n_filt * 2)
self.pool_2 = nn.MaxPool2d((2, 2))
self.contr_3_1 = conv2d(n_filt * 2, n_filt * 4)
self.contr_3_2 = conv2d(n_filt * 4, n_filt * 4)
self.pool_3 = nn.MaxPool2d((2, 2))
self.contr_4_1 = conv2d(n_filt * 4, n_filt * 8)
self.contr_4_2 = conv2d(n_filt * 8, n_filt * 8)
self.pool_4 = nn.MaxPool2d((2, 2))
self.dropout = nn.Dropout(p=0.4)
self.encode_1 = conv2d(n_filt * 8, n_filt * 16)
self.encode_2 = conv2d(n_filt * 16, n_filt * 16)
self.deconv_1 = deconv2d(n_filt * 16, n_filt * 16, kernel_size=2, stride=2)
# self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d
self.expand_1_1 = conv2d(n_filt * 8 + n_filt * 16, n_filt * 8)
self.expand_1_2 = conv2d(n_filt * 8, n_filt * 8)
self.deconv_2 = deconv2d(n_filt * 8, n_filt * 8, kernel_size=2, stride=2)
# self.deconv_2 = nn.Upsample(scale_factor=2)
self.expand_2_1 = conv2d(n_filt * 4 + n_filt * 8, n_filt * 4, stride=1)
self.expand_2_2 = conv2d(n_filt * 4, n_filt * 4, stride=1)
self.deconv_3 = deconv2d(n_filt * 4, n_filt * 4, kernel_size=2, stride=2)
# self.deconv_3 = nn.Upsample(scale_factor=2)
self.expand_3_1 = conv2d(n_filt * 2 + n_filt * 4, n_filt * 2, stride=1)
self.expand_3_2 = conv2d(n_filt * 2, n_filt * 2, stride=1)
self.deconv_4 = deconv2d(n_filt * 2, n_filt * 2, kernel_size=2, stride=2)
# self.deconv_4 = nn.Upsample(scale_factor=2)
self.expand_4_1 = conv2d(n_filt + n_filt * 2, n_filt, stride=1)
self.expand_4_2 = conv2d(n_filt, n_filt, stride=1)
        self.conv_5 = nn.Conv2d(n_filt, n_classes, kernel_size=1, stride=1, padding=0, bias=True)  # no activation function here, because it is applied in the loss function (...WithLogits)
def forward(self, inpt):
contr_1_1 = self.contr_1_1(inpt)
contr_1_2 = self.contr_1_2(contr_1_1)
pool_1 = self.pool_1(contr_1_2)
contr_2_1 = self.contr_2_1(pool_1)
contr_2_2 = self.contr_2_2(contr_2_1)
pool_2 = self.pool_2(contr_2_2)
contr_3_1 = self.contr_3_1(pool_2)
contr_3_2 = self.contr_3_2(contr_3_1)
pool_3 = self.pool_3(contr_3_2)
contr_4_1 = self.contr_4_1(pool_3)
contr_4_2 = self.contr_4_2(contr_4_1)
pool_4 = self.pool_4(contr_4_2)
pool_4 = self.dropout(pool_4)
encode_1 = self.encode_1(pool_4)
encode_2 = self.encode_2(encode_1)
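        # Expanding path: each step upsamples with a transposed convolution, concatenates the matching encoder
        # feature map (skip connection), then applies two convolutions.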
deconv_1 = self.deconv_1(encode_2)
concat1 = torch.cat([deconv_1, contr_4_2], 1)
expand_1_1 = self.expand_1_1(concat1)
expand_1_2 = self.expand_1_2(expand_1_1)
deconv_2 = self.deconv_2(expand_1_2)
concat2 = torch.cat([deconv_2, contr_3_2], 1)
expand_2_1 = self.expand_2_1(concat2)
expand_2_2 = self.expand_2_2(expand_2_1)
deconv_3 = self.deconv_3(expand_2_2)
concat3 = torch.cat([deconv_3, contr_2_2], 1)
expand_3_1 = self.expand_3_1(concat3)
expand_3_2 = self.expand_3_2(expand_3_1)
deconv_4 = self.deconv_4(expand_3_2)
concat4 = torch.cat([deconv_4, contr_1_2], 1)
expand_4_1 = self.expand_4_1(concat4)
expand_4_2 = self.expand_4_2(expand_4_1)
conv_5 = self.conv_5(expand_4_2)
return conv_5, None
| # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
| en | 0.788314 | # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # self.deconv_1 = nn.Upsample(scale_factor=2) #does only upscale width and height #Similar results to deconv2d # self.deconv_2 = nn.Upsample(scale_factor=2) # self.deconv_3 = nn.Upsample(scale_factor=2) # self.deconv_4 = nn.Upsample(scale_factor=2) # no activation function, because is in LossFunction (...WithLogits) | 1.937665 | 2 |
platform/core/polyaxon/sidecar/sidecar/__main__.py | hackerwins/polyaxon | 0 | 8656 | import argparse
import time
from kubernetes.client.rest import ApiException
from polyaxon_client.client import PolyaxonClient
from polyaxon_k8s.manager import K8SManager
from sidecar import settings
from sidecar.monitor import is_pod_running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--app_label',
type=str
)
parser.add_argument(
'--container_id',
type=str
)
parser.add_argument(
'--sleep_interval',
default=2,
type=int
)
parser.add_argument(
'--max_restarts',
default=0,
type=int
)
args = parser.parse_args()
arguments = args.__dict__
container_id = arguments.pop('container_id')
app_label = arguments.pop('app_label')
sleep_interval = arguments.pop('sleep_interval')
max_restarts = arguments.pop('max_restarts')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
client = PolyaxonClient()
client.set_internal_health_check()
retry = 0
is_running = True
status = None
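    # Poll the container state every sleep_interval seconds until the pod stops running or three API errors occur;
    # the last observed status is then reconciled with the Polyaxon API.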
while is_running and retry < 3:
time.sleep(sleep_interval)
try:
is_running, status = is_pod_running(k8s_manager,
settings.POD_ID,
container_id,
max_restarts)
except ApiException:
retry += 1
            time.sleep(sleep_interval)  # wait a bit longer before the next try
if status:
client.reconcile(status=status)
| import argparse
| en | 0.958274 | # We wait a bit more before try | 2.029748 | 2 |
simple_robot_tests/src/test_odometry.py | plusangel/simple_robot | 1 | 8657 | #! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
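# Minimal subscriber that logs every Odometry message received on the given topic.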
class OdomTopicReader(object):
def __init__(self, topic_name = '/odom'):
self._topic_name = topic_name
self._sub = rospy.Subscriber(self._topic_name, Odometry, self.topic_callback)
self._odomdata = Odometry()
def topic_callback(self, msg):
self._odomdata = msg
rospy.loginfo(self._odomdata)
if __name__ == "__main__":
rospy.init_node('odom_topic_subscriber')
odom_reader_object = OdomTopicReader()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
| #! /usr/bin/env python
| ru | 0.148623 | #! /usr/bin/env python | 2.526528 | 3 |
test/test_random.py | kevinintel/neural-compressor | 100 | 8658 | """Tests for quantization"""
import numpy as np
import unittest
import os
import shutil
import yaml
import tensorflow as tf
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
accuracy_criterion:
relative: 0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml2():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
exit_policy:
max_trials: 5
accuracy_criterion:
relative: -0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml2.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_model():
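    # Build a minimal single-convolution graph; the try branch uses the TF1 API and the except branch
    # falls back to tf.compat.v1 on TF2 installations.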
try:
graph = tf.Graph()
graph_def = tf.GraphDef()
with tf.Session() as sess:
x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filter=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.global_variables_initializer())
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
except:
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filters=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.compat.v1.global_variables_initializer())
constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, [
'op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
class TestQuantization(unittest.TestCase):
@classmethod
def setUpClass(self):
self.constant_graph = build_fake_model()
build_fake_yaml()
build_fake_yaml2()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
os.remove('fake_yaml2.yaml')
shutil.rmtree("saved", ignore_errors=True)
def test_ru_random_one_trial(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
def test_ru_random_max_trials(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml2.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
if __name__ == "__main__":
unittest.main()
| """Tests for quantization"""
| en | 0.627019 | Tests for quantization model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
accuracy_criterion:
relative: 0.01
workspace:
path: saved model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
exit_policy:
max_trials: 5
accuracy_criterion:
relative: -0.01
workspace:
path: saved | 2.562492 | 3 |
cirq/google/engine/engine_client_test.py | lilies/Cirq | 1 | 8659 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EngineClient."""
import datetime
from unittest import mock
import pytest
from google.api_core import exceptions
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf.timestamp_pb2 import Timestamp
from cirq.google.engine.engine_client import EngineClient, EngineException
from cirq.google.engine.client import quantum
from cirq.google.engine.client.quantum_v1alpha1 import enums as qenums
from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes
def setup_mock_(client_constructor):
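    # Replace the patched QuantumEngineServiceClient constructor with a Mock so each test can stub
    # return values and inspect call arguments on the fake gRPC client.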
grpc_client = mock.Mock()
client_constructor.return_value = grpc_client
return grpc_client
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.create_quantum_program.return_value = result
code = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_program('proj', 'prog', code, 'A program',
labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program',
labels=labels), False)
assert client.create_program('proj', 'prog', code,
'A program') == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
description='A program'), False)
assert client.create_program('proj', 'prog', code,
labels=labels) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code,
labels=labels), False)
assert client.create_program('proj', 'prog', code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
code=code), False)
assert client.create_program('proj', program_id=None,
code=code) == ('prog', result)
assert grpc_client.create_quantum_program.call_args[0] == (
'projects/proj', qtypes.QuantumProgram(code=code), False)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.get_quantum_program.return_value = result
client = EngineClient()
assert client.get_program('proj', 'prog', False) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert client.get_program('proj', 'prog', True) == result
assert grpc_client.get_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProgram(name='projects/proj/programs/prog1'),
qtypes.QuantumProgram(name='projects/proj/programs/prog2')
]
grpc_client.list_quantum_programs.return_value = results
client = EngineClient()
assert client.list_programs(project_id='proj') == results
assert grpc_client.list_quantum_programs.call_args[0] == ('projects/proj',)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': '',
}
# yapf: disable
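# Each case gives the filter string expected for a combination of created_after/created_before
# (dates vs. timezone-aware datetimes) and label constraints.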
@pytest.mark.parametrize(
'expected_filter, created_after, created_before, labels',
[
('',
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:*',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters(client_constructor, expected_filter,
created_before, created_after, labels):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_programs(project_id='proj',
created_before=created_before,
created_after=created_after,
has_labels=labels)
assert grpc_client.list_quantum_programs.call_args[1] == {
'filter_': expected_filter,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_program_filters_invalid_type(client_constructor):
with pytest.raises(ValueError, match=""):
EngineClient().list_programs(project_id='proj',
created_before="Unsupported date/time")
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.set_program_description('proj', 'prog', 'A program') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
description='A program'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_program_description('proj', 'prog', '') == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.return_value = qtypes.QuantumProgram(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_program_labels('proj', 'prog', labels) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_program_labels('proj', 'prog', {}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.add_program_labels('proj', 'prog',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.add_program_labels('proj', 'prog',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_program_labels('proj', 'prog', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_program_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumProgram(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_program.return_value = existing
result = qtypes.QuantumProgram(name='projects/proj/programs/prog')
grpc_client.update_quantum_program.return_value = result
client = EngineClient()
assert client.remove_program_labels('proj', 'prog', ['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_program_labels('proj', 'prog',
['hello', 'weather']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_program_labels('proj', 'prog',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_program.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumProgram(name='projects/proj/programs/prog',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_program(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_program('proj', 'prog')
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', False)
assert not client.delete_program('proj', 'prog', delete_jobs=True)
assert grpc_client.delete_quantum_program.call_args[0] == (
'projects/proj/programs/prog', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.create_quantum_job.return_value = result
run_context = qtypes.any_pb2.Any()
labels = {'hello': 'world'}
client = EngineClient()
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10, 'A job',
labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job',
labels=labels), False)
assert client.create_job(
'proj',
'prog',
'job0',
['processor0'],
run_context,
10,
'A job',
) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
description='A job'), False)
assert client.create_job('proj',
'prog',
'job0', ['processor0'],
run_context,
10,
labels=labels) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
labels=labels), False)
assert client.create_job('proj', 'prog', 'job0', ['processor0'],
run_context, 10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
name='projects/proj/programs/prog/jobs/job0',
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
assert client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=10) == ('job0', result)
assert grpc_client.create_quantum_job.call_args[0] == (
'projects/proj/programs/prog',
qtypes.QuantumJob(
run_context=run_context,
scheduling_config=qtypes.SchedulingConfig(
priority=10,
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/proj/processors/processor0'])),
), False)
with pytest.raises(ValueError, match='priority must be between 0 and 1000'):
client.create_job('proj',
'prog',
job_id=None,
processor_ids=['processor0'],
run_context=run_context,
priority=5000)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_job.return_value = result
client = EngineClient()
assert client.get_job('proj', 'prog', 'job0', False) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', False)
assert client.get_job('proj', 'prog', 'job0', True) == result
assert grpc_client.get_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0', True)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_description(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.set_job_description('proj', 'prog', 'job0', 'A job') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
description='A job'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
assert client.set_job_description('proj', 'prog', 'job0', '') == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0'),
qtypes.field_mask_pb2.FieldMask(paths=['description']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_set_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_job.return_value = qtypes.QuantumJob(
labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
labels = {'hello': 'world', 'color': 'blue', 'run': '1'}
assert client.set_job_labels('proj', 'prog', 'job0', labels) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels=labels,
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.set_job_labels('proj', 'prog', 'job0', {}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_add_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.add_job_labels('proj', 'prog', 'job0',
{'color': 'red'}) == existing
assert grpc_client.update_quantum_job.call_count == 0
assert client.add_job_labels('proj', 'prog', 'job0',
{'hello': 'world'}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.add_job_labels('proj', 'prog', 'job0', {
'hello': 'world',
'color': 'blue'
}) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'blue',
'weather': 'sun',
'run': '1',
'hello': 'world'
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_remove_job_labels(client_constructor):
grpc_client = setup_mock_(client_constructor)
existing = qtypes.QuantumJob(labels={
'color': 'red',
'weather': 'sun',
'run': '1'
},
label_fingerprint='hash')
grpc_client.get_quantum_job.return_value = existing
result = qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0')
grpc_client.update_quantum_job.return_value = result
client = EngineClient()
assert client.remove_job_labels('proj', 'prog', 'job0',
['other']) == existing
assert grpc_client.update_quantum_program.call_count == 0
assert client.remove_job_labels('proj', 'prog', 'job0',
['hello', 'weather']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
labels={
'color': 'red',
'run': '1',
},
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
assert client.remove_job_labels('proj', 'prog', 'job0',
['color', 'weather', 'run']) == result
assert grpc_client.update_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',
qtypes.QuantumJob(name='projects/proj/programs/prog/jobs/job0',
label_fingerprint='hash'),
qtypes.field_mask_pb2.FieldMask(paths=['labels']))
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.delete_job('proj', 'prog', 'job0')
assert grpc_client.delete_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_job(client_constructor):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
assert not client.cancel_job('proj', 'prog', 'job0')
assert grpc_client.cancel_quantum_job.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_job_results(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumResult(
parent='projects/proj/programs/prog/jobs/job0')
grpc_client.get_quantum_result.return_value = result
client = EngineClient()
assert client.get_job_results('proj', 'prog', 'job0') == result
assert grpc_client.get_quantum_result.call_args[0] == (
'projects/proj/programs/prog/jobs/job0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job1'),
qtypes.QuantumJob(name='projects/proj/programs/prog1/jobs/job2')
]
grpc_client.list_quantum_jobs.return_value = results
client = EngineClient()
assert client.list_jobs(project_id='proj', program_id='prog1') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/prog1',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
assert client.list_jobs(project_id='proj') == results
assert grpc_client.list_quantum_jobs.call_args[0] == (
'projects/proj/programs/-',)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': '',
}
# yapf: disable
@pytest.mark.parametrize(
'expected_filter, '
'created_after, '
'created_before, '
'labels, '
'execution_states',
[
('',
None,
None,
None,
None),
('create_time >= 2020-09-01',
datetime.date(2020, 9, 1),
None,
None,
None),
('create_time >= 1598918400',
datetime.datetime(2020, 9, 1, 0, 0, 0,
tzinfo=datetime.timezone.utc),
None,
None,
None),
('create_time <= 2020-10-01',
None,
datetime.date(2020, 10, 1),
None,
None),
('create_time >= 2020-09-01 AND create_time <= 1598918410',
datetime.date(2020, 9, 1),
datetime.datetime(2020, 9, 1, 0, 0, 10,
tzinfo=datetime.timezone.utc),
None,
None),
('labels.color:red AND labels.shape:*',
None,
None,
{
'color': 'red',
'shape': '*'
},
None
),
('(execution_status.state = FAILURE OR '
'execution_status.state = CANCELLED)',
None,
None,
None,
[quantum.enums.ExecutionStatus.State.FAILURE,
quantum.enums.ExecutionStatus.State.CANCELLED,]
),
('create_time >= 2020-08-01 AND '
'create_time <= 1598918400 AND '
'labels.color:red AND labels.shape:* AND '
'(execution_status.state = SUCCESS)',
datetime.date(2020, 8, 1),
datetime.datetime(2020, 9, 1, tzinfo=datetime.timezone.utc),
{
'color': 'red',
'shape': '*'
},
[quantum.enums.ExecutionStatus.State.SUCCESS,],
),
])
# yapf: enable
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_jobs_filters(client_constructor, expected_filter, created_before,
created_after, labels, execution_states):
grpc_client = setup_mock_(client_constructor)
client = EngineClient()
client.list_jobs(project_id='proj',
program_id='prog',
created_before=created_before,
created_after=created_after,
has_labels=labels,
execution_states=execution_states)
assert grpc_client.list_quantum_jobs.call_args[1] == {
'filter_': expected_filter,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_processors(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumProcessor(name='projects/proj/processor/processor0'),
qtypes.QuantumProcessor(name='projects/proj/processor/processor1')
]
grpc_client.list_quantum_processors.return_value = results
client = EngineClient()
assert client.list_processors('proj') == results
assert grpc_client.list_quantum_processors.call_args[0] == (
'projects/proj',)
assert grpc_client.list_quantum_processors.call_args[1] == {
'filter_': '',
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_processor(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumProcessor(name='projects/proj/processors/processor0')
grpc_client.get_quantum_processor.return_value = result
client = EngineClient()
assert client.get_processor('proj', 'processor0') == result
assert grpc_client.get_quantum_processor.call_args[0] == (
'projects/proj/processors/processor0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_calibrations(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumCalibration(
name='projects/proj/processor/processor0/calibrations/123456'),
qtypes.QuantumCalibration(
name='projects/proj/processor/processor1/calibrations/224466')
]
grpc_client.list_quantum_calibrations.return_value = results
client = EngineClient()
assert client.list_calibrations('proj', 'processor0') == results
assert grpc_client.list_quantum_calibrations.call_args[0] == (
'projects/proj/processors/processor0',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_calibration('proj', 'processor0', 123456) == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/123456',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration(client_constructor):
grpc_client = setup_mock_(client_constructor)
result = qtypes.QuantumCalibration(
name='projects/proj/processors/processor0/calibrations/123456')
grpc_client.get_quantum_calibration.return_value = result
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') == result
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_does_not_exist(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert client.get_current_calibration('proj', 'processor0') is None
assert grpc_client.get_quantum_calibration.call_args[0] == (
'projects/proj/processors/processor0/calibrations/current',)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_current_calibration_error(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_calibration.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_current_calibration('proj', 'processor0')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_doesnt_retry_not_found_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
with pytest.raises(EngineException, match='not found'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count == 1
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_api_retry_5xx_errors(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_program.side_effect = exceptions.ServiceUnavailable(
'internal error')
client = EngineClient(max_retry_delay_seconds=1)
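    # 5xx responses are retried with backoff until the client's maximum retry delay is exceeded,
    # so more than one attempt is made before the timeout is raised.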
with pytest.raises(TimeoutError,
match='Reached max retry attempts.*internal error'):
client.get_program('proj', 'prog', False)
assert grpc_client.get_quantum_program.call_count > 1
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_create_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
start = datetime.datetime.fromtimestamp(1000000000)
end = datetime.datetime.fromtimestamp(1000003600)
users = ['<EMAIL>']
result = qtypes.QuantumReservation(
name='projects/proj/processors/processor0/reservations/papar-party-44',
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000003600),
whitelisted_users=users,
)
grpc_client.create_quantum_reservation.return_value = result
client = EngineClient()
assert client.create_reservation('proj', 'processor0', start, end,
users) == result
assert grpc_client.create_quantum_reservation.call_count == 1
kwargs = grpc_client.create_quantum_reservation.call_args[1]
# The outgoing argument will not have the resource name
result.name = ''
assert kwargs == {
'parent': 'projects/proj/processors/processor0',
'quantum_reservation': result
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_cancel_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.cancel_quantum_reservation.return_value = result
client = EngineClient()
assert (client.cancel_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.cancel_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_delete_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.delete_quantum_reservation.return_value = result
client = EngineClient()
assert (client.delete_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.delete_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.get_quantum_reservation.return_value = result
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
'papar-party-44') == result)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_not_found(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
grpc_client.get_quantum_reservation.side_effect = exceptions.NotFound(
'not found')
client = EngineClient()
assert (client.get_reservation('proj', 'processor0',
'papar-party-44') == None)
kwargs = grpc_client.get_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_get_reservation_exception(client_constructor):
grpc_client = setup_mock_(client_constructor)
grpc_client.get_quantum_reservation.side_effect = exceptions.BadRequest(
'boom')
client = EngineClient()
with pytest.raises(EngineException, match='boom'):
client.get_reservation('proj', 'processor0', 'goog')
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
results = [
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000000000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
),
qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1200000000),
end_time=Timestamp(seconds=1200002000),
whitelisted_users=['<EMAIL>'],
),
]
grpc_client.list_quantum_reservations.return_value = results
client = EngineClient()
assert (client.list_reservations('proj', 'processor0') == results)
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
start_time=Timestamp(seconds=1000001000),
end_time=Timestamp(seconds=1000002000),
whitelisted_users=['<EMAIL>'],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
start=datetime.datetime.fromtimestamp(1000001000),
end=datetime.datetime.fromtimestamp(1000002000),
whitelisted_users=['<EMAIL>'],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name':
name,
'quantum_reservation':
result,
'update_mask':
FieldMask(paths=['start_time', 'end_time', 'whitelisted_users'])
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_update_reservation_remove_all_users(client_constructor):
grpc_client = setup_mock_(client_constructor)
name = 'projects/proj/processors/processor0/reservations/papar-party-44'
result = qtypes.QuantumReservation(
name=name,
whitelisted_users=[],
)
grpc_client.update_quantum_reservation.return_value = result
client = EngineClient()
assert (client.update_reservation(
'proj',
'processor0',
'papar-party-44',
whitelisted_users=[],
) == result)
kwargs = grpc_client.update_quantum_reservation.call_args[1]
assert kwargs == {
'name': name,
'quantum_reservation': result,
'update_mask': FieldMask(paths=['whitelisted_users'])
}
@mock.patch.object(quantum, 'QuantumEngineServiceClient', autospec=True)
def test_list_time_slots(client_constructor):
grpc_client = setup_mock_(client_constructor)
results = [
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000020000),
end_time=Timestamp(seconds=1000040000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.MAINTENANCE,
maintenance_config=qtypes.QuantumTimeSlot.MaintenanceConfig(
title='Testing',
description='Testing some new configuration.',
),
),
qtypes.QuantumTimeSlot(
processor_name='potofgold',
start_time=Timestamp(seconds=1000010000),
end_time=Timestamp(seconds=1000020000),
slot_type=qenums.QuantumTimeSlot.TimeSlotType.RESERVATION,
reservation_config=qtypes.QuantumTimeSlot.ReservationConfig(
project_id='super_secret_quantum'),
)
]
grpc_client.list_quantum_time_slots.return_value = results
client = EngineClient()
assert (client.list_time_slots('proj', 'processor0') == results)
google/ads/google_ads/v0/proto/services/media_file_service_pb2_grpc.py | jwygoda/google-ads-python | 0 | 8660 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v0.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v0.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2
class MediaFileServiceStub(object):
"""Service to manage media files.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetMediaFile = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/GetMediaFile',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.FromString,
)
self.MutateMediaFiles = channel.unary_unary(
'/google.ads.googleads.v0.services.MediaFileService/MutateMediaFiles',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.FromString,
)
class MediaFileServiceServicer(object):
"""Service to manage media files.
"""
def GetMediaFile(self, request, context):
"""Returns the requested media file in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateMediaFiles(self, request, context):
"""Creates media files. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
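# Illustrative usage sketch for the generated classes above; the endpoint
# address, resource name, and worker count are assumptions for demonstration.
def _example_get_media_file(target='localhost:50051'):
    """Sketch: fetch a media file through the generated stub over an insecure channel."""
    channel = grpc.insecure_channel(target)
    stub = MediaFileServiceStub(channel)
    # 'resource_name' is assumed to be the identifying field of GetMediaFileRequest.
    request = google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest(
        resource_name='customers/1234567890/mediaFiles/111')
    return stub.GetMediaFile(request)
def _example_serve(servicer, port=50051):
    """Sketch: register a MediaFileServiceServicer implementation on a gRPC server."""
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_MediaFileServiceServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server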
docs/generate_example_images.py | KhaledSharif/kornia | 0 | 8661 | import importlib
import math
import os
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
import requests
import torch
import kornia as K
def read_img_from_url(url: str, resize_to: Optional[Tuple[int, int]] = None) -> torch.Tensor:
# perform request
response = requests.get(url).content
# convert to array of ints
nparr = np.frombuffer(response, np.uint8)
# convert to image array and resize
img: np.ndarray = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)[..., :3]
# convert the image to a tensor
    img_t: torch.Tensor = K.utils.image_to_tensor(img, keepdim=False)  # 1xCxHxW
img_t = img_t.float() / 255.0
if resize_to is None:
img_t = K.geometry.resize(img_t, 184)
else:
img_t = K.geometry.resize(img_t, resize_to)
return img_t
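# The helper above returns a 1xCxHxW float tensor with values in [0, 1]; when
# `resize_to` is omitted it falls back to K.geometry.resize(img_t, 184).
# Illustrative call (placeholder URL): read_img_from_url("https://example.com/cat.png", (184, 184))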
def main():
# load the images
BASE_IMAGE_URL1: str = "https://raw.githubusercontent.com/kornia/data/main/panda.jpg" # augmentation
BASE_IMAGE_URL2: str = "https://raw.githubusercontent.com/kornia/data/main/simba.png" # color
BASE_IMAGE_URL3: str = "https://raw.githubusercontent.com/kornia/data/main/girona.png" # enhance
BASE_IMAGE_URL4: str = "https://raw.githubusercontent.com/kornia/data/main/baby_giraffe.png" # morphology
BASE_IMAGE_URL5: str = "https://raw.githubusercontent.com/kornia/data/main/persistencia_memoria.jpg" # filters
BASE_IMAGE_URL6: str = "https://raw.githubusercontent.com/kornia/data/main/delorean.png" # geometry
OUTPUT_PATH = Path(__file__).absolute().parent / "source/_static/img"
os.makedirs(OUTPUT_PATH, exist_ok=True)
print(f"Pointing images to path {OUTPUT_PATH}.")
img1 = read_img_from_url(BASE_IMAGE_URL1)
img2 = read_img_from_url(BASE_IMAGE_URL2, img1.shape[-2:])
img3 = read_img_from_url(BASE_IMAGE_URL3, img1.shape[-2:])
img4 = read_img_from_url(BASE_IMAGE_URL4)
img5 = read_img_from_url(BASE_IMAGE_URL5, (234, 320))
img6 = read_img_from_url(BASE_IMAGE_URL6)
# TODO: make this more generic for modules out of kornia.augmentation
# Dictionary containing the transforms to generate the sample images:
# Key: Name of the transform class.
# Value: (parameters, num_samples, seed)
mod = importlib.import_module("kornia.augmentation")
augmentations_list: dict = {
"CenterCrop": ((184, 184), 1, 2018),
"ColorJitter": ((0.3, 0.3, 0.3, 0.3), 2, 2018),
"RandomAffine": (((-15.0, 20.0), (0.1, 0.1), (0.7, 1.3), 20), 2, 2019),
"RandomBoxBlur": (((7, 7),), 1, 2020),
"RandomCrop": ((img1.shape[-2:], (50, 50)), 2, 2020),
"RandomChannelShuffle": ((), 1, 2020),
"RandomElasticTransform": (((63, 63), (32, 32), (2.0, 2.0)), 2, 2018),
"RandomEqualize": ((), 1, 2020),
"RandomErasing": (((0.2, 0.4), (0.3, 1 / 0.3)), 2, 2017),
"RandomFisheye": ((torch.tensor([-0.3, 0.3]), torch.tensor([-0.3, 0.3]), torch.tensor([0.9, 1.0])), 2, 2020),
"RandomGaussianBlur": (((3, 3), (0.1, 2.0)), 1, 2020),
"RandomGaussianNoise": ((0.0, 0.05), 1, 2020),
"RandomGrayscale": ((), 1, 2020),
"RandomHorizontalFlip": ((), 1, 2020),
"RandomInvert": ((), 1, 2020),
"RandomMotionBlur": ((7, 35.0, 0.5), 2, 2020),
"RandomPerspective": ((0.2,), 2, 2020),
"RandomPlanckianJitter": ((), 2, 2022),
"RandomPosterize": (((1, 4),), 2, 2016),
"RandomResizedCrop": ((img1.shape[-2:], (1.0, 2.0), (1.0, 2.0)), 2, 2020),
"RandomRotation": ((45.0,), 2, 2019),
"RandomSharpness": ((16.0,), 1, 2019),
"RandomSolarize": ((0.2, 0.2), 2, 2019),
"RandomVerticalFlip": ((), 1, 2020),
"RandomThinPlateSpline": ((), 1, 2020),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in augmentations_list.items():
img_in = img1.repeat(num_samples, 1, 1, 1)
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
        # apply the augmentation to the image and concat
out = aug(img_in)
if aug_name == "CenterCrop":
h, w = img1.shape[-2:]
h_new, w_new = out.shape[-2:]
h_dif, w_dif = int(h - h_new), int(w - w_new)
out = torch.nn.functional.pad(out, (w_dif // 2, w_dif // 2, 0, h_dif))
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.augmentation")
mix_augmentations_list: dict = {
"RandomMixUp": (((0.3, 0.4),), 2, 20),
"RandomCutMix": ((img1.shape[-2], img1.shape[-1]), 2, 2019),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in mix_augmentations_list.items():
img_in = torch.cat([img1, img2])
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
        # apply the augmentation to the image and concat
out, _ = aug(img_in, torch.tensor([0, 1]))
out = torch.cat([img_in[0], img_in[1], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.color")
color_transforms_list: dict = {
"grayscale_to_rgb": ((), 3),
"rgb_to_bgr": ((), 1),
"rgb_to_grayscale": ((), 1),
"rgb_to_hsv": ((), 1),
"rgb_to_hls": ((), 1),
"rgb_to_luv": ((), 1),
"rgb_to_lab": ((), 1),
# "rgb_to_rgba": ((1.,), 1),
"rgb_to_xyz": ((), 1),
"rgb_to_ycbcr": ((), 1),
"rgb_to_yuv": ((), 1),
"rgb_to_linear_rgb": ((), 1),
}
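    # In the loop below the first channel of a few color spaces is rescaled for
    # display: hue from rgb_to_hsv is in radians ([0, 2*pi]) and the lightness
    # channel of Lab/Luv lives on a 0-100 scale, so both are divided down to
    # roughly [0, 1] before the result is written to disk.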
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in color_transforms_list.items():
# import function and apply
fn = getattr(mod, fn_name)
if fn_name == "grayscale_to_rgb":
out = fn(K.color.rgb_to_grayscale(img2), *args)
else:
out = fn(img2, *args)
# perform normalization to visualize
if fn_name == "rgb_to_lab":
out = out[:, :1] / 100.0
elif fn_name == "rgb_to_hsv":
            out[:, :1] = out[:, :1] / (2 * math.pi)
elif fn_name == "rgb_to_luv":
out = out[:, :1] / 116.0
# repeat channels for grayscale
if out.shape[1] != 3:
out = out.repeat(1, 3, 1, 1)
# save the output image
if fn_name == "grayscale_to_rgb":
out = torch.cat(
[K.color.rgb_to_grayscale(img2[0]).repeat(3, 1, 1), *(out[i] for i in range(out.size(0)))], dim=-1
)
else:
out = torch.cat([img2[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.enhance module
mod = importlib.import_module("kornia.enhance")
transforms: dict = {
"adjust_brightness": ((torch.tensor([0.25, 0.5]),), 2),
"adjust_contrast": ((torch.tensor([0.65, 0.5]),), 2),
"adjust_gamma": ((torch.tensor([0.85, 0.75]), 2.0), 2),
"adjust_hue": ((torch.tensor([-math.pi / 4, math.pi / 4]),), 2),
"adjust_saturation": ((torch.tensor([1.0, 2.0]),), 2),
"solarize": ((torch.tensor([0.8, 0.5]), torch.tensor([-0.25, 0.25])), 2),
"posterize": ((torch.tensor([4, 2]),), 2),
"sharpness": ((torch.tensor([1.0, 2.5]),), 2),
"equalize": ((), 1),
"invert": ((), 1),
"equalize_clahe": ((), 1),
"add_weighted": ((0.75, 0.25, 2.0), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img3.repeat(num_samples, 1, 1, 1)
if fn_name == "add_weighted":
args_in = (img_in, args[0], img2, args[1], args[2])
else:
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.morphology module
mod = importlib.import_module("kornia.morphology")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"dilation": ((kernel,), 1),
"erosion": ((kernel,), 1),
"opening": ((kernel,), 1),
"closing": ((kernel,), 1),
"gradient": ((kernel,), 1),
"top_hat": ((kernel,), 1),
"bottom_hat": ((kernel,), 1),
}
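    # All morphology ops below share the 3x3 cross-shaped structuring element
    # defined above as `kernel` (a 0/1 tensor).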
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img4.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.filters module
mod = importlib.import_module("kornia.filters")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"box_blur": (((5, 5),), 1),
"median_blur": (((5, 5),), 1),
"gaussian_blur2d": (((5, 5), (1.5, 1.5)), 1),
"motion_blur": ((5, 90.0, 1.0), 1),
"max_blur_pool2d": ((5,), 1),
"blur_pool2d": ((5,), 1),
"unsharp_mask": (((5, 5), (1.5, 1.5)), 1),
"laplacian": ((5,), 1),
"sobel": ((), 1),
"spatial_gradient": ((), 1),
"canny": ((), 1),
}
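    # Notes for the loop below: canny returns a (magnitude, edges) tuple, hence
    # the out[1] selection; the *_pool2d ops shrink the image, so their output
    # is resized back to the input resolution before concatenation.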
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img5.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("max_blur_pool2d", "blur_pool2d"):
out = K.geometry.resize(out, img_in.shape[-2:])
if fn_name == "canny":
out = out[1].repeat(1, 3, 1, 1)
if isinstance(out, torch.Tensor):
out = out.clamp(min=0.0, max=1.0)
if fn_name in ("laplacian", "sobel", "spatial_gradient", "canny"):
out = K.enhance.normalize_min_max(out)
if fn_name == "spatial_gradient":
out = out.permute(2, 1, 0, 3, 4).squeeze()
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
    # kornia.geometry.transform module
mod = importlib.import_module("kornia.geometry.transform")
h, w = img6.shape[-2:]
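    # _get_tps_args builds the inputs for warp_image_tps below: source control
    # points in normalized coordinates, a randomly jittered copy of them as the
    # destination points, and the TPS kernel/affine weights computed by
    # K.geometry.transform.get_tps_transform.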
def _get_tps_args():
        src = torch.tensor([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0], [0.0, 0.0]]]).repeat(2, 1, 1)  # Bx5x2
dst = src + torch.distributions.Uniform(-0.2, 0.2).rsample((2, 5, 2))
kernel, affine = K.geometry.transform.get_tps_transform(dst, src)
return src, kernel, affine
transforms: dict = {
"warp_affine": (
(
K.geometry.transform.get_affine_matrix2d(
translations=torch.zeros(2, 2),
center=(torch.tensor([w, h]) / 2).repeat(2, 1),
scale=torch.distributions.Uniform(0.5, 1.5).rsample((2, 2)),
angle=torch.tensor([-25.0, 25.0]),
)[:, :2, :3],
(h, w),
),
2,
),
"remap": (
(
*(K.utils.create_meshgrid(h, w, normalized_coordinates=True) - 0.25).unbind(-1),
'bilinear',
'zeros',
True,
True,
),
1,
),
"warp_image_tps": ((_get_tps_args()), 2),
"rotate": ((torch.tensor([-15.0, 25.0]),), 2),
"translate": ((torch.tensor([[10.0, -15], [50.0, -25.0]]),), 2),
"scale": ((torch.tensor([[0.5, 1.25], [1.0, 1.5]]),), 2),
"shear": ((torch.tensor([[0.1, -0.2], [-0.2, 0.1]]),), 2),
"rot180": ((), 1),
"hflip": ((), 1),
"vflip": ((), 1),
"resize": (((120, 220),), 1),
"rescale": ((0.5,), 1),
"elastic_transform2d": ((torch.rand(1, 2, h, w) * 2 - 1, (63, 63), (32, 32), (4.0, 4.0)), 1),
"pyrdown": ((), 1),
"pyrup": ((), 1),
"build_pyramid": ((3,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img6.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("resize", "rescale", "pyrdown", "pyrup"):
h_new, w_new = out.shape[-2:]
out = torch.nn.functional.pad(out, (0, (w - w_new), 0, (h - h_new)))
if fn_name == "build_pyramid":
_out = []
for pyr in out[1:]:
h_new, w_new = pyr.shape[-2:]
out_tmp = torch.nn.functional.pad(pyr, (0, (w - w_new), 0, (h - h_new)))
_out.append(out_tmp)
out = torch.cat(_out)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
if __name__ == "__main__":
main()
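# The generated PNGs land in docs/source/_static/img (OUTPUT_PATH above); a
# typical invocation, assuming the repository root as working directory, is:
#   python docs/generate_example_images.py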
| import importlib
import math
import os
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
import requests
import torch
import kornia as K
def read_img_from_url(url: str, resize_to: Optional[Tuple[int, int]] = None) -> torch.Tensor:
# perform request
response = requests.get(url).content
# convert to array of ints
nparr = np.frombuffer(response, np.uint8)
# convert to image array and resize
img: np.ndarray = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)[..., :3]
# convert the image to a tensor
img_t: torch.Tensor = K.utils.image_to_tensor(img, keepdim=False) # 1xCxHXW
img_t = img_t.float() / 255.0
if resize_to is None:
img_t = K.geometry.resize(img_t, 184)
else:
img_t = K.geometry.resize(img_t, resize_to)
return img_t
def main():
# load the images
BASE_IMAGE_URL1: str = "https://raw.githubusercontent.com/kornia/data/main/panda.jpg" # augmentation
BASE_IMAGE_URL2: str = "https://raw.githubusercontent.com/kornia/data/main/simba.png" # color
BASE_IMAGE_URL3: str = "https://raw.githubusercontent.com/kornia/data/main/girona.png" # enhance
BASE_IMAGE_URL4: str = "https://raw.githubusercontent.com/kornia/data/main/baby_giraffe.png" # morphology
BASE_IMAGE_URL5: str = "https://raw.githubusercontent.com/kornia/data/main/persistencia_memoria.jpg" # filters
BASE_IMAGE_URL6: str = "https://raw.githubusercontent.com/kornia/data/main/delorean.png" # geometry
OUTPUT_PATH = Path(__file__).absolute().parent / "source/_static/img"
os.makedirs(OUTPUT_PATH, exist_ok=True)
print(f"Pointing images to path {OUTPUT_PATH}.")
img1 = read_img_from_url(BASE_IMAGE_URL1)
img2 = read_img_from_url(BASE_IMAGE_URL2, img1.shape[-2:])
img3 = read_img_from_url(BASE_IMAGE_URL3, img1.shape[-2:])
img4 = read_img_from_url(BASE_IMAGE_URL4)
img5 = read_img_from_url(BASE_IMAGE_URL5, (234, 320))
img6 = read_img_from_url(BASE_IMAGE_URL6)
# TODO: make this more generic for modules out of kornia.augmentation
# Dictionary containing the transforms to generate the sample images:
# Key: Name of the transform class.
# Value: (parameters, num_samples, seed)
mod = importlib.import_module("kornia.augmentation")
augmentations_list: dict = {
"CenterCrop": ((184, 184), 1, 2018),
"ColorJitter": ((0.3, 0.3, 0.3, 0.3), 2, 2018),
"RandomAffine": (((-15.0, 20.0), (0.1, 0.1), (0.7, 1.3), 20), 2, 2019),
"RandomBoxBlur": (((7, 7),), 1, 2020),
"RandomCrop": ((img1.shape[-2:], (50, 50)), 2, 2020),
"RandomChannelShuffle": ((), 1, 2020),
"RandomElasticTransform": (((63, 63), (32, 32), (2.0, 2.0)), 2, 2018),
"RandomEqualize": ((), 1, 2020),
"RandomErasing": (((0.2, 0.4), (0.3, 1 / 0.3)), 2, 2017),
"RandomFisheye": ((torch.tensor([-0.3, 0.3]), torch.tensor([-0.3, 0.3]), torch.tensor([0.9, 1.0])), 2, 2020),
"RandomGaussianBlur": (((3, 3), (0.1, 2.0)), 1, 2020),
"RandomGaussianNoise": ((0.0, 0.05), 1, 2020),
"RandomGrayscale": ((), 1, 2020),
"RandomHorizontalFlip": ((), 1, 2020),
"RandomInvert": ((), 1, 2020),
"RandomMotionBlur": ((7, 35.0, 0.5), 2, 2020),
"RandomPerspective": ((0.2,), 2, 2020),
"RandomPlanckianJitter": ((), 2, 2022),
"RandomPosterize": (((1, 4),), 2, 2016),
"RandomResizedCrop": ((img1.shape[-2:], (1.0, 2.0), (1.0, 2.0)), 2, 2020),
"RandomRotation": ((45.0,), 2, 2019),
"RandomSharpness": ((16.0,), 1, 2019),
"RandomSolarize": ((0.2, 0.2), 2, 2019),
"RandomVerticalFlip": ((), 1, 2020),
"RandomThinPlateSpline": ((), 1, 2020),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in augmentations_list.items():
img_in = img1.repeat(num_samples, 1, 1, 1)
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
# apply the augmentaiton to the image and concat
out = aug(img_in)
if aug_name == "CenterCrop":
h, w = img1.shape[-2:]
h_new, w_new = out.shape[-2:]
h_dif, w_dif = int(h - h_new), int(w - w_new)
out = torch.nn.functional.pad(out, (w_dif // 2, w_dif // 2, 0, h_dif))
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.augmentation")
mix_augmentations_list: dict = {
"RandomMixUp": (((0.3, 0.4),), 2, 20),
"RandomCutMix": ((img1.shape[-2], img1.shape[-1]), 2, 2019),
}
# ITERATE OVER THE TRANSFORMS
for aug_name, (args, num_samples, seed) in mix_augmentations_list.items():
img_in = torch.cat([img1, img2])
# dynamically create the class instance
cls = getattr(mod, aug_name)
aug = cls(*args, p=1.0)
# set seed
torch.manual_seed(seed)
# apply the augmentation to the image and concat
out, _ = aug(img_in, torch.tensor([0, 1]))
out = torch.cat([img_in[0], img_in[1], *(out[i] for i in range(out.size(0)))], dim=-1)
# save the output image
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{aug_name}.png"), out_np)
sig = f"{aug_name}({', '.join([str(a) for a in args])}, p=1.0)"
print(f"Generated image example for {aug_name}. {sig}")
mod = importlib.import_module("kornia.color")
color_transforms_list: dict = {
"grayscale_to_rgb": ((), 3),
"rgb_to_bgr": ((), 1),
"rgb_to_grayscale": ((), 1),
"rgb_to_hsv": ((), 1),
"rgb_to_hls": ((), 1),
"rgb_to_luv": ((), 1),
"rgb_to_lab": ((), 1),
# "rgb_to_rgba": ((1.,), 1),
"rgb_to_xyz": ((), 1),
"rgb_to_ycbcr": ((), 1),
"rgb_to_yuv": ((), 1),
"rgb_to_linear_rgb": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in color_transforms_list.items():
# import function and apply
fn = getattr(mod, fn_name)
if fn_name == "grayscale_to_rgb":
out = fn(K.color.rgb_to_grayscale(img2), *args)
else:
out = fn(img2, *args)
# perform normalization to visualize
if fn_name == "rgb_to_lab":
out = out[:, :1] / 100.0
elif fn_name == "rgb_to_hsv":
out[:, :1] = out[:, :1] / 2 * math.pi
elif fn_name == "rgb_to_luv":
out = out[:, :1] / 116.0
# repeat channels for grayscale
if out.shape[1] != 3:
out = out.repeat(1, 3, 1, 1)
# save the output image
if fn_name == "grayscale_to_rgb":
out = torch.cat(
[K.color.rgb_to_grayscale(img2[0]).repeat(3, 1, 1), *(out[i] for i in range(out.size(0)))], dim=-1
)
else:
out = torch.cat([img2[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.enhance module
mod = importlib.import_module("kornia.enhance")
transforms: dict = {
"adjust_brightness": ((torch.tensor([0.25, 0.5]),), 2),
"adjust_contrast": ((torch.tensor([0.65, 0.5]),), 2),
"adjust_gamma": ((torch.tensor([0.85, 0.75]), 2.0), 2),
"adjust_hue": ((torch.tensor([-math.pi / 4, math.pi / 4]),), 2),
"adjust_saturation": ((torch.tensor([1.0, 2.0]),), 2),
"solarize": ((torch.tensor([0.8, 0.5]), torch.tensor([-0.25, 0.25])), 2),
"posterize": ((torch.tensor([4, 2]),), 2),
"sharpness": ((torch.tensor([1.0, 2.5]),), 2),
"equalize": ((), 1),
"invert": ((), 1),
"equalize_clahe": ((), 1),
"add_weighted": ((0.75, 0.25, 2.0), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img3.repeat(num_samples, 1, 1, 1)
if fn_name == "add_weighted":
args_in = (img_in, args[0], img2, args[1], args[2])
else:
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.morphology module
mod = importlib.import_module("kornia.morphology")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"dilation": ((kernel,), 1),
"erosion": ((kernel,), 1),
"opening": ((kernel,), 1),
"closing": ((kernel,), 1),
"gradient": ((kernel,), 1),
"top_hat": ((kernel,), 1),
"bottom_hat": ((kernel,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img4.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.filters module
mod = importlib.import_module("kornia.filters")
kernel = torch.tensor([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
transforms: dict = {
"box_blur": (((5, 5),), 1),
"median_blur": (((5, 5),), 1),
"gaussian_blur2d": (((5, 5), (1.5, 1.5)), 1),
"motion_blur": ((5, 90.0, 1.0), 1),
"max_blur_pool2d": ((5,), 1),
"blur_pool2d": ((5,), 1),
"unsharp_mask": (((5, 5), (1.5, 1.5)), 1),
"laplacian": ((5,), 1),
"sobel": ((), 1),
"spatial_gradient": ((), 1),
"canny": ((), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img5.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("max_blur_pool2d", "blur_pool2d"):
out = K.geometry.resize(out, img_in.shape[-2:])
if fn_name == "canny":
out = out[1].repeat(1, 3, 1, 1)
if isinstance(out, torch.Tensor):
out = out.clamp(min=0.0, max=1.0)
if fn_name in ("laplacian", "sobel", "spatial_gradient", "canny"):
out = K.enhance.normalize_min_max(out)
if fn_name == "spatial_gradient":
out = out.permute(2, 1, 0, 3, 4).squeeze()
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
# kornia.geometry.transform module
mod = importlib.import_module("kornia.geometry.transform")
h, w = img6.shape[-2:]
def _get_tps_args():
src = torch.tensor([[[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0], [0.0, 0.0]]]).repeat(2, 1, 1) # Bx5x2
dst = src + torch.distributions.Uniform(-0.2, 0.2).rsample((2, 5, 2))
kernel, affine = K.geometry.transform.get_tps_transform(dst, src)
return src, kernel, affine
transforms: dict = {
"warp_affine": (
(
K.geometry.transform.get_affine_matrix2d(
translations=torch.zeros(2, 2),
center=(torch.tensor([w, h]) / 2).repeat(2, 1),
scale=torch.distributions.Uniform(0.5, 1.5).rsample((2, 2)),
angle=torch.tensor([-25.0, 25.0]),
)[:, :2, :3],
(h, w),
),
2,
),
"remap": (
(
*(K.utils.create_meshgrid(h, w, normalized_coordinates=True) - 0.25).unbind(-1),
'bilinear',
'zeros',
True,
True,
),
1,
),
"warp_image_tps": ((_get_tps_args()), 2),
"rotate": ((torch.tensor([-15.0, 25.0]),), 2),
"translate": ((torch.tensor([[10.0, -15], [50.0, -25.0]]),), 2),
"scale": ((torch.tensor([[0.5, 1.25], [1.0, 1.5]]),), 2),
"shear": ((torch.tensor([[0.1, -0.2], [-0.2, 0.1]]),), 2),
"rot180": ((), 1),
"hflip": ((), 1),
"vflip": ((), 1),
"resize": (((120, 220),), 1),
"rescale": ((0.5,), 1),
"elastic_transform2d": ((torch.rand(1, 2, h, w) * 2 - 1, (63, 63), (32, 32), (4.0, 4.0)), 1),
"pyrdown": ((), 1),
"pyrup": ((), 1),
"build_pyramid": ((3,), 1),
}
# ITERATE OVER THE TRANSFORMS
for fn_name, (args, num_samples) in transforms.items():
img_in = img6.repeat(num_samples, 1, 1, 1)
args_in = (img_in, *args)
# import function and apply
fn = getattr(mod, fn_name)
out = fn(*args_in)
if fn_name in ("resize", "rescale", "pyrdown", "pyrup"):
h_new, w_new = out.shape[-2:]
out = torch.nn.functional.pad(out, (0, (w - w_new), 0, (h - h_new)))
if fn_name == "build_pyramid":
_out = []
for pyr in out[1:]:
h_new, w_new = pyr.shape[-2:]
out_tmp = torch.nn.functional.pad(pyr, (0, (w - w_new), 0, (h - h_new)))
_out.append(out_tmp)
out = torch.cat(_out)
# save the output image
out = torch.cat([img_in[0], *(out[i] for i in range(out.size(0)))], dim=-1)
out_np = K.utils.tensor_to_image((out * 255.0).byte())
cv2.imwrite(str(OUTPUT_PATH / f"{fn_name}.png"), out_np)
sig = f"{fn_name}({', '.join([str(a) for a in args])})"
print(f"Generated image example for {fn_name}. {sig}")
if __name__ == "__main__":
main()
forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py | Pushkar-Bhuse/forte | 0 | 8662 | <filename>forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py
# Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Tuple
import numpy as np
from texar.torch.data import Vocab, Embedding
from ft.onto.base_ontology import Annotation
from forte.common.configuration import Config
from forte.processors.data_augment.algorithms.text_replacement_op import (
TextReplacementOp,
)
__all__ = [
"EmbeddingSimilarityReplacementOp",
]
class EmbeddingSimilarityReplacementOp(TextReplacementOp):
r"""
This class is a replacement op leveraging pre-trained word
embeddings, such as `word2vec` and `glove`, to replace the input
word with another word with similar word embedding.
By default, the replacement word is randomly chosen from the
top k words with the most similar embeddings.
Args:
configs:
The config should contain the following key-value pairs:
- vocab_path (str): The absolute path to the vocabulary file for
the pretrained embeddings
- embed_hparams (dict): The hparams to initialize the
texar.torch.data.Embedding object.
- top_k (int): the number of k most similar words to choose from
"""
def __init__(self, configs: Config):
super().__init__(configs)
self.vocab = Vocab(self.configs["vocab_path"])
embed_hparams = self.configs["embed_hparams"]
embedding = Embedding(self.vocab.token_to_id_map_py, embed_hparams)
self.normalized_vectors = (
embedding.word_vecs
/ np.sqrt((embedding.word_vecs**2).sum(axis=1))[:, np.newaxis]
)
def replace(self, input_anno: Annotation) -> Tuple[bool, str]:
r"""
This function replaces a word with another word that has a similar
pretrained embedding.
Args:
input_anno (Annotation): The input annotation.
Returns:
A tuple of two values, where the first element is a boolean value
indicating whether the replacement happens, and the second
element is the replaced word.
"""
word = input_anno.text
if word not in self.vocab.token_to_id_map_py:
return False, word
source_id = self.vocab.token_to_id_map_py[word]
source_vector = self.normalized_vectors[source_id]
scores = np.dot(self.normalized_vectors, source_vector)
target_ids = np.argpartition(-scores, self.configs["top_k"] + 1)[
: self.configs["top_k"] + 1
]
target_words = [
self.vocab.id_to_token_map_py[idx]
for idx in target_ids
if idx != source_id
and self.vocab.id_to_token_map_py[idx].lower() != word.lower()
]
return True, random.choice(target_words)
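# Hedged usage sketch (editor's addition, not part of the original module).
# The keys mirror the class docstring above; the vocabulary file name is
# hypothetical and the exact Config construction is an assumption.
#
# configs = Config({"vocab_path": "embeddings.vocab",
# "embed_hparams": {...}, # texar.torch.data.Embedding hparams
# "top_k": 5}, None)
# op = EmbeddingSimilarityReplacementOp(configs)
# replaced, new_word = op.replace(token) # token: an Annotation covering one word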
src/sentry/receivers/experiments.py | FelixSchwarz/sentry | 0 | 8663 | from __future__ import print_function, absolute_import
from sentry import analytics
from sentry.signals import join_request_created, join_request_link_viewed
@join_request_created.connect(weak=False)
def record_join_request_created(member, **kwargs):
analytics.record(
"join_request.created", member_id=member.id, organization_id=member.organization_id
)
@join_request_link_viewed.connect(weak=False)
def record_join_request_link_viewed(organization, **kwargs):
analytics.record("join_request.link_viewed", organization_id=organization.id)
arturtamborskipl/urls.py | arturtamborski/arturtamborskipl | 1 | 8664 | from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from django.conf import settings
from blog.sitemaps import ArticleSitemap
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}}, name='sitemap'),
url(r'^', include('blog.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
ion_functions/qc/qc_functions.py | steinermg/ion-functions | 10 | 8665 | #!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
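# Editor's illustrative example (not part of the DPS document): values inside
# the [min, max] envelope are flagged 1, values outside are flagged 0, e.g.
# dataqc_globalrangetest(np.array([3.0, 10.0, 25.0]), [0.0, 20.0])
# should return [1, 1, 0].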
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
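# Editor's illustrative example (assumed values, not from the DPS): with a
# depth-dependent envelope defined at two depths, each value is tested against
# limits interpolated to its own depth, e.g.
# dat = np.array([10.0, 15.0]); z = np.array([5.0, 50.0])
# datlim = np.array([[0.0, 20.0], [0.0, 5.0]]); datlimz = np.array([0.0, 100.0])
# dataqc_localrangetest(dat, z, datlim, datlimz) should return [1, 0].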
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
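# Editor's illustrative example (assumed values): a single outlier in an
# otherwise flat record should be the only point flagged bad, e.g.
# dat = np.array([1.0, 1.0, 1.0, 20.0, 1.0, 1.0, 1.0])
# dataqc_spiketest(dat, acc=0.1) should return [1, 1, 1, 0, 1, 1, 1].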
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to numpy's polyfit routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
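# Editor's illustrative example (assumed values): a record dominated by a
# linear drift should return all zeros, while detrended noise should pass, e.g.
# t = np.arange(100.0)
# dataqc_polytrendtest(0.05 * t, t, ord_n=1, nstd=3) should return all 0, and
# dataqc_polytrendtest(np.random.randn(100), t, ord_n=1, nstd=3) typically all 1.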
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_stuckvaluetest(x, reso, num)
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
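# Editor's illustrative example (assumed values): twelve identical readings
# with num=10 should be flagged as stuck, e.g.
# dataqc_stuckvaluetest(np.array([3.2] * 12), reso=0.001, num=10)
# should return all zeros, while a freely varying record returns all ones.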
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be monotonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here, they are scalards and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
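# Editor's illustrative example (assumed values): with ddatdx = [-1, 1] a jump
# of ~10 units between consecutive x steps is flagged bad until the series
# returns to within toldat of the last good value, e.g.
# dat = np.array([3.0, 3.1, 13.0, 3.2, 3.3]); x = np.arange(5.0)
# dataqc_gradienttest(dat, x, [-1, 1], 0, 3.0, 0.5)
# should flag only the third point as 0.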
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
References:
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
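# Editor's illustrative example: flags combine as a logical AND across the
# source data sets, e.g.
# inflags = np.array([[1, 0, 1, 1], [1, 1, 0, 1]])
# dataqc_propagateflags(inflags) returns [1, 0, 0, 1].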
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivity record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
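# Editor's illustrative example: the correction only rescales the record by
# the ratio of the two pressure terms, so with p_orig == p_new it is a no-op,
# e.g. dataqc_condcompress(1000.0, 1000.0, 3.5) returns 3.5, while
# dataqc_condcompress(1000.0, 2000.0, 3.5) returns a value slightly above 3.5.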
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
return (datlim.min() <= dat) & (dat <= datlim.max()).astype('int8')
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(datlimz) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
raise ValueError('Len of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
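# --- Editor's note: illustrative sketch, not part of the original OOI module.
# The _example_* helper and all sample values below are hypothetical; numpy is
# imported locally so the snippet stays self-contained.
def _example_dataqc_localrangetest():
    import numpy as np
    dat = np.array([10.0, 35.0, 30.0])              # measurements
    z = np.array([0.0, 50.0, 100.0])                # measurement locations
    datlimz = np.array([0.0, 100.0])                # locations where limits are given
    datlim = np.array([[0.0, 15.0], [0.0, 40.0]])   # [min, max] at each location
    # The limits are linearly interpolated to z; 35.0 exceeds the interpolated
    # maximum of 27.5 at z=50, so the expected flags are [1, 0, 1].
    return dataqc_localrangetest(dat, z, datlim, datlimz)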
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
For points more than (L-1)/2 values away from the start or end of the
series, the peers are taken symmetrically before and after the test
value. Closer to the start or end, the peers are the first/last L
values (excluding the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above.
L = (optional, defaults to 5) Window length, cf. above.
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
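# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample values are hypothetical; the flags themselves come from the compiled
# spikevalues() helper imported elsewhere in this module.
def _example_dataqc_spiketest():
    import numpy as np
    dat = np.array([3.1, 3.2, 3.1, 12.0, 3.3, 3.2, 3.1, 3.0])
    # 12.0 deviates from the mean of its window peers by far more than
    # N * max(R, acc), so it should be flagged 0 and the rest flagged 1.
    return dataqc_spiketest(dat, acc=0.1, N=5, L=5)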
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(nstd):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift.
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by which the standard
deviation must be reduced before qcflag switches from 1 to 0.
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# ensure output size equals input, even though the test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
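# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample values are hypothetical.
def _example_dataqc_polytrendtest():
    import numpy as np
    t = np.arange(10.0)
    noise = np.array([0.01, -0.02, 0.0, 0.02, -0.01, 0.0, 0.01, -0.02, 0.02, 0.0])
    dat = 0.5 * t + noise
    # Nearly all of the variance is explained by the order-1 fit, so the
    # residual standard deviation times nstd stays below that of dat and
    # every flag should come back 0 (trend detected).
    return dataqc_polytrendtest(dat, t, ord_n=1, nstd=3)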
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurrences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_stuckvaluetest(x, reso, num)
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
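# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample values are hypothetical; flags come from the compiled stuckvalues()
# helper imported elsewhere in this module.
def _example_dataqc_stuckvaluetest():
    import numpy as np
    x = np.array([4.83] * 12 + [4.91, 5.02, 5.13])
    # Twelve consecutive readings repeat within reso of each other, which
    # exceeds num=10, so those points should be flagged 0 and the rest 1.
    return dataqc_stuckvaluetest(x, reso=0.001, num=10)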
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-10010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
raise ValueError('\'dat\' and \'x\' must be of equal len')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be monotonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here; these are scalars and they must be validated
# before going into the C layer.
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
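# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample values are hypothetical; flags come from the compiled
# gradientvalues() helper imported elsewhere in this module.
def _example_dataqc_gradienttest():
    import numpy as np
    x = np.arange(6.0)
    dat = np.array([3.0, 3.1, 3.2, 8.5, 3.3, 3.4])
    # The jump to 8.5 exceeds the allowed ddat/dx range of [-1, 1], so that
    # point should be flagged 0 until dat returns to within toldat of the
    # last known good value.
    return dataqc_gradienttest(dat, x, ddatdx=[-1.0, 1.0], mindx=0.0,
                               startdat=3.0, toldat=0.5)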
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degrees]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
References:
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-10011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
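# --- Editor's note: illustrative sketch, not part of the original OOI module.
# It simply re-runs the docstring example with one-element arrays, since the
# implementation expects sized (vector) inputs rather than bare scalars.
def _example_dataqc_solarelevation():
    import numpy as np
    dt = np.array([1329177600])      # 2012-02-14 00:00:00 UTC
    lon = np.array([120.0])
    lat = np.array([30.0])
    # Per the docstring example above, this should give z ~= 15.16 degrees
    # and sorad ~= 366.8 W m^-2.
    return dataqc_solarelevation(lon, lat, dt)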
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-10012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
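# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample flags are hypothetical.
def _example_dataqc_propagateflags():
    import numpy as np
    inflags = np.array([[1, 1, 0, 1],
                        [1, 0, 1, 1]], dtype=np.int8)
    # Column-wise AND of the two source flag vectors: expected result is
    # [1, 0, 0, 1], i.e. the derived product is good only where every
    # source dataset is good.
    return dataqc_propagateflags(inflags)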
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this is typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivity record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
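# --- Editor's note: illustrative sketch, not part of the original OOI module.
# Sample values are hypothetical.
def _example_dataqc_condcompress():
    import numpy as np
    p_orig = np.array([100.0, 150.0])   # original pressure [dbar]
    p_new = np.array([101.0, 151.0])    # updated pressure [dbar]
    c_orig = np.array([3.5, 3.6])       # original conductivity [S/m]
    # Rescales conductivity by (1 + cpcor*p_orig) / (1 + cpcor*p_new); with
    # the default cpcor the correction is tiny (order 1e-7 S/m here).
    return dataqc_condcompress(p_orig, p_new, c_orig)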
| en | 0.734727 | #!/usr/bin/env python @package ion_functions.qc_functions @file ion_functions/qc_functions.py @author <NAME> @brief Module containing QC functions ported from matlab samples in DPS documents # try to load the OOI logging module, using default Python logging module if # unavailable # Not the normal fill value, it's hardcoded to the QC params Python wrapper for dataqc_globalrangetest Combines the min/max arguments into list for dataqc_globalrangetest Description: Data quality control algorithm testing if measurements fall into a user-defined valid range. Returns 1 for presumably good data and 0 for data presumed bad. Implemented by: 2010-07-28: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. 2013-05-30: <NAME>. Performance improvements by adding strict_validation flag. Usage: qcflag = dataqc_globalrangetest(dat, datlim, strict_validation) where qcflag = Boolean, 0 if value is outside range, else = 1. dat = Input dataset, any scalar or vector. Must be numeric and real. datlim = Two-element vector with the minimum and maximum values considered to be valid. strict_validation = Flag (default is False) to assert testing of input types (e.g. isreal, isnumeric) References: OOI (2012). Data Product Specification for Global Range Test. Document Control Number 1341-10004. https://alfresco.oceanobservatories.org (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf) # Must have at least 2 elements # Convert time vector to vector of months # Fetch the dimension from the callback method Description: Data quality control algorithm testing if measurements fall into a user-defined valid range. This range is not constant but varies with measurement location. Returns 1 for presumably good data and 0 for data presumed bad. Implemented by: 2012-07-17: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. Usage: qcflag = dataqc_localrangetest(dat, z, datlim, datlimz) where qcflag = Boolean, 0 if value is outside range, else = 1. dat = input data set, a numeric real scalar or column vector. z = location of measurement dat. must have same # of rows as dat and same # of columns as datlimz datlim = two column array with the minimum (column 1) and maximum (column 2) values considered valid. datlimz = array with the locations where datlim is given. must have same # of rows as datlim and same # of columns as z. References: OOI (2012). Data Product Specification for Local Range Test. Document Control Number 1341-10005. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf) # check if dat and datlim are matrices # check if all inputs are numeric and real # test size and shape of the input arrays datlimz and datlim, setting test # variables. 
# test the size and shape of the z input array # test datlim, values in column 2 must be greater than those in column 1 # calculate the upper and lower limits for the data set # determine the lower limits using linear interpolation # determine the upper limits using linear interpolation # Compute Delaunay Triangulation and use linear interpolation to # determine the N-dimensional lower limits # Compute Delaunay Triangulation and use linear interpolation to # determine the N-dimensional upper limits # replace NaNs from above interpolations # compute the qcflags Description: Data quality control algorithm testing a time series for spikes. Returns 1 for presumably good data and 0 for data presumed bad. The time series is divided into windows of len L (an odd integer number). Then, window by window, each value is compared to its (L-1) neighboring values: a range R of these (L-1) values is computed (max. minus min.), and replaced with the measurement accuracy ACC if ACC>R. A value is presumed to be good, i.e. no spike, if it deviates from the mean of the (L-1) peers by less than a multiple of the range, N*max(R,ACC). Further than (L-1)/2 values from the start or end points, the peer values are symmetrically before and after the test value. Within that range of the start and end, the peers are the first/last L values (without the test value itself). The purpose of ACC is to restrict spike detection to deviations exceeding a minimum threshold value (N*ACC) even if the data have little variability. Use ACC=0 to disable this behavior. Implemented by: 2012-07-28: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. 2013-05-30: <NAME>. Performance optimizations. Usage: qcflag = dataqc_spiketest(dat, acc, N, L) where qcflag = Boolean, 0 if value is outside range, else = 1. dat = input data set, a numeric, real vector. acc = Accuracy of any input measurement. N = (optional, defaults to 5) Range multiplier, cf. above L = (optional, defaults to 5) Window len, cf. above References: OOI (2012). Data Product Specification for Spike Test. Document Control Number 1341-10006. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf) Description: Data quality control algorithm testing if measurements contain a significant portion of a polynomial. Returns 1 if this is not the case, else 0. The purpose of this test is to check if a significant fraction of the variability in a time series can be explained by a drift, possibly interpreted as a sensor drift. This drift is assumed to be a polynomial of order ORD. Use ORD=1 to consider a linear drift The time series dat is passed to MatLab's POLYFIT routine to obtain a polynomial fit PP to dat, and the difference dat-PP is compared to the original dat. If the standard deviation of (dat-PP) is less than that of dat by a factor of NSTD, the time series is assumed to contain a significant trend (output will be 0), else not (output will be 1). Implemented by: 2012-10-29: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. 2013-05-30: <NAME>. Performance optimizations. Usage: qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation) where qcflag = Boolean, 0 a trend is detected, 1 elsewhere. dat = Input dataset, a numeric real vector. t = time record associated with dat ord_n (optional, defaults to 1) = Polynomial order. 
nstd (optional, defaults to 3) = Factor by how much the standard deviation must be reduced before qcflag switches from 1 to 0 strict_validation (optional, defaults to False) = Flag asserting testing of inputs. References: OOI (2012). Data Product Specification for Trend Test. Document Control Number 1341-10007. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf) # Not needed because time is incorporated as 't' # t = range(ll) # test for a trend # insure output size equals input, even though test yields a single value. Description: Data quality control algorithm testing a time series for "stuck values", i.e. repeated occurences of one value. Returns 1 for presumably good data and 0 for data presumed bad. Implemented by: 2012-10-29: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. Usage: qcflag = =dataqc_stuckvaluetest(x, RESO, NUM); where qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere. x = Input time series (vector, numeric). reso = Resolution; repeat values less than reso apart will be considered "stuck values". num = Minimum number of successive values within reso of each other that will trigger the "stuck value". num is optional and defaults to 10 if omitted or empty. References: OOI (2012). Data Product Specification for Stuck Value Test. Document Control Number 1341-10008. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf) # Warn - 'num' is greater than len(x), returning zeros Description Data quality control algorithm testing if changes between successive data points fall within a certain range. Input data dat are given as a function of coordinate x. The algorithm will flag dat values as bad if the change deltaDAT/deltaX between successive dat values exceeds thresholds given in ddatdx. Once the threshold is exceeded, following dat are considered bad until a dat value returns to within toldat of the last known good value. It is possible to remove data points that are too close together in x coordinates (use mindx). By default, the first value of dat is considered good. To change this, use startdat and toldat to set as the first good data point the first one that comes within toldat of startdat. Implemented by: 2012-07-17: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. Usage: outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat); where outdat = same as dat except that NaNs and values not meeting mindx are removed. outx = same as x except that NaNs and values not meeting mindx are removed. outqc = output quality control flags for outdat. 0 means bad data, 1 means good data. dat = input dataset, a numeric real vector. x = coordinate (e.g. time, distance) along which dat is given. Must be of the same size as dat and strictly increasing. ddatdx = two-element vector defining the valid range of ddat/dx from one point to the next. mindx = scalar. minimum dx for which this test will be applied (data that are less than mindx apart will be deleted). defaults to zero if NaN/empty. startdat = start value (scalar) of dat that is presumed good. defaults to first non-NaN value of dat if NaN/empty. 
toldat = tolerance value (scalar) for dat; threshold to within which dat must return to be counted as good, after exceeding a ddatdx threshold detected bad data. References: OOI (2012). Data Product Specification for Gradient Test. Document Control Number 1341-100010. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf) # No strict validation here, they are scalards and they must be validated # before going into the C-layer # Confirm that there are still data points left, else abort: Description Computes instantaneous no-sky solar radiation and altitude from date and time stamp and position data. It is put together from expressions taken from Appendix E in the 1978 edition of Almanac for Computers, Nautical Almanac Office, U.S. Naval Observatory. They are reduced accuracy expressions valid for the years 1800-2100. Solar declination computed from these expressions is accurate to at least 1'. The solar constant (1368.0 W/m^2) represents a mean of satellite measurements made over the last sunspot cycle (1979-1995) taken from Coffey et al (1995), Earth System Monitor, 6, 6-10. This code is a python implementation of soradna1.m available in Air-Sea Toolbox. Implemented by: 1997-03-08: Version 1.0 (author unknown) of soradna1.m. 1998-08-28: Version 1.1 (author unknown) of soradna1.m. 1999-08-05: Version 2.0 (author unknown) of soradna1.m. 2013-04-07: <NAME>. Initial python implementation. Note, this function is derived from old, unmaintained code. More robust implementations exist (e.g. PyEphem and PySolar) that will probably calculate these values more accurately. Usage: z, sorad = dataqc_solarelevation(lon, lat, dt) where z = solar altitude [degrees] sorad = no atmosphere solar radiation [W m^-2] lon = longitude (east is positive) [decimal degress] lat = latitude [decimal degrees] dt = date and time stamp in UTC [seconds since 1970-01-01] Examples dt = 1329177600 # 2012-02-14 00:00:00 z, sorad = dataqc_solarelevation(120, 30, dt) z = 15.1566, sorad = 366.8129 OOI (2012). Data Product Specification for Solar Elevation. Document Control Number 1341-100011. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf) # Test lengths and types of inputs. Latitude and longitude must be the same # size and can either be a scalar or a vecotr. The date and time stamp # can also be either a scalar or a vector. If all three inputs are vectors, # they must be of the same length. # test their lengths # set constants (using values from as_consts.m) # ------ short-wave flux calculations # the solar constant [W m^-2] represents a mean of satellite measurements # made over the last sunspot cycle (1979-1995), taken from Coffey et al. # (1995), Earth System Monitor, 6, 6-10. # Create a time tuple in UTC from the Epoch time input, and then create # scalars or numpy arrays of time elements for subsequent calculations. # create time tuple in UTC # create scalar elements #constants used in function # compute Universal Time in hours # compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. 
which # equals -4712 Jan 1) # compute interval in Julian centuries since 1900 # compute mean anomaly of the sun # compute mean longitude of sun # compute mean anomaly of Jupiter # compute longitude of the ascending node of the moon's orbit # compute mean anomaly of Venus # compute sun theta # compute sun rho # compute declination # compute equation of time (in seconds of time) # compute local hour angle from global hour angle # compute radius vector # compute solar altitude # compute solar radiation outside atmosphere (defaults to 0 when solar # altitude is below the horizon) This is a function that wraps dataqc_propagateflags for use in ION It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags Description: Propagate "bad" qc flags (from an arbitrary number of source datasets) to another (derived) dataset. Consider data from an oceanographic CTD (conductivity, temperature, and pressure) instrument. From these three time series, you want to compute salinity. If any of the three source data (conductivity, temperature, pressure) is of bad quality, the salinity will be bad as well. You can feed your QC assessment of the former three into this routine, which will then give you the combined assessment for the derived (here: salinity) property. Implemented by: 2012-07-17: DPS authored by <NAME>. Example code provided for Matlab. 2013-04-06: <NAME>. Initial python implementation. Usage: outflag = dataqc_propagateflags(inflags) where outflag = a 1-by-N boolean vector that contains 1 where all of the inflags are 1, and 0 otherwise. inflags = an M-by-N boolean matrix, where each of the M rows contains flags of an independent data set such that "0" means bad data and "1" means good data. References: OOI (2012). Data Product Specification for Combined QC Flags. Document Control Number 1341-100012. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf) Description: Implementation of the Sea-Bird conductivity compressibility correction, scaling the input conductivity based on ratio of the original pressure and the updated pressure. Implemented by: 2013-04-07: Christopher Wingard. Initial python implementation. Usage: c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor) where c_new = updated conductivity record [S/m] p_orig = original pressure used to calculate original conductivity, this typically the L1a PRESWAT [dbar] p_new = updated pressure, typically L1b PRESWAT [dbar] c_orig = original conductivty record, typically L1a CONDWAT [S/m] cpcor = pressure correction coefficient used to calculate original conductivity, default is -9.57e-8 References: OOI (2012). Data Product Specification for Conductivity Compressibility Correction. Document Control Number 1341-10030. https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled >> 1000 System Level >> 1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf) | 2.171414 | 2 |
datatest/__past__/api08.py | avshalomt2/datatest | 0 | 8666 | <reponame>avshalomt2/datatest<filename>datatest/__past__/api08.py
"""Backward compatibility for version 0.8 API."""
from __future__ import absolute_import
import inspect
import datatest
from datatest._compatibility import itertools
from datatest._compatibility.collections.abc import Sequence
from datatest._load.get_reader import get_reader
from datatest._load.load_csv import load_csv
from datatest._load.temptable import load_data
from datatest._load.temptable import new_table_name
from datatest._load.temptable import savepoint
from datatest._load.temptable import table_exists
from datatest._query.query import DEFAULT_CONNECTION
from datatest._query.query import BaseElement
from datatest._utils import file_types
from datatest._utils import string_types
from datatest._utils import iterpeek
from datatest.allowance import BaseAllowance
from datatest import Invalid
from datatest.difference import BaseDifference
from datatest.difference import NOTFOUND
datatest.DataResult = datatest.Result
class DataQuery(datatest.Query):
def __call__(self, *args, **kwds):
self.execute(*args, **kwds)
datatest.DataQuery = DataQuery
class DataSource(datatest.Selector):
def __init__(self, data, fieldnames=None):
first_value, iterator = iterpeek(data)
if isinstance(first_value, dict):
if not fieldnames:
fieldnames = list(first_value.keys())
super(DataSource, self).__init__(iterator, fieldnames)
else:
if fieldnames:
iterator = itertools.chain([fieldnames], iterator)
super(DataSource, self).__init__(iterator)
@classmethod
def from_csv(cls, file, encoding=None, **fmtparams):
if isinstance(file, string_types) or isinstance(file, file_types):
data_list = [file]
else:
data_list = file
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
for obj in data_list:
load_csv(cursor, table, obj, encoding=encoding, **fmtparams)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = file
new_cls._args = (encoding,)
new_cls._kwds = fmtparams
new_cls._update_list = []
return new_cls
@classmethod
def from_excel(cls, path, worksheet=0):
new_cls = cls.__new__(cls)
new_cls._connection = DEFAULT_CONNECTION
cursor = new_cls._connection.cursor()
with savepoint(cursor):
table = new_table_name(cursor)
reader = get_reader.from_excel(path, worksheet=worksheet)
load_data(cursor, table, reader)
new_cls._table = table if table_exists(cursor, table) else None
new_cls._data = path
new_cls._args = tuple()
new_cls._kwds = dict()
if worksheet != 0:
new_cls._kwds['worksheet'] = worksheet
new_cls._update_list = []
return new_cls
def columns(self, type=list): # Removed in datatest 0.8.2
return type(self.fieldnames)
datatest.DataSource = DataSource
class allowed_key(BaseAllowance):
"""The given *function* should accept a number of arguments
equal to the number of given key elements. If key is a single value (string
or otherwise), *function* should accept one argument. If key
is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_key, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
key = item[0]
if not isinstance(key, tuple) and isinstance(key, BaseElement):
return self.function(key)
return self.function(*key)
datatest.allowed_key = allowed_key
class allowed_args(BaseAllowance):
"""The given *function* should accept a number of arguments equal
to the number of elements in the 'args' attribute. If args is a single
value (string or otherwise), *function* should accept one argument.
If args is a three-tuple, *function* should accept three arguments.
"""
def __init__(self, function, msg=None):
super(allowed_args, self).__init__(msg)
self.function = function
def __repr__(self):
cls_name = self.__class__.__name__
msg_part = ', msg={0!r}'.format(self.msg) if self.msg else ''
return '{0}({1!r}{2})'.format(cls_name, self.function, msg_part)
def call_predicate(self, item):
args = item[1].args
if not isinstance(args, tuple) and isinstance(args, BaseElement):
return self.function(args)
return self.function(*args)
datatest.allowed_args = allowed_args
def get_subject(self):
if hasattr(self, '_subject_data'):
return self._subject_data
return self._find_data_source('subject')
def set_subject(self, value):
self._subject_data = value
datatest.DataTestCase.subject = property(get_subject, set_subject)
def get_reference(self):
if hasattr(self, '_reference_data'):
return self._reference_data
return self._find_data_source('reference')
def set_reference(self, value):
self._reference_data = value
datatest.DataTestCase.reference = property(get_reference, set_reference)
def _find_data_source(name):
stack = inspect.stack()
stack.pop() # Skip record of current frame.
for record in stack: # Bubble-up stack looking for name.
frame = record[0]
if name in frame.f_globals:
return frame.f_globals[name] # <- EXIT!
raise NameError('cannot find {0!r}'.format(name))
datatest.DataTestCase._find_data_source = staticmethod(_find_data_source)
def allowedKey(self, function, msg=None):
"""Allows differences in a mapping where *function* returns True.
For each difference, function will receive the associated mapping
**key** unpacked into one or more arguments.
"""
return allowed_key(function, msg)
datatest.DataTestCase.allowedKey = allowedKey
def allowedArgs(self, function, msg=None):
"""Allows differences where *function* returns True. For the
'args' attribute of each difference (a tuple), *function* must
accept the number of arguments unpacked from 'args'.
"""
return allowed_args(function, msg)
datatest.DataTestCase.allowedArgs = allowedArgs
def _require_sequence(data, sequence): # New behavior in datatest 0.8.3
"""Compare *data* against a *sequence* of values. Stops at the
first difference found and returns an AssertionError. If no
differences are found, returns None.
"""
if isinstance(data, str):
raise ValueError("uncomparable types: 'str' and sequence type")
data_type = getattr(data, 'evaluation_type', data.__class__)
if not issubclass(data_type, Sequence):
type_name = data_type.__name__
msg = "expected sequence type, but got " + repr(type_name)
raise ValueError(msg)
message_prefix = None
previous_element = NOTFOUND
zipped = itertools.zip_longest(data, sequence, fillvalue=NOTFOUND)
for index, (actual, expected) in enumerate(zipped):
if actual == expected:
previous_element = actual
continue
if actual == NOTFOUND:
message_prefix = ('Data sequence is missing '
'elements starting with index {0}').format(index)
message_suffix = 'Expected {0!r}'.format(expected)
elif expected == NOTFOUND:
message_prefix = ('Data sequence contains extra '
'elements starting with index {0}').format(index)
message_suffix = 'Found {0!r}'.format(actual)
else:
message_prefix = \
'Data sequence differs starting at index {0}'.format(index)
message_suffix = \
'Found {0!r}, expected {1!r}'.format(actual, expected)
break
else: # <- NOBREAK!
return None # <- EXIT!
leading_elements = []
if index > 1:
leading_elements.append('...')
if previous_element != NOTFOUND:
leading_elements.append(repr(previous_element))
actual_repr = repr(actual) if actual != NOTFOUND else '?????'
caret_underline = '^' * len(actual_repr)
trailing_elements = []
next_tuple = next(zipped, NOTFOUND)
if next_tuple != NOTFOUND:
trailing_elements.append(repr(next_tuple[0]))
if next(zipped, NOTFOUND) != NOTFOUND:
trailing_elements.append('...')
if leading_elements:
leading_string = ', '.join(leading_elements) + ', '
else:
leading_string = ''
leading_whitespace = ' ' * len(leading_string)
if trailing_elements:
trailing_string = ', ' + ', '.join(trailing_elements)
else:
trailing_string = ''
sequence_string = leading_string + actual_repr + trailing_string
message = '{0}:\n\n {1}\n {2}{3}\n{4}'.format(message_prefix,
sequence_string,
leading_whitespace,
caret_underline,
message_suffix)
return AssertionError(message)
datatest.validation._require_sequence = _require_sequence
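# --- Editor's note: illustrative sketch, not part of the original datatest
# module; it only exercises the backported helper defined above.
def _example_require_sequence():
    # Identical sequences: no difference, so the helper returns None.
    assert _require_sequence([1, 2, 3], [1, 2, 3]) is None
    # A mismatch at index 2 makes the helper return (not raise) an
    # AssertionError whose message points at the differing element.
    return _require_sequence([1, 2, 5], [1, 2, 3])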
def _require_callable(data, function):
if data is NOTFOUND:
return Invalid(None) # <- EXIT!
def wrapped(element):
try:
if isinstance(element, BaseElement):
returned_value = function(element)
else:
returned_value = function(*element)
except Exception:
returned_value = False # Raised errors count as False.
if returned_value == True:
return None # <- EXIT!
if returned_value == False:
return Invalid(element) # <- EXIT!
if isinstance(returned_value, BaseDifference):
return returned_value # <- EXIT!
callable_name = function.__name__
message = \
'{0!r} returned {1!r}, should return True, False or a difference instance'
raise TypeError(message.format(callable_name, returned_value))
if isinstance(data, BaseElement):
return wrapped(data) # <- EXIT!
results = (wrapped(elem) for elem in data)
diffs = (diff for diff in results if diff)
first_element, diffs = iterpeek(diffs)
if first_element: # If not empty, return diffs.
return diffs
return None
return None | en | 0.681111 | Backward compatibility for version 0.8 API. # Removed in datatest 0.8.2 The given *function* should accept a number of arguments equal the given key elements. If key is a single value (string or otherwise), *function* should accept one argument. If key is a three-tuple, *function* should accept three arguments. The given *function* should accept a number of arguments equal the given elements in the 'args' attribute. If args is a single value (string or otherwise), *function* should accept one argument. If args is a three-tuple, *function* should accept three arguments. # Skip record of current frame. # Bubble-up stack looking for name. # <- EXIT! Allows differences in a mapping where *function* returns True. For each difference, function will receive the associated mapping **key** unpacked into one or more arguments. Allows differences where *function* returns True. For the 'args' attribute of each difference (a tuple), *function* must accept the number of arguments unpacked from 'args'. # New behavior in datatest 0.8.3 Compare *data* against a *sequence* of values. Stops at the first difference found and returns an AssertionError. If no differences are found, returns None. # <- NOBREAK! # <- EXIT! # <- EXIT! # Raised errors count as False. # <- EXIT! # <- EXIT! # <- EXIT! # <- EXIT! # If not empty, return diffs. | 2.118623 | 2 |
lib/reinteract/editor.py | jonkuhn/reinteract-jk | 1 | 8667 | # Copyright 2008 <NAME>
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import gobject
import gtk
import pango
from application import application
from format_escaped import format_escaped
from notebook import NotebookFile
from shell_buffer import ShellBuffer
from shell_view import ShellView
from save_file import SaveFileBuilder
class Editor(gobject.GObject):
def __init__(self, notebook):
gobject.GObject.__init__(self)
self.notebook = notebook
self._unsaved_index = application.allocate_unsaved_index()
#######################################################
# Utility
#######################################################
def _clear_unsaved(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
def _update_filename(self, *args):
self.notify('filename')
self.notify('title')
def _update_modified(self, *args):
self.notify('modified')
self.notify('title')
def _update_state(self, *args):
self.notify('state')
def _update_file(self):
self.notify('file')
def __prompt_for_name(self, title, save_button_text, action, check_name=None):
builder = SaveFileBuilder(title, self._get_display_name(), save_button_text, check_name)
builder.dialog.set_transient_for(self.widget.get_toplevel())
if self._get_filename() != None:
builder.name_entry.set_text(os.path.basename(self._get_filename()))
while True:
response = builder.dialog.run()
if response != gtk.RESPONSE_OK:
break
raw_name = builder.name_entry.get_text()
error_message = None
try:
raw_name = application.validate_name(raw_name)
except ValueError, e:
error_message = e.message
if not error_message:
extension = "." + self._get_extension()
if not (raw_name.lower().endswith(extension)):
raw_name += extension
if not error_message:
fullname = os.path.join(self.notebook.folder, raw_name)
if os.path.exists(fullname):
error_message = "'%s' already exists" % raw_name
if error_message:
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_OK,
type=gtk.MESSAGE_ERROR)
dialog.set_markup("<big><b>Please choose a different name</b></big>")
dialog.format_secondary_text(error_message)
dialog.run()
dialog.destroy()
continue
action(fullname)
break
builder.dialog.destroy()
#######################################################
# Implemented by subclasses
#######################################################
def _get_display_name(self):
raise NotImplementedError()
def _get_modified(self):
raise NotImplementedError()
def _get_state(self):
return NotebookFile.NONE
    def _get_filename(self):
        raise NotImplementedError()
    def _get_file(self):
        raise NotImplementedError()
    def _get_extension(self):
        raise NotImplementedError()
    def _save(self, filename):
        raise NotImplementedError()
#######################################################
# Public API
#######################################################
def close(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
self.widget.destroy()
def confirm_discard(self, before_quit=False):
if not self.modified:
return True
if before_quit:
message_format = self.DISCARD_FORMAT_BEFORE_QUIT
continue_button_text = '_Quit without saving'
else:
message_format = self.DISCARD_FORMAT
continue_button_text = '_Discard'
if self._get_filename() == None:
save_button_text = gtk.STOCK_SAVE_AS
else:
save_button_text = gtk.STOCK_SAVE
message = format_escaped("<big><b>" + message_format + "</b></big>", self._get_display_name())
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_NONE,
type=gtk.MESSAGE_WARNING)
dialog.set_markup(message)
dialog.add_buttons(continue_button_text, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
save_button_text, 1)
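        # Response id 1 is a custom GTK response code for the save button; it is checked below.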
dialog.set_default_response(1)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
return True
elif response == 1:
self.save()
if self.modified:
return False
else:
return True
else:
return False
def load(self, filename):
raise NotImplementedError()
def save(self, filename=None):
if filename == None:
filename = self._get_filename()
if filename == None:
def action(fullname):
self._save(fullname)
self._clear_unsaved()
self.notebook.refresh()
self.__prompt_for_name(title="Save As...", save_button_text="_Save", action=action)
else:
self._save(filename)
def rename(self):
if self._get_filename() == None:
self.save()
return
old_name = os.path.basename(self._get_filename())
title = "Rename '%s'" % old_name
def check_name(name):
return name != "" and name != old_name
def action(fullname):
old_filename = self._get_filename()
self._save(fullname)
self._clear_unsaved()
os.remove(old_filename)
self.notebook.refresh()
self.__prompt_for_name(title=title, save_button_text="_Rename", action=action, check_name=check_name)
@property
def needs_calculate(self):
return (self.state != NotebookFile.EXECUTE_SUCCESS and
self.state != NotebookFile.NONE and
self.state != NotebookFile.EXECUTING)
def calculate(self):
pass
def undo(self):
pass
def redo(self):
pass
@gobject.property
def filename(self):
return self._get_filename()
@gobject.property
def file(self):
return self._get_file()
@gobject.property
def modified(self):
return self._get_modified()
@gobject.property
def state(self):
return self._get_state()
@gobject.property
def title(self):
if self.modified:
return "*" + self._get_display_name()
else:
return self._get_display_name()
| # Copyright 2008 <NAME>
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import gobject
import gtk
import pango
from application import application
from format_escaped import format_escaped
from notebook import NotebookFile
from shell_buffer import ShellBuffer
from shell_view import ShellView
from save_file import SaveFileBuilder
class Editor(gobject.GObject):
def __init__(self, notebook):
gobject.GObject.__init__(self)
self.notebook = notebook
self._unsaved_index = application.allocate_unsaved_index()
#######################################################
# Utility
#######################################################
def _clear_unsaved(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
def _update_filename(self, *args):
self.notify('filename')
self.notify('title')
def _update_modified(self, *args):
self.notify('modified')
self.notify('title')
def _update_state(self, *args):
self.notify('state')
def _update_file(self):
self.notify('file')
def __prompt_for_name(self, title, save_button_text, action, check_name=None):
builder = SaveFileBuilder(title, self._get_display_name(), save_button_text, check_name)
builder.dialog.set_transient_for(self.widget.get_toplevel())
if self._get_filename() != None:
builder.name_entry.set_text(os.path.basename(self._get_filename()))
while True:
response = builder.dialog.run()
if response != gtk.RESPONSE_OK:
break
raw_name = builder.name_entry.get_text()
error_message = None
try:
raw_name = application.validate_name(raw_name)
except ValueError, e:
error_message = e.message
if not error_message:
extension = "." + self._get_extension()
if not (raw_name.lower().endswith(extension)):
raw_name += extension
if not error_message:
fullname = os.path.join(self.notebook.folder, raw_name)
if os.path.exists(fullname):
error_message = "'%s' already exists" % raw_name
if error_message:
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_OK,
type=gtk.MESSAGE_ERROR)
dialog.set_markup("<big><b>Please choose a different name</b></big>")
dialog.format_secondary_text(error_message)
dialog.run()
dialog.destroy()
continue
action(fullname)
break
builder.dialog.destroy()
#######################################################
# Implemented by subclasses
#######################################################
def _get_display_name(self):
raise NotImplementedError()
def _get_modified(self):
raise NotImplementedError()
def _get_state(self):
return NotebookFile.NONE
    def _get_filename(self):
        raise NotImplementedError()
    def _get_file(self):
        raise NotImplementedError()
    def _get_extension(self):
        raise NotImplementedError()
    def _save(self, filename):
        raise NotImplementedError()
#######################################################
# Public API
#######################################################
def close(self):
if self._unsaved_index != None:
application.free_unsaved_index(self._unsaved_index)
self._unsaved_index = None
self.widget.destroy()
def confirm_discard(self, before_quit=False):
if not self.modified:
return True
if before_quit:
message_format = self.DISCARD_FORMAT_BEFORE_QUIT
continue_button_text = '_Quit without saving'
else:
message_format = self.DISCARD_FORMAT
continue_button_text = '_Discard'
if self._get_filename() == None:
save_button_text = gtk.STOCK_SAVE_AS
else:
save_button_text = gtk.STOCK_SAVE
message = format_escaped("<big><b>" + message_format + "</b></big>", self._get_display_name())
dialog = gtk.MessageDialog(parent=self.widget.get_toplevel(), buttons=gtk.BUTTONS_NONE,
type=gtk.MESSAGE_WARNING)
dialog.set_markup(message)
dialog.add_buttons(continue_button_text, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
save_button_text, 1)
dialog.set_default_response(1)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_OK:
return True
elif response == 1:
self.save()
if self.modified:
return False
else:
return True
else:
return False
def load(self, filename):
raise NotImplementedError()
def save(self, filename=None):
if filename == None:
filename = self._get_filename()
if filename == None:
def action(fullname):
self._save(fullname)
self._clear_unsaved()
self.notebook.refresh()
self.__prompt_for_name(title="Save As...", save_button_text="_Save", action=action)
else:
self._save(filename)
def rename(self):
if self._get_filename() == None:
self.save()
return
old_name = os.path.basename(self._get_filename())
title = "Rename '%s'" % old_name
def check_name(name):
return name != "" and name != old_name
def action(fullname):
old_filename = self._get_filename()
self._save(fullname)
self._clear_unsaved()
os.remove(old_filename)
self.notebook.refresh()
self.__prompt_for_name(title=title, save_button_text="_Rename", action=action, check_name=check_name)
@property
def needs_calculate(self):
return (self.state != NotebookFile.EXECUTE_SUCCESS and
self.state != NotebookFile.NONE and
self.state != NotebookFile.EXECUTING)
def calculate(self):
pass
def undo(self):
pass
def redo(self):
pass
@gobject.property
def filename(self):
return self._get_filename()
@gobject.property
def file(self):
return self._get_file()
@gobject.property
def modified(self):
return self._get_modified()
@gobject.property
def state(self):
return self._get_state()
@gobject.property
def title(self):
if self.modified:
return "*" + self._get_display_name()
else:
return self._get_display_name()
| de | 0.666387 | # Copyright 2008 <NAME> # # This file is part of Reinteract and distributed under the terms # of the BSD license. See the file COPYING in the Reinteract # distribution for full details. # ######################################################################## ####################################################### # Utility ####################################################### ####################################################### # Implemented by subclasses ####################################################### ####################################################### # Public API ####################################################### | 1.987946 | 2 |
python/scripts/compare_events.py | tvogels01/arthur-redshift-etl | 0 | 8668 | """
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def read_file(filename):
"""
Read output from query_events command.
The file is expected to be formatted such that there's a header line, a separator, then the
data. The column set must contain "elapsed" and "rowcount" for later processing.
Also Arthur prints a summary after the table, like "(100 rows)" which will be skipped if present.
"""
column_spacing_re = re.compile(r"\s+\|\s+")
row_count_re = re.compile(r"\(\d+\s*rows\)")
print(f"Reading events from {filename}...")
with open(filename) as f:
for i, line in enumerate(f.readlines()):
if i == 1 or row_count_re.match(line):
# Found the separator line or the final row tally.
continue
yield column_spacing_re.sub("|", line).strip()
def parse_file(filename):
"""Parse the input as '|'-delimited columns."""
lines = read_file(filename)
reader = csv.reader(lines, delimiter="|")
row_class = namedtuple("CsvRow", next(reader), rename=True)
for row in reader:
yield row_class._make(row)
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
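# Worked examples (illustrative only): delta(200.0, 230.0) == 15.0, i.e. a +30
# change on a base of 200 is 15%, rounded to one decimal; delta(None, 230.0) is
# None and delta(0.0, 0.0) is 0.0.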
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
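# Illustrative behaviour (hypothetical numbers): show_delta(25.0, 50.0, "elapsed")
# is True (both under 30 s but more than 20 s apart), show_delta(25.0, 40.0,
# "elapsed") is False (within the 20 s absolute tolerance), and any rowcount
# decrease such as show_delta(5000, 4900, "rowcount") is True.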
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| """
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def read_file(filename):
"""
Read output from query_events command.
The file is expected to be formatted such that there's a header line, a separator, then the
data. The column set must contain "elapsed" and "rowcount" for later processing.
Also Arthur prints a summary after the table, like "(100 rows)" which will be skipped if present.
"""
column_spacing_re = re.compile(r"\s+\|\s+")
row_count_re = re.compile(r"\(\d+\s*rows\)")
print(f"Reading events from {filename}...")
with open(filename) as f:
for i, line in enumerate(f.readlines()):
if i == 1 or row_count_re.match(line):
# Found the separator line or the final row tally.
continue
yield column_spacing_re.sub("|", line).strip()
def parse_file(filename):
"""Parse the input as '|'-delimited columns."""
lines = read_file(filename)
reader = csv.reader(lines, delimiter="|")
row_class = namedtuple("CsvRow", next(reader), rename=True)
for row in reader:
yield row_class._make(row)
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| en | 0.828169 | This script compares events from two ETLs to highlight differences in elapsed times or row counts. * Pre-requisites You need to have a list of events for each ETL. Arthur can provide this using the "query_events" command. For example: ``` arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events ``` * Usage Once you have the files, you use this script: ``` compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events ``` The order of those two files is: "older ETL" => "newer ETL". Read output from query_events command. The file is expected to be formatted such that there's a header line, a separator, then the data. The column set must contain "elapsed" and "rowcount" for later processing. Also Arthur prints a summary after the table, like "(100 rows)" which will be skipped if present. # Found the separator line or the final row tally. Parse the input as '|'-delimited columns. Find elapsed time and rowcount for each target relation. # The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later. Return change in percent (or None if undefined). The delta in percent is rounded to one decimal. Return whether the change from previous event to current event is "significant". If the values appear to be equal or almost equal, there's no need to report a delta. Also, if the values are really small and any change is inflated, skip reporting the delta. Note that for row count, a decrease in rows is always shown. # Decrease trigger-happiness for quick loads: # We expect to move forward with growing tables so smaller row counts are suspect. # Increase trigger-happiness for small (dimensional) tables: Print differences between runs, sorted by relation. # target # step # Avoid comparison with None values in the columns | 2.923743 | 3 |
harness/drifter.py | cmu-sei/augur-code | 0 | 8669 | # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
def main():
logging.setup_logging(LOG_FILE_NAME)
# Allow selecting configs for experiments, and load it.
args = arguments.get_parsed_arguments()
config_file = Config.get_config_file(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)
config = Config()
config.load(config_file)
# Load scenario data.
drift_module, params = drift_generator.load_drift_config(config.get("drift_scenario"))
if args.test:
drift_generator.test_drift(config, drift_module, params, config.get("bins"))
else:
# Sort dataset into bins.
base_dataset = load_dataset(config.get("dataset"), config.get("dataset_class"))
bin_value = config.get("bin_value") if config.contains("bin_value") else "results"
bin_shuffle = config.get("bin_shuffle") if config.contains("bin_shuffle") else True
bins = drift_generator.load_bins(base_dataset, config.get("bins"), bin_value, bin_shuffle)
# Apply drift.
drifted_dataset = drift_generator.apply_drift(bins, drift_module, params)
drift_generator.add_timestamps(drifted_dataset, config.get("timestamps"))
# Save it to regular file, and timestamped file.
drifted_dataset.save_to_file(config.get("output"))
print("Copying output file to timestamped backup.")
shutil.copyfile(config.get("output"), drift_generator.get_drift_stamped_name(config.get("output")))
if __name__ == '__main__':
main()
| # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
def main():
logging.setup_logging(LOG_FILE_NAME)
# Allow selecting configs for experiments, and load it.
args = arguments.get_parsed_arguments()
config_file = Config.get_config_file(args, DRIFT_EXP_CONFIG_FOLDER, DEFAULT_CONFIG_FILENAME)
config = Config()
config.load(config_file)
# Load scenario data.
drift_module, params = drift_generator.load_drift_config(config.get("drift_scenario"))
if args.test:
drift_generator.test_drift(config, drift_module, params, config.get("bins"))
else:
# Sort dataset into bins.
base_dataset = load_dataset(config.get("dataset"), config.get("dataset_class"))
bin_value = config.get("bin_value") if config.contains("bin_value") else "results"
bin_shuffle = config.get("bin_shuffle") if config.contains("bin_shuffle") else True
bins = drift_generator.load_bins(base_dataset, config.get("bins"), bin_value, bin_shuffle)
# Apply drift.
drifted_dataset = drift_generator.apply_drift(bins, drift_module, params)
drift_generator.add_timestamps(drifted_dataset, config.get("timestamps"))
# Save it to regular file, and timestamped file.
drifted_dataset.save_to_file(config.get("output"))
print("Copying output file to timestamped backup.")
shutil.copyfile(config.get("output"), drift_generator.get_drift_stamped_name(config.get("output")))
if __name__ == '__main__':
main()
| en | 0.621314 | # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code # Copyright 2022 Carnegie Mellon University. # # NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. # # Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms. # # [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution. # # Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University. # # This Software includes and/or makes use of the following Third-Party Software subject to its own license: # 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California. # 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors. # 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers. # 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers. # 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers. # 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers. # 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team. # # DM22-0044 Load dataset to drift. # Allow selecting configs for experiments, and load it. # Load scenario data. # Sort dataset into bins. # Apply drift. # Save it to regular file, and timestamped file. | 1.158862 | 1 |
server/server-flask/app/docs/admin/survey/survey.py | DSM-DMS/Project-DMS-Web | 11 | 8670 | SURVEY_POST = {
'tags': ['설문조사 관리'],
'description': '설문조사 등록',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': '설문조사 제목',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'start_date',
'description': '시작 날짜(YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'end_date',
'description': '종료 날짜(YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'target',
'description': '대상 학년',
'in': 'formData',
'type': 'list',
'required': True
}
],
'responses': {
'201': {
'description': '설문조사 등록 성공'
},
'403': {
'description': '권한 없음'
}
}
}
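# These dicts appear to be Flasgger/Swagger operation specs (an assumption from
# their shape and the docs/ path). Rough English glosses of the Korean strings:
# '설문조사 관리' = "Survey management", '설문조사 등록' = "Register a survey",
# '대상 학년' = "target grade", '객관식 여부' = "whether multiple-choice",
# '권한 없음' = "permission denied".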
QUESTION_POST = {
'tags': ['설문조사 관리'],
'description': '설문조사에 질문 등록',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
'description': '질문을 추가할 설문조사 ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': '질문 제목',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'is_objective',
'description': '객관식 여부',
'in': 'formData',
'type': 'bool',
'required': True
},
{
'name': 'choice_paper',
'description': '객관식 선택지',
'in': 'formData',
'type': 'list',
'required': False
}
],
'responses': {
'201': {
'description': '질문 추가 성공'
},
'403': {
'description': '권한 없음'
}
}
}
| SURVEY_POST = {
'tags': ['설문조사 관리'],
'description': '설문조사 등록',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': '설문조사 제목',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'start_date',
'description': '시작 날짜(YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'end_date',
'description': '종료 날짜(YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'target',
'description': '대상 학년',
'in': 'formData',
'type': 'list',
'required': True
}
],
'responses': {
'201': {
'description': '설문조사 등록 성공'
},
'403': {
'description': '권한 없음'
}
}
}
QUESTION_POST = {
'tags': ['설문조사 관리'],
'description': '설문조사에 질문 등록',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
'description': '질문을 추가할 설문조사 ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': '질문 제목',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'is_objective',
'description': '객관식 여부',
'in': 'formData',
'type': 'bool',
'required': True
},
{
'name': 'choice_paper',
'description': '객관식 선택지',
'in': 'formData',
'type': 'list',
'required': False
}
],
'responses': {
'201': {
'description': '질문 추가 성공'
},
'403': {
'description': '권한 없음'
}
}
} | none | 1 | 1.747625 | 2 |
|
network/baselines_archive/resnet_3d101.py | xuyu0010/ARID_v1 | 5 | 8671 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import logging
import os
try:
from . import initializer
from .utils import load_state
except:
import initializer
from utils import load_state
__all__ = ['ResNet', 'RESNET101']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
return nn.Conv3d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv1x1x1(in_planes, planes)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = conv3x3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = conv1x1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
block_inplanes=[64, 128, 256, 512],
n_input_channels=3,
conv1_t_size=7,
conv1_t_stride=1,
no_max_pool=False,
shortcut_type='B',
widen_factor=1.0,
num_classes=400,
pretrained=True):
super().__init__()
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = nn.Conv3d(n_input_channels,
self.in_planes,
kernel_size=(conv1_t_size, 7, 7),
stride=(conv1_t_stride, 2, 2),
padding=(conv1_t_size // 2, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
shortcut_type)
self.layer2 = self._make_layer(block,
block_inplanes[1],
layers[1],
shortcut_type,
stride=2)
self.layer3 = self._make_layer(block,
block_inplanes[2],
layers[2],
shortcut_type,
stride=2)
self.layer4 = self._make_layer(block,
block_inplanes[3],
layers[3],
shortcut_type,
stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Initialization
initializer.xavier(net=self)
if pretrained:
pretrained_model=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/resnet-101-kinetics.pth')
logging.info("Network:: graph initialized, loading pretrained model: `{}'".format(pretrained_model))
assert os.path.exists(pretrained_model), "cannot locate: `{}'".format(pretrained_model)
pretrained = torch.load(pretrained_model)
load_state(self, pretrained['state_dict'])
else:
logging.info("Network:: graph initialized, use random inilization!")
def _downsample_basic_block(self, x, planes, stride):
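        # Parameter-free shortcut (ResNet option 'A'): average-pool to the target
        # stride, then zero-pad the channel dimension up to `planes` so the result
        # can be added to the block output without a 1x1x1 projection.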
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
out.size(3), out.size(4))
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(self._downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
conv1x1x1(self.in_planes, planes * block.expansion, stride),
nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(in_planes=self.in_planes,
planes=planes,
stride=stride,
downsample=downsample))
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
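    # Layers before layer{ft_begin_index} are effectively frozen (lr 0.0); only
    # layer{ft_begin_index}..layer4 and fc receive regular fine-tuning updates.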
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def RESNET101(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == "__main__":
import torch
logging.getLogger().setLevel(logging.DEBUG)
# ---------
net1 = RESNET101(num_classes=11, pretrained=True)
data = torch.randn(1,3,16,224,224)
output1 = net1(data)
print (output1.shape)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import logging
import os
try:
from . import initializer
from .utils import load_state
except:
import initializer
from utils import load_state
__all__ = ['ResNet', 'RESNET101']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
return nn.Conv3d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv1x1x1(in_planes, planes)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = conv3x3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = conv1x1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
block_inplanes=[64, 128, 256, 512],
n_input_channels=3,
conv1_t_size=7,
conv1_t_stride=1,
no_max_pool=False,
shortcut_type='B',
widen_factor=1.0,
num_classes=400,
pretrained=True):
super().__init__()
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = nn.Conv3d(n_input_channels,
self.in_planes,
kernel_size=(conv1_t_size, 7, 7),
stride=(conv1_t_stride, 2, 2),
padding=(conv1_t_size // 2, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
shortcut_type)
self.layer2 = self._make_layer(block,
block_inplanes[1],
layers[1],
shortcut_type,
stride=2)
self.layer3 = self._make_layer(block,
block_inplanes[2],
layers[2],
shortcut_type,
stride=2)
self.layer4 = self._make_layer(block,
block_inplanes[3],
layers[3],
shortcut_type,
stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Initialization
initializer.xavier(net=self)
if pretrained:
pretrained_model=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained/resnet-101-kinetics.pth')
logging.info("Network:: graph initialized, loading pretrained model: `{}'".format(pretrained_model))
assert os.path.exists(pretrained_model), "cannot locate: `{}'".format(pretrained_model)
pretrained = torch.load(pretrained_model)
load_state(self, pretrained['state_dict'])
else:
logging.info("Network:: graph initialized, use random inilization!")
def _downsample_basic_block(self, x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
out.size(3), out.size(4))
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(self._downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
conv1x1x1(self.in_planes, planes * block.expansion, stride),
nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(
block(in_planes=self.in_planes,
planes=planes,
stride=stride,
downsample=downsample))
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
def RESNET101(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == "__main__":
import torch
logging.getLogger().setLevel(logging.DEBUG)
# ---------
net1 = RESNET101(num_classes=11, pretrained=True)
data = torch.randn(1,3,16,224,224)
output1 = net1(data)
print (output1.shape) | en | 0.577824 | # 3x3x3 convolution with padding # Initialization Constructs a ResNet-50 model. # --------- | 2.331347 | 2 |
tests/ninety_nine_problems/test_miscellaneous_problems.py | gecBurton/inference_logic | 3 | 8672 | import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
@pytest.mark.xfail
def test_90():
r"""
P90 (**) Eight queens problem
This is a classical problem in computer science. The objective is to
place eight queens on a chessboard so that no two queens are attacking
each other; i.e., no two queens are in the same row, the same column,
or on the same diagonal. We generalize this original problem by
allowing for an arbitrary dimension N of the chessboard.
We represent the positions of the queens as a list of numbers 1..N.
Example: [4,2,7,3,6,8,5,1] means that the queen in the first column
is in row 4, the queen in the second column is in row 2, etc.
By using the permutations of the numbers 1..N we guarantee that
no two queens are in the same row. The only test that remains
to be made is the diagonal test. A queen placed at column X and
row Y occupies two diagonals: one of them, with number C = X-Y, goes
from bottom-left to top-right, the other one, numbered D = X+Y, goes
from top-left to bottom-right. In the test predicate we keep track
of the already occupied diagonals in Cs and Ds.
% The first version is a simple generate-and-test solution.
% queens_1(N,Qs) :- Qs is a solution of the N-queens problem
queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs).
% range(A,B,L) :- L is the list of numbers A..B
range(A,A,[A]).
range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L).
% permu(Xs,Zs) :- the list Zs is a permutation of the list Xs
permu([],[]).
permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys).
del(X,[X|Xs],Xs).
del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs).
% test(Qs) :- the list Qs represents a non-attacking queens solution
test(Qs) :- test(Qs,1,[],[]).
% test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N,
% are not in conflict with the diagonals Cs and Ds
test([],_,_,_).
test([Y|Ys],X,Cs,Ds) :-
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X + 1,
test(Ys,X1,[C|Cs],[D|Ds]).
%--------------------------------------------------------------
% Now, in version 2, the tester is pushed completely inside the
% generator permu.
queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]).
permu_test([],[],_,_,_).
permu_test(Qs,[Y|Ys],X,Cs,Ds) :-
del(Y,Qs,Rs),
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X+1,
permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]).
"""
N, Qs, N, Rs, Qs, A, B, L, A1, Y, Ys, X, Xs, Zs = Variable.factory(
"N", "Qs", "N", "Rs", "Qs", "A", "B", "L", "A1", "Y", "Ys", "X", "Xs", "Zs"
)
_W1, _W2, _W3 = Variable.factory("_W1", "_W2", "_W3")
Cs, Ds, D, X1, C, Cs = Variable.factory("Cs", "Ds", "D", "X1", "C", "Cs")
db = [
Rule(
dict(queens_1=N, a=Qs),
dict(range=1, a=N, b=Rs),
dict(permu=Rs, a=Qs),
dict(test=Qs),
),
dict(range=A, a=A, b=[A]),
Rule(
dict(range=A, a=B, b=[A, *L]),
Assert(lambda A, B: A < B),
Assign(A1, lambda A: A + 1),
dict(range=A1, a=B, b=L),
),
dict(permu=[], a=[]),
Rule(
dict(permu=Qs, a=[Y, *Ys]), dict(delete=Y, a=Qs, b=Rs), dict(permu=Rs, a=Ys)
),
dict(delete=X, a=[X, *Xs], b=Xs),
Rule(dict(delete=X, a=[Y, *Ys], b=[Y, *Zs]), dict(delete=X, a=Ys, b=Zs)),
Rule(dict(test=Qs), dict(test=Qs, a=1, b=[], c=[])),
dict(test=[], a=_W1, b=_W2, c=_W3),
Rule(
dict(test=[Y, *Ys], a=X, b=Cs, c=Ds),
Assign(C, lambda X, Y: X - Y),
Assert(lambda C, Cs: C not in Cs),
Assign(D, lambda X, Y: X + Y),
Assert(lambda D, Ds: D not in Ds),
Assign(X1, lambda X: X + 1),
dict(test=Ys, a=X1, b=[C, *Cs], c=[D, *Ds]),
),
]
Q = Variable("Q")
query = dict(queens_1=8, a=Q)
assert list(search(db, query)) == []
| import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
@pytest.mark.xfail
def test_90():
r"""
P90 (**) Eight queens problem
This is a classical problem in computer science. The objective is to
place eight queens on a chessboard so that no two queens are attacking
each other; i.e., no two queens are in the same row, the same column,
or on the same diagonal. We generalize this original problem by
allowing for an arbitrary dimension N of the chessboard.
We represent the positions of the queens as a list of numbers 1..N.
Example: [4,2,7,3,6,8,5,1] means that the queen in the first column
is in row 4, the queen in the second column is in row 2, etc.
By using the permutations of the numbers 1..N we guarantee that
no two queens are in the same row. The only test that remains
to be made is the diagonal test. A queen placed at column X and
row Y occupies two diagonals: one of them, with number C = X-Y, goes
from bottom-left to top-right, the other one, numbered D = X+Y, goes
from top-left to bottom-right. In the test predicate we keep track
of the already occupied diagonals in Cs and Ds.
% The first version is a simple generate-and-test solution.
% queens_1(N,Qs) :- Qs is a solution of the N-queens problem
queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs).
% range(A,B,L) :- L is the list of numbers A..B
range(A,A,[A]).
range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L).
% permu(Xs,Zs) :- the list Zs is a permutation of the list Xs
permu([],[]).
permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys).
del(X,[X|Xs],Xs).
del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs).
% test(Qs) :- the list Qs represents a non-attacking queens solution
test(Qs) :- test(Qs,1,[],[]).
% test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N,
% are not in conflict with the diagonals Cs and Ds
test([],_,_,_).
test([Y|Ys],X,Cs,Ds) :-
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X + 1,
test(Ys,X1,[C|Cs],[D|Ds]).
%--------------------------------------------------------------
% Now, in version 2, the tester is pushed completely inside the
% generator permu.
queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]).
permu_test([],[],_,_,_).
permu_test(Qs,[Y|Ys],X,Cs,Ds) :-
del(Y,Qs,Rs),
C is X-Y, \+ memberchk(C,Cs),
D is X+Y, \+ memberchk(D,Ds),
X1 is X+1,
permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]).
"""
N, Qs, N, Rs, Qs, A, B, L, A1, Y, Ys, X, Xs, Zs = Variable.factory(
"N", "Qs", "N", "Rs", "Qs", "A", "B", "L", "A1", "Y", "Ys", "X", "Xs", "Zs"
)
_W1, _W2, _W3 = Variable.factory("_W1", "_W2", "_W3")
Cs, Ds, D, X1, C, Cs = Variable.factory("Cs", "Ds", "D", "X1", "C", "Cs")
db = [
Rule(
dict(queens_1=N, a=Qs),
dict(range=1, a=N, b=Rs),
dict(permu=Rs, a=Qs),
dict(test=Qs),
),
dict(range=A, a=A, b=[A]),
Rule(
dict(range=A, a=B, b=[A, *L]),
Assert(lambda A, B: A < B),
Assign(A1, lambda A: A + 1),
dict(range=A1, a=B, b=L),
),
dict(permu=[], a=[]),
Rule(
dict(permu=Qs, a=[Y, *Ys]), dict(delete=Y, a=Qs, b=Rs), dict(permu=Rs, a=Ys)
),
dict(delete=X, a=[X, *Xs], b=Xs),
Rule(dict(delete=X, a=[Y, *Ys], b=[Y, *Zs]), dict(delete=X, a=Ys, b=Zs)),
Rule(dict(test=Qs), dict(test=Qs, a=1, b=[], c=[])),
dict(test=[], a=_W1, b=_W2, c=_W3),
Rule(
dict(test=[Y, *Ys], a=X, b=Cs, c=Ds),
Assign(C, lambda X, Y: X - Y),
Assert(lambda C, Cs: C not in Cs),
Assign(D, lambda X, Y: X + Y),
Assert(lambda D, Ds: D not in Ds),
Assign(X1, lambda X: X + 1),
dict(test=Ys, a=X1, b=[C, *Cs], c=[D, *Ds]),
),
]
Q = Variable("Q")
query = dict(queens_1=8, a=Q)
assert list(search(db, query)) == []
| en | 0.77928 | P90 (**) Eight queens problem This is a classical problem in computer science. The objective is to place eight queens on a chessboard so that no two queens are attacking each other; i.e., no two queens are in the same row, the same column, or on the same diagonal. We generalize this original problem by allowing for an arbitrary dimension N of the chessboard. We represent the positions of the queens as a list of numbers 1..N. Example: [4,2,7,3,6,8,5,1] means that the queen in the first column is in row 4, the queen in the second column is in row 2, etc. By using the permutations of the numbers 1..N we guarantee that no two queens are in the same row. The only test that remains to be made is the diagonal test. A queen placed at column X and row Y occupies two diagonals: one of them, with number C = X-Y, goes from bottom-left to top-right, the other one, numbered D = X+Y, goes from top-left to bottom-right. In the test predicate we keep track of the already occupied diagonals in Cs and Ds. % The first version is a simple generate-and-test solution. % queens_1(N,Qs) :- Qs is a solution of the N-queens problem queens_1(N,Qs) :- range(1,N,Rs), permu(Rs,Qs), test(Qs). % range(A,B,L) :- L is the list of numbers A..B range(A,A,[A]). range(A,B,[A|L]) :- A < B, A1 is A+1, range(A1,B,L). % permu(Xs,Zs) :- the list Zs is a permutation of the list Xs permu([],[]). permu(Qs,[Y|Ys]) :- del(Y,Qs,Rs), permu(Rs,Ys). del(X,[X|Xs],Xs). del(X,[Y|Ys],[Y|Zs]) :- del(X,Ys,Zs). % test(Qs) :- the list Qs represents a non-attacking queens solution test(Qs) :- test(Qs,1,[],[]). % test(Qs,X,Cs,Ds) :- the queens in Qs, representing columns X to N, % are not in conflict with the diagonals Cs and Ds test([],_,_,_). test([Y|Ys],X,Cs,Ds) :- C is X-Y, \+ memberchk(C,Cs), D is X+Y, \+ memberchk(D,Ds), X1 is X + 1, test(Ys,X1,[C|Cs],[D|Ds]). %-------------------------------------------------------------- % Now, in version 2, the tester is pushed completely inside the % generator permu. queens_2(N,Qs) :- range(1,N,Rs), permu_test(Rs,Qs,1,[],[]). permu_test([],[],_,_,_). permu_test(Qs,[Y|Ys],X,Cs,Ds) :- del(Y,Qs,Rs), C is X-Y, \+ memberchk(C,Cs), D is X+Y, \+ memberchk(D,Ds), X1 is X+1, permu_test(Rs,Ys,X1,[C|Cs],[D|Ds]). | 3.721731 | 4 |
airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py | ipa320/airbus_coop | 4 | 8673 | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class for defining the different control statuses.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
class DiagnosticsWidget(QPushButton):
DIAGNOSTICS_TOPLEVEL_TOPIC_NAME = rospy.get_param('diagnostics_toplevel_topic_name','/diagnostics_toplevel_state')
state = "status_stale"
msg = "No diagnostic messages received"
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
# Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message
self.connect(self, SIGNAL("stateChanged"), self.update_state)
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
self._diagnostics_toplevel_state_sub = rospy.Subscriber(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME , DiagnosticStatus, self.toplevel_state_callback)
# Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
def update_state(self, state, msg):
self.setIcon(R.getIconById(state))
self.setIconSize(QSize(40,40))
self.setToolTip(msg)
def toplevel_state_callback(self, msg):
self.state = msg.level
if msg.level == 0:
self.state= "status_ok"
self.msg = "OK"
if msg.level == 1 :
self.state= "status_warning"
self.msg = "WARNING"
if msg.level == 2 :
self.state= "status_error"
self.msg = "ERROR"
if msg.level == 3 :
self.state= "status_stale"
self.msg = "STALE"
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
def _trigger_button(self, checked):
popup = DiagnosticsPopup(self, self._context)
popup.show_()
class DiagnosticsPopup(QAgiPopup):
def __init__(self, parent, context):
"""! The constructor."""
QAgiPopup.__init__(self, parent)
self._context = context
self._parent = parent
self.setRelativePosition(QAgiPopup.TopRight, QAgiPopup.BottomRight)
loadUi(R.layouts.diagnostics_popup, self)
self._inspectors = {}
self._current_msg = None
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self.adjustSize()
# Diagnostics subscriber
DIAGNOSTICS_TOPIC_NAME = rospy.get_param('diagnostics_topic_name','/diagnostics_agg')
self.connect(self,SIGNAL("UpdateDiagnostics"), self.update_diag)
self._diagnostics_agg_sub = rospy.Subscriber(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)
def update_diag(self):
#update the tree
self._tree.prune()
self.tree_all_devices.resizeColumnToContents(0)
self.adjustSize()
def message_cb(self,msg):
""" DiagnosticArray message callback """
for status in msg.status:
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
self.emit(SIGNAL('UpdateDiagnostics'))
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
    main.setCentralWidget(DiagnosticsWidget(Context(main)))
main.show()
app.exec_()
#End of file
| #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class for defining the different control statuses.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
class DiagnosticsWidget(QPushButton):
DIAGNOSTICS_TOPLEVEL_TOPIC_NAME = rospy.get_param('diagnostics_toplevel_topic_name','/diagnostics_toplevel_state')
state = "status_stale"
msg = "No diagnostic messages received"
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
# Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message
self.connect(self, SIGNAL("stateChanged"), self.update_state)
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
self._diagnostics_toplevel_state_sub = rospy.Subscriber(self.DIAGNOSTICS_TOPLEVEL_TOPIC_NAME , DiagnosticStatus, self.toplevel_state_callback)
# Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
def update_state(self, state, msg):
self.setIcon(R.getIconById(state))
self.setIconSize(QSize(40,40))
self.setToolTip(msg)
def toplevel_state_callback(self, msg):
self.state = msg.level
if msg.level == 0:
self.state= "status_ok"
self.msg = "OK"
if msg.level == 1 :
self.state= "status_warning"
self.msg = "WARNING"
if msg.level == 2 :
self.state= "status_error"
self.msg = "ERROR"
if msg.level == 3 :
self.state= "status_stale"
self.msg = "STALE"
self.emit(SIGNAL('stateChanged'), self.state, self.msg)
def _trigger_button(self, checked):
popup = DiagnosticsPopup(self, self._context)
popup.show_()
class DiagnosticsPopup(QAgiPopup):
def __init__(self, parent, context):
"""! The constructor."""
QAgiPopup.__init__(self, parent)
self._context = context
self._parent = parent
self.setRelativePosition(QAgiPopup.TopRight, QAgiPopup.BottomRight)
loadUi(R.layouts.diagnostics_popup, self)
self._inspectors = {}
self._current_msg = None
palette = self.tree_all_devices.palette()
self._original_base_color = palette.base().color()
self._original_alt_base_color = palette.alternateBase().color()
self._tree = StatusItem(self.tree_all_devices.invisibleRootItem())
self.adjustSize()
# Diagnostics subscriber
DIAGNOSTICS_TOPIC_NAME = rospy.get_param('diagnostics_topic_name','/diagnostics_agg')
self.connect(self,SIGNAL("UpdateDiagnostics"), self.update_diag)
self._diagnostics_agg_sub = rospy.Subscriber(DIAGNOSTICS_TOPIC_NAME, DiagnosticArray, self.message_cb)
def update_diag(self):
#update the tree
self._tree.prune()
self.tree_all_devices.resizeColumnToContents(0)
self.adjustSize()
def message_cb(self,msg):
""" DiagnosticArray message callback """
for status in msg.status:
path = status.name.split('/')
if path[0] == '':
path = path[1:]
tmp_tree = self._tree
for p in path:
tmp_tree = tmp_tree[p]
tmp_tree.update(status, util.get_resource_name(status.name))
self.emit(SIGNAL('UpdateDiagnostics'))
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
    main.setCentralWidget(DiagnosticsWidget(Context(main)))
main.show()
app.exec_()
#End of file
| en | 0.745314 | #!/usr/bin/env python # # Copyright 2015 Airbus # Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## @class DiagnosticsStatus ## @brief Class for difine different control status. #OK = 0 #WARN = 1 #ERROR = 2 #STALE = 3 ! The constructor. # Diagnostics top level: update the color of the button depending on the current diagnostics toplevel message # Diagnostics: when button pressed open a new window with a detailed list of components and diagnostic messages ! The constructor. # Diagnostics subscriber #update the tree DiagnosticArray message callback #End of file | 1.930832 | 2 |
sanansaattaja/website/forms/comment_form.py | KEZKA/YL-WEB-PROJECT | 3 | 8674 | <gh_stars>1-10
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
class CommentForm(FlaskForm):
text = TextAreaField("Text", validators=[DataRequired()])
submit = SubmitField('Publish')
| from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
class CommentForm(FlaskForm):
text = TextAreaField("Text", validators=[DataRequired()])
submit = SubmitField('Publish') | none | 1 | 2.307112 | 2 |
|
graphgallery/functional/dense/onehot.py | dongzizhu/GraphGallery | 1 | 8675 | <gh_stars>1-10
import numpy as np
from ..transform import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
@Transform.register()
class Onehot(DenseTransform):
def __init__(self, depth=None):
super().__init__()
self.collect(locals())
def __call__(self, *x):
return onehot(*x, depth=self.depth)
@multiple()
def onehot(label, depth=None):
"""Get the one-hot like label of nodes."""
label = np.asarray(label, dtype=np.int32)
depth = depth or label.max() + 1
if label.ndim == 1:
return np.eye(depth, dtype=label.dtype)[label]
else:
raise ValueError(f"label must be a 1D array, but got {label.ndim}D array.")
| import numpy as np
from ..transform import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
@Transform.register()
class Onehot(DenseTransform):
def __init__(self, depth=None):
super().__init__()
self.collect(locals())
def __call__(self, *x):
return onehot(*x, depth=self.depth)
@multiple()
def onehot(label, depth=None):
"""Get the one-hot like label of nodes."""
label = np.asarray(label, dtype=np.int32)
depth = depth or label.max() + 1
if label.ndim == 1:
return np.eye(depth, dtype=label.dtype)[label]
else:
raise ValueError(f"label must be a 1D array, but got {label.ndim}D array.") | en | 0.771693 | Get the one-hot like label of nodes. | 2.41431 | 2 |
models.py | Bileonaire/api-ridemyway | 0 | 8676 | """Handles data storage for Users, rides and requests
"""
# pylint: disable=E1101
import datetime
from flask import make_response, jsonify, current_app
from werkzeug.security import generate_password_hash
import psycopg2
import config
from databasesetup import db
class User():
"""Contains user columns and methods to add, update and delete a user"""
def __init__(self, username, email, password, admin):
self.username = username
self.email = email
self.password = generate_password_hash(password, method='sha256')
if admin == True:
self.admin = '1'
else:
self.admin = '0'
new_user = "INSERT INTO users (username, email, password, admin) VALUES " \
"('" + self.username + "', '" + self.email + "', '" + self.password + "', '" + self.admin + "')"
db_cursor = db.con()
db_cursor.execute(new_user)
db.commit()
@staticmethod
def update_user(user_id, username, email, password, admin):
"""Updates user information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE users SET username=%s, email=%s, password=%s, admin=%s WHERE user_id=%s",
(username, email, password, admin, user_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def delete_user(user_id):
"""Deletes a user"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM users WHERE user_id=%s", (user_id,))
db.commit()
return make_response(jsonify({"message" : "user has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_user(user_id):
"""Gets a particular user"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users WHERE user_id=%s", (user_id,))
user = db_cursor.fetchall()
if user != []:
user=user[0]
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
return make_response(jsonify({"profile" : info}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_all_users():
"""Gets all users"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users")
users = db_cursor.fetchall()
all_users = []
for user in users:
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
all_users.append(info)
return make_response(jsonify({"All users" : all_users}), 200)
class Ride():
"""Contains ride columns and methods to add, update and delete a ride"""
def __init__(self, ride, driver_id, departuretime, numberplate, maximum, status):
self.ride = ride
self.driver_id = driver_id
self.departuretime = departuretime
self.numberplate = numberplate
self.maximum = maximum
self.status = status
new_ride = "INSERT INTO rides (ride, driver_id, departuretime, numberplate, maximum, status) VALUES " \
"('" + self.ride + "', '" + self.driver_id + "', '" + self.departuretime + "', '" + self.numberplate + "','" + self.maximum + "','" + self.status + "' )"
db_cursor = db.con()
db_cursor.execute(new_ride)
db.commit()
@classmethod
def create_ride(cls, ride, driver_id, departuretime, numberplate, maximum, status="pending"):
"""Creates a new ride"""
cls(ride, driver_id, departuretime, numberplate, maximum, status)
return make_response(jsonify({"message" : "ride has been successfully created"}), 201)
@staticmethod
def update_ride(ride_id, ride, driver_id, departuretime, numberplate,
maximum):
"""Updates ride information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE rides SET ride=%s, driver_id=%s, departuretime=%s, numberplate=%s, maximum=%s WHERE ride_id=%s",
(ride, driver_id, departuretime, numberplate, maximum, ride_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def start_ride(ride_id, driver_id):
"""starts a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride = ride[0]
if int(ride[2]) == driver_id:
db_cursor.execute("UPDATE rides SET status=%s WHERE ride_id=%s", ("given", ride_id,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("taken", ride_id, True,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("rejected", ride_id, False,))
db.commit()
return {"message" : "ride has started"}
return {"message" : "The ride you want to start is not your ride."}
return {"message" : "ride does not exist"}
@staticmethod
def delete_ride(ride_id):
"""Deletes a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
for ride in rides:
if ride[0] == ride_id:
db_cursor.execute("DELETE FROM rides WHERE ride_id=%s", (ride_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_ride(ride_id):
"""Gets a particular ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride=ride[0]
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
return make_response(jsonify({"ride" : info}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_rides():
"""Gets all rides"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
all_rides = []
for ride in rides:
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
all_rides.append(info)
return make_response(jsonify({"All rides" : all_rides}), 200)
class Request:
"""Contains menu columns and methods to add, update and delete a request"""
def __init__(self, ride_id, user_id, accepted, status):
self.ride_id = str(ride_id)
self.user_id = str(user_id)
self.accepted = accepted
self.status = status
new_request = "INSERT INTO request (ride_id, user_id, accepted, status) VALUES " \
"('" + self.ride_id + "', '" + self.user_id + "', '" + '0' + "', '" + self.status + "')"
db_cursor = db.con()
db_cursor.execute(new_request)
db.commit()
@classmethod
def request_ride(cls, ride_id, user_id, accepted=False, status="pending"):
"""Creates a new request"""
db_cursor = db.con()
db_cursor.execute("SELECT status FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchone()
if ride[0] == "pending":
cls(ride_id, user_id, accepted, status)
return make_response(jsonify({"message" : "request has been successfully sent for approval"}), 201)
return make_response(jsonify({"message" : "ride is already given"}), 400)
@staticmethod
def delete_request(request_id):
"""Deletes a request"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM request WHERE request_id=%s", (request_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def accept_request(request_id):
"""Accepts request"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE request SET accepted=%s WHERE request_id=%s", (True, request_id))
db.commit()
return make_response(jsonify({"message" : "request has been successfully accepted"}), 200)
except KeyError:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def get_requests(request_id):
"""Gets a particular request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
if request != None:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
return make_response(jsonify({"request" : info}), 200)
return make_response(jsonify({"message" : "request does not exists"}), 404)
@staticmethod
def get_particular_riderequests(ride_id):
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE ride_id=%s", (ride_id,))
requests = db_cursor.fetchall()
if requests != []:
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_requests():
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request")
requests = db_cursor.fetchall()
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
class Relation:
"""Contains method to get driver_id and maximum from a requested ride"""
@staticmethod
def get_driver_id(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
ride_id = str(request[2])
db_cursor.execute("SELECT driver_id FROM rides WHERE ride_id=%s", (ride_id,))
driver_id = db_cursor.fetchone()
if driver_id == None:
return make_response(jsonify({"message" : "ride does not exists"}), 404)
driver_id = driver_id[0]
return int(driver_id)
@staticmethod
def get_maximum(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (str(request_id),))
request = db_cursor.fetchone()
db_cursor.execute("SELECT maximum FROM rides WHERE ride_id=%s", (request[2],))
maximum = db_cursor.fetchone()
maximum = maximum[0]
return maximum
| """Handles data storage for Users, rides and requests
"""
# pylint: disable=E1101
import datetime
from flask import make_response, jsonify, current_app
from werkzeug.security import generate_password_hash
import psycopg2
import config
from databasesetup import db
class User():
"""Contains user columns and methods to add, update and delete a user"""
def __init__(self, username, email, password, admin):
self.username = username
self.email = email
self.password = generate_password_hash(password, method='sha256')
if admin == True:
self.admin = '1'
else:
self.admin = '0'
new_user = "INSERT INTO users (username, email, password, admin) VALUES " \
"('" + self.username + "', '" + self.email + "', '" + self.password + "', '" + self.admin + "')"
db_cursor = db.con()
db_cursor.execute(new_user)
db.commit()
@staticmethod
def update_user(user_id, username, email, password, admin):
"""Updates user information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE users SET username=%s, email=%s, password=%s, admin=%s WHERE user_id=%s",
(username, email, password, admin, user_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def delete_user(user_id):
"""Deletes a user"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM users WHERE user_id=%s", (user_id,))
db.commit()
return make_response(jsonify({"message" : "user has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_user(user_id):
"""Gets a particular user"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users WHERE user_id=%s", (user_id,))
user = db_cursor.fetchall()
if user != []:
user=user[0]
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
return make_response(jsonify({"profile" : info}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_all_users():
"""Gets all users"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM users")
users = db_cursor.fetchall()
all_users = []
for user in users:
info = {user[0] : {"email": user[1],
"username": user[2],
"admin": user[4]}}
all_users.append(info)
return make_response(jsonify({"All users" : all_users}), 200)
class Ride():
"""Contains ride columns and methods to add, update and delete a ride"""
def __init__(self, ride, driver_id, departuretime, numberplate, maximum, status):
self.ride = ride
self.driver_id = driver_id
self.departuretime = departuretime
self.numberplate = numberplate
self.maximum = maximum
self.status = status
new_ride = "INSERT INTO rides (ride, driver_id, departuretime, numberplate, maximum, status) VALUES " \
"('" + self.ride + "', '" + self.driver_id + "', '" + self.departuretime + "', '" + self.numberplate + "','" + self.maximum + "','" + self.status + "' )"
db_cursor = db.con()
db_cursor.execute(new_ride)
db.commit()
@classmethod
def create_ride(cls, ride, driver_id, departuretime, numberplate, maximum, status="pending"):
"""Creates a new ride"""
cls(ride, driver_id, departuretime, numberplate, maximum, status)
return make_response(jsonify({"message" : "ride has been successfully created"}), 201)
@staticmethod
def update_ride(ride_id, ride, driver_id, departuretime, numberplate,
maximum):
"""Updates ride information"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE rides SET ride=%s, driver_id=%s, departuretime=%s, numberplate=%s, maximum=%s WHERE ride_id=%s",
(ride, driver_id, departuretime, numberplate, maximum, ride_id))
db.commit()
return make_response(jsonify({"message" : "user has been successfully updated"}), 200)
except:
return make_response(jsonify({"message" : "user does not exist"}), 404)
@staticmethod
def start_ride(ride_id, driver_id):
"""starts a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride = ride[0]
if int(ride[2]) == driver_id:
db_cursor.execute("UPDATE rides SET status=%s WHERE ride_id=%s", ("given", ride_id,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("taken", ride_id, True,))
db_cursor.execute("UPDATE request SET status=%s WHERE ride_id=%s and accepted=%s", ("rejected", ride_id, False,))
db.commit()
return {"message" : "ride has started"}
return {"message" : "The ride you want to start is not your ride."}
return {"message" : "ride does not exist"}
@staticmethod
def delete_ride(ride_id):
"""Deletes a ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
for ride in rides:
if ride[0] == ride_id:
db_cursor.execute("DELETE FROM rides WHERE ride_id=%s", (ride_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
return make_response(jsonify({"message" : "user does not exists"}), 404)
@staticmethod
def get_ride(ride_id):
"""Gets a particular ride"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchall()
if ride != []:
ride=ride[0]
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
return make_response(jsonify({"ride" : info}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_rides():
"""Gets all rides"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM rides")
rides = db_cursor.fetchall()
all_rides = []
for ride in rides:
info = {ride[0] : {"ride": ride[1],
"driver_id": ride[2],
"departure_time": ride[3],
"cost": ride[4],
"maximum": ride[5],
"status": ride[6]}}
all_rides.append(info)
return make_response(jsonify({"All rides" : all_rides}), 200)
class Request:
"""Contains menu columns and methods to add, update and delete a request"""
def __init__(self, ride_id, user_id, accepted, status):
self.ride_id = str(ride_id)
self.user_id = str(user_id)
self.accepted = accepted
self.status = status
new_request = "INSERT INTO request (ride_id, user_id, accepted, status) VALUES " \
"('" + self.ride_id + "', '" + self.user_id + "', '" + '0' + "', '" + self.status + "')"
db_cursor = db.con()
db_cursor.execute(new_request)
db.commit()
@classmethod
def request_ride(cls, ride_id, user_id, accepted=False, status="pending"):
"""Creates a new request"""
db_cursor = db.con()
db_cursor.execute("SELECT status FROM rides WHERE ride_id=%s", (ride_id,))
ride = db_cursor.fetchone()
if ride[0] == "pending":
cls(ride_id, user_id, accepted, status)
return make_response(jsonify({"message" : "request has been successfully sent for approval"}), 201)
return make_response(jsonify({"message" : "ride is already given"}), 400)
@staticmethod
def delete_request(request_id):
"""Deletes a request"""
try:
db_cursor = db.con()
db_cursor.execute("DELETE FROM request WHERE request_id=%s", (request_id,))
db.commit()
return make_response(jsonify({"message" : "ride has been successfully deleted"}), 200)
except:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def accept_request(request_id):
"""Accepts request"""
try:
db_cursor = db.con()
db_cursor.execute("UPDATE request SET accepted=%s WHERE request_id=%s", (True, request_id))
db.commit()
return make_response(jsonify({"message" : "request has been successfully accepted"}), 200)
except KeyError:
return make_response(jsonify({"message" : "the specified request does not exist in requests"}), 404)
@staticmethod
def get_requests(request_id):
"""Gets a particular request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
if request != None:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
return make_response(jsonify({"request" : info}), 200)
return make_response(jsonify({"message" : "request does not exists"}), 404)
@staticmethod
def get_particular_riderequests(ride_id):
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE ride_id=%s", (ride_id,))
requests = db_cursor.fetchall()
if requests != []:
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
return make_response(jsonify({"message" : "ride does not exists"}), 404)
@staticmethod
def get_all_requests():
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request")
requests = db_cursor.fetchall()
ride_requests = []
for request in requests:
info = {request[0] : {"user_id": request[1],
"ride_id": request[2],
"status": request[3],
"accepted": request[4]}}
ride_requests.append(info)
return make_response(jsonify({"ride_requests" : ride_requests}), 200)
class Relation:
"""Contains method to get driver_id and maximum from a requested ride"""
@staticmethod
def get_driver_id(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (request_id,))
request = db_cursor.fetchone()
ride_id = str(request[2])
db_cursor.execute("SELECT driver_id FROM rides WHERE ride_id=%s", (ride_id,))
driver_id = db_cursor.fetchone()
if driver_id == None:
return make_response(jsonify({"message" : "ride does not exists"}), 404)
driver_id = driver_id[0]
return int(driver_id)
@staticmethod
def get_maximum(request_id):
"""Gets all request"""
db_cursor = db.con()
db_cursor.execute("SELECT * FROM request WHERE request_id=%s", (str(request_id),))
request = db_cursor.fetchone()
db_cursor.execute("SELECT maximum FROM rides WHERE ride_id=%s", (request[2],))
maximum = db_cursor.fetchone()
maximum = maximum[0]
return maximum
| en | 0.778073 | Handles data storage for Users, rides and requests # pylint: disable=E1101 Contains user columns and methods to add, update and delete a user Updates user information Deletes a user Gets a particular user Gets all users Contains ride columns and methods to add, update and delete a ride Creates a new ride Updates ride information starts a ride Deletes a ride Gets a particular ride Gets all rides Contains menu columns and methods to add, update and delete a request Creates a new request Deletes a request Accepts request Gets a particular request Gets all request Contains method to get driver_id and maximum from a requested ride Gets all request Gets all request | 3.004391 | 3 |
lesson06/liqi/test.py | herrywen-nanj/51reboot | 0 | 8677 | <filename>lesson06/liqi/test.py<gh_stars>0
import configparser
'''
config = configparser.ConfigParser()
config.read('db.ini')
print(config.sections())
print(dict(config['mysqld'])['symbolic-links'])
'''
def ReadConfig(filename, section, key=None):
print(filename)
config = configparser.ConfigParser()
config.read(filename)
print(config.sections())
if not config.sections():
return "config init is empty", False
if key:
if section in config.sections():
return dict(config[section])[key], True
else:
return '', False
else:
return dict(config[section]), True
result, ok = ReadConfig('db.ini', 'mysqld', 'socket')
print(ok)
print(result)
if __name__ == '__main__':
ReadConfig('db.ini','mysqld','socket') | <filename>lesson06/liqi/test.py<gh_stars>0
import configparser
'''
config = configparser.ConfigParser()
config.read('db.ini')
print(config.sections())
print(dict(config['mysqld'])['symbolic-links'])
'''
def ReadConfig(filename, section, key=None):
print(filename)
config = configparser.ConfigParser()
config.read(filename)
print(config.sections())
if not config.sections():
return "config init is empty", False
if key:
if section in config.sections():
return dict(config[section])[key], True
else:
return '', False
else:
return dict(config[section]), True
result, ok = ReadConfig('db.ini', 'mysqld', 'socket')
print(ok)
print(result)
if __name__ == '__main__':
ReadConfig('db.ini','mysqld','socket') | ja | 0.095704 | config = configparser.ConfigParser() config.read('db.ini') print(config.sections()) print(dict(config['mysqld'])['symbolic-links']) | 2.68066 | 3 |
core/forms.py | xUndero/noc | 1 | 8678 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
class NOCBoundField(forms.forms.BoundField):
"""
Bound field with django-admin like label-tag
"""
def __init__(self, *args, **kwargs):
super(NOCBoundField, self).__init__(*args, **kwargs)
self.is_checkbox = isinstance(self.field.widget, forms.CheckboxInput)
def label_tag(self, contents=None, attrs=None):
if not contents:
contents = force_unicode(
escape(self.field.label if self.field.label else self.name)
) + (":" if not self.is_checkbox else "")
classes = []
if self.is_checkbox:
classes += ["vCheckboxLabel"]
if self.field.required:
classes += ["required"]
if classes:
attrs = attrs.copy() if attrs else {}
attrs["class"] = " ".join(classes)
return super(NOCBoundField, self).label_tag(contents=contents, attrs=attrs)
class NOCForm(forms.Form):
"""
Form wrapper returning NOCBoundField items
"""
class Media(object):
css = {"all": ["/ui/pkg/django-media/admin/css/forms.css"]}
def __init__(self, *args, **kwargs):
super(NOCForm, self).__init__(*args, **kwargs)
self.disabled_fields = set()
def disable_field(self, name):
self.disabled_fields.add(name)
def __iter__(self):
for name, field in six.iteritems(self.fields):
if name not in self.disabled_fields:
yield NOCBoundField(self, field, name)
| # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
class NOCBoundField(forms.forms.BoundField):
"""
Bound field with django-admin like label-tag
"""
def __init__(self, *args, **kwargs):
super(NOCBoundField, self).__init__(*args, **kwargs)
self.is_checkbox = isinstance(self.field.widget, forms.CheckboxInput)
def label_tag(self, contents=None, attrs=None):
if not contents:
contents = force_unicode(
escape(self.field.label if self.field.label else self.name)
) + (":" if not self.is_checkbox else "")
classes = []
if self.is_checkbox:
classes += ["vCheckboxLabel"]
if self.field.required:
classes += ["required"]
if classes:
attrs = attrs.copy() if attrs else {}
attrs["class"] = " ".join(classes)
return super(NOCBoundField, self).label_tag(contents=contents, attrs=attrs)
class NOCForm(forms.Form):
"""
Form wrapper returning NOCBoundField items
"""
class Media(object):
css = {"all": ["/ui/pkg/django-media/admin/css/forms.css"]}
def __init__(self, *args, **kwargs):
super(NOCForm, self).__init__(*args, **kwargs)
self.disabled_fields = set()
def disable_field(self, name):
self.disabled_fields.add(name)
def __iter__(self):
for name, field in six.iteritems(self.fields):
if name not in self.disabled_fields:
yield NOCBoundField(self, field, name)
| en | 0.321552 | # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Forms wrapper # --------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Third-party modules Bound field with django-admin like label-tag Form wrapper returning NOCBoundField items | 2.006429 | 2 |
ersteops/unit/views.py | Prescrypto/ErsteOps | 0 | 8679 | import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
def unit_json_list(request):
''' List Json View for local available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
''' List Json View for alliance available units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
| import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
def unit_json_list(request):
''' List Json View for local available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
''' List Json View for alliance available units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
| en | 0.586927 | List Json View for local available units Detail view of unit # Add crew list List Json View for alliance available units | 2.226484 | 2 |
olamundo.py/exercicios_refeitos/ex029.py | gabrielviticov/exercicios-python | 0 | 8680 | '''
ex029: Escreva um programa que leia a velocidade de uma carro. Se ele ultrapassar 80 km/h, mostre uma mensagem dizendo que ele foi multado. A multa vai custar R$ 7,00 por cada Km acima do limite.
'''
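# Illustrative example (not part of the original exercise code):
# at 95 km/h the fine is (95 - 80) * 7.00 = R$ 105.00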
from colorise import set_color, reset_color
cor = {
'limpa':'\033[m',
'white':'\033[1;97m'
}
set_color(fg='green')
velocidade_carro = int(input('Informe a velocidade do carro KM/H: '))
if velocidade_carro > 80:
multa = (velocidade_carro - 80) * 7.00
print('\nMULTADO! VOCÊ ULTRAPASSOU O LIMITE PERMITIDO. LOGO TERÁ QUE PAGAR ', end='')
reset_color()
print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
set_color(fg='green')
print('\nCONTINUE ASSIM. DIRIGINDO COM SEGURANÇA!')
| '''
ex029: Escreva um programa que leia a velocidade de uma carro. Se ele ultrapassar 80 km/h, mostre uma mensagem dizendo que ele foi multado. A multa vai custar R$ 7,00 por cada Km acima do limite.
'''
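# Illustrative example (not part of the original exercise code):
# at 95 km/h the fine is (95 - 80) * 7.00 = R$ 105.00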
from colorise import set_color, reset_color
cor = {
'limpa':'\033[m',
'white':'\033[1;97m'
}
set_color(fg='green')
velocidade_carro = int(input('Informe a velocidade do carro KM/H: '))
if velocidade_carro > 80:
multa = (velocidade_carro - 80) * 7.00
print('\nMULTADO! VOCÊ ULTRAPASSOU O LIMITE PERMITIDO. LOGO TERÁ QUE PAGAR ', end='')
reset_color()
print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
set_color(fg='green')
print('\nCONTINUE ASSIM. DIRIGINDO COM SEGURANÇA!')
| pt | 0.998142 | ex029: Escreva um programa que leia a velocidade de uma carro. Se ele ultrapassar 80 km/h, mostre uma mensagem dizendo que ele foi multado. A multa vai custar R$ 7,00 por cada Km acima do limite. | 3.631126 | 4 |
fruit/mixin/drawable.py | felko/fruit | 4 | 8681 | #!/usr/bin/env python3.4
# coding: utf-8
class Drawable:
"""
Base class for drawable objects.
"""
def draw(self):
"""
Returns a Surface object.
"""
raise NotImplementedError(
"Method `draw` is not implemented for {}".format(type(self)))
| #!/usr/bin/env python3.4
# coding: utf-8
class Drawable:
"""
Base class for drawable objects.
"""
def draw(self):
"""
Returns a Surface object.
"""
raise NotImplementedError(
"Method `draw` is not implemented for {}".format(type(self)))
| en | 0.663202 | #!/usr/bin/env python3.4 # coding: utf-8 Base class for drawable objects. Returns a Surface object. | 2.914138 | 3 |
src/action/tests/test_logic.py | uts-cic/ontask_b | 3 | 8682 | <reponame>uts-cic/ontask_b
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.core.management import call_command
import test
from dataops import pandas_db
from workflow.models import Workflow
class EmailActionTracking(test.OntaskTestCase):
fixtures = ['simple_email_action']
filename = os.path.join(
settings.BASE_DIR(),
'action',
'fixtures',
'simple_email_action_df.sql'
)
trck_tokens = [
"<KEY>",
"<KEY>",
"<KEY>NWbcY3YPTfJXRagPaeJae4M"
]
wflow_name = 'wflow1'
wflow_desc = 'description text for workflow 1'
wflow_empty = 'The workflow does not have data'
@classmethod
def setUpClass(cls):
super(EmailActionTracking, cls).setUpClass()
pandas_db.pg_restore_table(cls.filename)
def tearDown(self):
pandas_db.delete_all_tables()
super(EmailActionTracking, self).tearDown()
# Test that tracking hits are properly stored.
def test_tracking(self):
# Repeat the checks two times to test if they are accumulating
for idx in range(1, 3):
# Iterate over the tracking items
for trck in self.trck_tokens:
self.client.get(reverse('trck') + '?v=' + trck)
# Get the workflow and the data frame
workflow = Workflow.objects.get(name=self.wflow_name)
df = pandas_db.load_from_db(workflow.id)
# Check that the results have been updated in the DB (to 1)
for uemail in [x[1] for x in test.user_info
if x[1].startswith('student')]:
self.assertEqual(
int(df.loc[df['email'] == uemail, 'EmailRead_1'].values[0]),
idx
)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.core.management import call_command
import test
from dataops import pandas_db
from workflow.models import Workflow
class EmailActionTracking(test.OntaskTestCase):
fixtures = ['simple_email_action']
filename = os.path.join(
settings.BASE_DIR(),
'action',
'fixtures',
'simple_email_action_df.sql'
)
trck_tokens = [
"<KEY>",
"<KEY>",
"<KEY>NWbcY3YPTfJXRagPaeJae4M"
]
wflow_name = 'wflow1'
wflow_desc = 'description text for workflow 1'
wflow_empty = 'The workflow does not have data'
@classmethod
def setUpClass(cls):
super(EmailActionTracking, cls).setUpClass()
pandas_db.pg_restore_table(cls.filename)
def tearDown(self):
pandas_db.delete_all_tables()
super(EmailActionTracking, self).tearDown()
# Test that tracking hits are properly stored.
def test_tracking(self):
# Repeat the checks two times to test if they are accumulating
for idx in range(1, 3):
# Iterate over the tracking items
for trck in self.trck_tokens:
self.client.get(reverse('trck') + '?v=' + trck)
# Get the workflow and the data frame
workflow = Workflow.objects.get(name=self.wflow_name)
df = pandas_db.load_from_db(workflow.id)
# Check that the results have been updated in the DB (to 1)
for uemail in [x[1] for x in test.user_info
if x[1].startswith('student')]:
self.assertEqual(
int(df.loc[df['email'] == uemail, 'EmailRead_1'].values[0]),
idx
) | en | 0.897711 | # -*- coding: utf-8 -*- # Test that tracking hits are properly stored. # Repeat the checks two times to test if they are accumulating # Iterate over the tracking items # Get the workflow and the data frame # Check that the results have been updated in the DB (to 1) | 2.228657 | 2 |
obniz/parts/Moving/StepperMotor/__init__.py | izm51/obniz-python-sdk | 11 | 8683 | from attrdict import AttrDefault
import asyncio
class StepperMotor:
def __init__(self):
self.keys = ['<KEY> 'common']
self.required_keys = ['a', 'b', 'aa', 'bb']
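        # Assumed interpretation (not documented in the original source):
        # each list below is the cycle of coil output patterns for one step
        # mode: '1' = wave drive (one phase on), '2' = full step (two phases
        # on), '1-2' = half step. Outputs look active-low here (0 = energized),
        # consistent with free_wait() driving every io True to release the motor.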
self._step_instructions = AttrDefault(bool,
{
'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]],
'2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],
'1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]
}
)
self.type = None
self.current_step = 0
self._step_type = '2'
self.frequency = 100
self.rotation_step_count = 100
self.milli_meter_step_count = 1
@staticmethod
def info():
return AttrDefault(bool, {'name': 'StepperMotor'})
def wired(self, obniz):
self.obniz = obniz
if obniz.is_valid_io(*[self.params.common]):
self.common = obniz.get_io(*[self.params.common])
self.common.output(*[True])
self.type = 'unipolar'
else:
self.type = 'bipolar'
self.ios = []
self.ios.append(*[obniz.get_io(*[self.params.a])])
self.ios.append(*[obniz.get_io(*[self.params.b])])
self.ios.append(*[obniz.get_io(*[self.params.aa])])
self.ios.append(*[obniz.get_io(*[self.params.bb])])
async def step_wait(self, step_count):
        if type(step_count) not in [int, float]:
            raise Exception('must provide number')
step_count = round(*[step_count])
if step_count == 0:
return
step_count_abs = abs(*[step_count])
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
array = []
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
if step_count > 0:
for i in range(0, len(instructions), 1):
current_phase += 1
if current_phase >= instruction_length:
current_phase = 0
array.append(*[instructions[current_phase]])
else:
for i in range(0, len(instructions), 1):
current_phase -= 1
if current_phase < 0:
current_phase = (instruction_length - 1)
array.append(*[instructions[current_phase]])
msec = 1000 / self.frequency
msec = int(*[msec])
if msec < 1:
msec = 1
def anonymous0(index):
instruction = array[index]
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instruction[i]])
state = anonymous0
states = []
for i in range(0, instruction_length, 1):
states.append(*[AttrDefault(bool, {'duration': msec, 'state': state})])
await self.obniz.io.repeat_wait(*[states, step_count_abs])
self.current_step += step_count
async def step_to_wait(self, destination):
mustmove = (destination - self.current_step)
await self.step_wait(*[mustmove])
async def hold_wait(self):
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instructions[current_phase][i]])
await self.obniz.ping_wait(*[])
async def free_wait(self):
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[True])
await self.obniz.ping_wait(*[])
def step_type(self, step_type):
new_type = self._step_instructions[step_type]
if not new_type:
raise Exception('unknown step type ' + str(step_type))
self._step_type = step_type
def speed(self, step_per_sec):
self.frequency = step_per_sec
def current_rotation(self):
return self.current_step / self.rotation_step_count * 360
def current_angle(self):
angle = int(*[self.current_rotation(*[]) * 1000]) % 360000 / 1000
if angle < 0:
angle = (360 - angle)
return angle
async def rotate_wait(self, rotation):
rotation /= 360
needed = rotation * self.rotation_step_count
await self.step_wait(*[needed])
async def rotate_to_wait(self, angle):
needed = (angle - self.current_angle(*[]))
if abs(*[needed]) > 180:
needed = (needed - 360) if needed > 0 else (360 + needed)
needed = needed / 360 * self.rotation_step_count
await self.step_wait(*[needed])
def current_distance(self):
return self.current_step / self.milli_meter_step_count
async def move_wait(self, distance):
needed = distance * self.milli_meter_step_count
await self.step_wait(*[needed])
async def move_to_wait(self, destination):
needed = (destination - self.current_distance(*[])) * self.milli_meter_step_count
await self.step_wait(*[needed])
def _get_step_instructions(self):
return self._step_instructions[self._step_type] | from attrdict import AttrDefault
import asyncio
class StepperMotor:
def __init__(self):
self.keys = ['<KEY> 'common']
self.required_keys = ['a', 'b', 'aa', 'bb']
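        # Assumed interpretation (not documented in the original source):
        # each list below is the cycle of coil output patterns for one step
        # mode: '1' = wave drive (one phase on), '2' = full step (two phases
        # on), '1-2' = half step. Outputs look active-low here (0 = energized),
        # consistent with free_wait() driving every io True to release the motor.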
self._step_instructions = AttrDefault(bool,
{
'1': [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]],
'2': [[0, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0]],
'1-2': [[0, 1, 1, 1], [0, 0, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1], [1, 1, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0], [0, 1, 1, 0]]
}
)
self.type = None
self.current_step = 0
self._step_type = '2'
self.frequency = 100
self.rotation_step_count = 100
self.milli_meter_step_count = 1
@staticmethod
def info():
return AttrDefault(bool, {'name': 'StepperMotor'})
def wired(self, obniz):
self.obniz = obniz
if obniz.is_valid_io(*[self.params.common]):
self.common = obniz.get_io(*[self.params.common])
self.common.output(*[True])
self.type = 'unipolar'
else:
self.type = 'bipolar'
self.ios = []
self.ios.append(*[obniz.get_io(*[self.params.a])])
self.ios.append(*[obniz.get_io(*[self.params.b])])
self.ios.append(*[obniz.get_io(*[self.params.aa])])
self.ios.append(*[obniz.get_io(*[self.params.bb])])
async def step_wait(self, step_count):
        if type(step_count) not in [int, float]:
            raise Exception('must provide number')
step_count = round(*[step_count])
if step_count == 0:
return
step_count_abs = abs(*[step_count])
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
array = []
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
if step_count > 0:
for i in range(0, len(instructions), 1):
current_phase += 1
if current_phase >= instruction_length:
current_phase = 0
array.append(*[instructions[current_phase]])
else:
for i in range(0, len(instructions), 1):
current_phase -= 1
if current_phase < 0:
current_phase = (instruction_length - 1)
array.append(*[instructions[current_phase]])
msec = 1000 / self.frequency
msec = int(*[msec])
if msec < 1:
msec = 1
def anonymous0(index):
instruction = array[index]
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instruction[i]])
state = anonymous0
states = []
for i in range(0, instruction_length, 1):
states.append(*[AttrDefault(bool, {'duration': msec, 'state': state})])
await self.obniz.io.repeat_wait(*[states, step_count_abs])
self.current_step += step_count
async def step_to_wait(self, destination):
mustmove = (destination - self.current_step)
await self.step_wait(*[mustmove])
async def hold_wait(self):
instructions = self._get_step_instructions(*[])
instruction_length = len(instructions)
current_phase = self.current_step % instruction_length
if current_phase < 0:
current_phase = (instruction_length - current_phase * -1)
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[instructions[current_phase][i]])
await self.obniz.ping_wait(*[])
async def free_wait(self):
for i in range(0, len(self.ios), 1):
self.ios[i].output(*[True])
await self.obniz.ping_wait(*[])
def step_type(self, step_type):
new_type = self._step_instructions[step_type]
if not new_type:
raise Exception('unknown step type ' + str(step_type))
self._step_type = step_type
def speed(self, step_per_sec):
self.frequency = step_per_sec
def current_rotation(self):
return self.current_step / self.rotation_step_count * 360
def current_angle(self):
angle = int(*[self.current_rotation(*[]) * 1000]) % 360000 / 1000
if angle < 0:
angle = (360 - angle)
return angle
async def rotate_wait(self, rotation):
rotation /= 360
needed = rotation * self.rotation_step_count
await self.step_wait(*[needed])
async def rotate_to_wait(self, angle):
needed = (angle - self.current_angle(*[]))
if abs(*[needed]) > 180:
needed = (needed - 360) if needed > 0 else (360 + needed)
needed = needed / 360 * self.rotation_step_count
await self.step_wait(*[needed])
def current_distance(self):
return self.current_step / self.milli_meter_step_count
async def move_wait(self, distance):
needed = distance * self.milli_meter_step_count
await self.step_wait(*[needed])
async def move_to_wait(self, destination):
needed = (destination - self.current_distance(*[])) * self.milli_meter_step_count
await self.step_wait(*[needed])
def _get_step_instructions(self):
return self._step_instructions[self._step_type] | none | 1 | 2.451022 | 2 |
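# A rough usage sketch of the StepperMotor driver above (illustrative only:
# `motor` is assumed to be an instance that has already been wired to an
# obniz device, so self.obniz and self.ios are populated).
import asyncio

async def spin_demo(motor):
    motor.speed(200)                # step frequency in steps per second
    motor.step_type('1-2')          # switch to the half-step instruction table
    await motor.rotate_wait(360)    # one full relative rotation
    await motor.rotate_to_wait(90)  # absolute move to the 90-degree position
    await motor.free_wait()         # de-energize the coils
# e.g. asyncio.get_event_loop().run_until_complete(spin_demo(motor))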
|
basic_assignment/39.py | 1212091/python-learning | 0 | 8684 | <reponame>1212091/python-learning
input_num = raw_input()
print(str(eval(input_num)))
| input_num = raw_input()
print(str(eval(input_num))) | none | 1 | 2.621203 | 3 |
|
website/website/apps/entry/admin.py | SimonGreenhill/Language5 | 1 | 8685 | <filename>website/website/apps/entry/admin.py
from django.contrib import admin
from django.db.models import Count
from reversion.admin import VersionAdmin
from website.apps.lexicon.models import Lexicon
from website.apps.entry.models import Task, TaskLog, Wordlist, WordlistMember
from website.apps.core.admin import TrackedModelAdmin
class CheckpointListFilter(admin.SimpleListFilter):
title = 'Has Checkpoint'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'has_checkpoint'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('yes', 'Has Checkpoint'),
('no', 'No Checkpoint'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'yes':
return queryset.filter(checkpoint__isnull=False).exclude(checkpoint__iexact='')
if self.value() == 'no':
return queryset.filter(checkpoint__isnull=True).filter(checkpoint__exact='')
class TaskAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'editor', 'records', 'completable', 'done')
list_filter = ('editor', 'done', 'completable', CheckpointListFilter, 'source', 'language', 'view')
ordering = ('-id',)
exclude = ('lexicon',)
list_select_related = True
class TaskLogAdmin(admin.ModelAdmin):
date_hierarchy = 'time'
list_display = ('person', 'task_id', 'time', 'page', 'message')
list_filter = ('person', 'page', )
ordering = ('-time',)
list_select_related = True
def task_id(self, instance):
return instance.task_id
class WordlistMembersInline(admin.TabularInline):
model = Wordlist.words.through
extra = 0 # don't add anything new unless explicitly told to.
class TaskWordlistAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'words_count')
ordering = ('name',)
filter_horizontal = ('words',)
inlines = [WordlistMembersInline,]
def get_queryset(self, request):
return Wordlist.objects.annotate(words_count=Count("words"))
def words_count(self, inst):
return inst.words_count
words_count.admin_order_field = 'words_count'
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
admin.site.register(Wordlist, TaskWordlistAdmin)
| <filename>website/website/apps/entry/admin.py
from django.contrib import admin
from django.db.models import Count
from reversion.admin import VersionAdmin
from website.apps.lexicon.models import Lexicon
from website.apps.entry.models import Task, TaskLog, Wordlist, WordlistMember
from website.apps.core.admin import TrackedModelAdmin
class CheckpointListFilter(admin.SimpleListFilter):
title = 'Has Checkpoint'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'has_checkpoint'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('yes', 'Has Checkpoint'),
('no', 'No Checkpoint'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'yes':
return queryset.filter(checkpoint__isnull=False).exclude(checkpoint__iexact='')
if self.value() == 'no':
return queryset.filter(checkpoint__isnull=True).filter(checkpoint__exact='')
class TaskAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'editor', 'records', 'completable', 'done')
list_filter = ('editor', 'done', 'completable', CheckpointListFilter, 'source', 'language', 'view')
ordering = ('-id',)
exclude = ('lexicon',)
list_select_related = True
class TaskLogAdmin(admin.ModelAdmin):
date_hierarchy = 'time'
list_display = ('person', 'task_id', 'time', 'page', 'message')
list_filter = ('person', 'page', )
ordering = ('-time',)
list_select_related = True
def task_id(self, instance):
return instance.task_id
class WordlistMembersInline(admin.TabularInline):
model = Wordlist.words.through
extra = 0 # don't add anything new unless explicitly told to.
class TaskWordlistAdmin(TrackedModelAdmin, VersionAdmin):
date_hierarchy = 'added'
list_display = ('id', 'name', 'words_count')
ordering = ('name',)
filter_horizontal = ('words',)
inlines = [WordlistMembersInline,]
def get_queryset(self, request):
return Wordlist.objects.annotate(words_count=Count("words"))
def words_count(self, inst):
return inst.words_count
words_count.admin_order_field = 'words_count'
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
admin.site.register(Wordlist, TaskWordlistAdmin)
| en | 0.822515 | # Parameter for the filter that will be used in the URL query. Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`. # don't add anything new unless explicitly told to. | 2.088128 | 2 |
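# Note on CheckpointListFilter above: the 'no' branch chains
# .filter(checkpoint__isnull=True).filter(checkpoint__exact=''), which ANDs the
# two lookups so no row can ever match. A sketch of an OR-based variant using
# Q objects (names mirror the admin module; this is not a drop-in patch):
from django.db.models import Q

def no_checkpoint(queryset):
    # Rows whose checkpoint is NULL *or* the empty string.
    return queryset.filter(Q(checkpoint__isnull=True) | Q(checkpoint__exact=''))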
src/modules/python.py | fest2bash/fest2bash | 0 | 8686 | <reponame>fest2bash/fest2bash<filename>src/modules/python.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.dont_write_bytecode = True
from pprint import pprint
from base import BaseFest2Bash
class Fest2Bash(BaseFest2Bash):
def __init__(self, manifest):
super(Fest2Bash, self).__init__(manifest)
def generate(self, *args, **kwargs):
return self.manifest
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.dont_write_bytecode = True
from pprint import pprint
from base import BaseFest2Bash
class Fest2Bash(BaseFest2Bash):
def __init__(self, manifest):
super(Fest2Bash, self).__init__(manifest)
def generate(self, *args, **kwargs):
return self.manifest | en | 0.308914 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- | 2.178199 | 2 |
opennem/utils/scrapyd.py | paulculmsee/opennem | 22 | 8687 | <reponame>paulculmsee/opennem
#!/usr/bin/env python
"""
Srapyd control methods
"""
import logging
from typing import Any, Dict, List
from urllib.parse import urljoin
from opennem.settings import settings
from opennem.utils.http import http
from opennem.utils.scrapy import get_spiders
logger = logging.getLogger("scrapyd.client")
def get_jobs() -> Dict[str, Any]:
job_url = urljoin(
settings.scrapyd_url,
"listjobs.json?project={}".format(settings.scrapyd_project_name),
)
jobs = http.get(job_url).json()
return jobs
def job_cancel(id: str) -> bool:
cancel_job_url = urljoin(settings.scrapyd_url, "cancel.json")
r = http.post(cancel_job_url, data={"project": "opennem", "job": id})
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Cancelled job: {}".format(resp["jobid"]))
return True
def job_schedule(spider_name: str) -> bool:
schedule_url = urljoin(settings.scrapyd_url, "schedule.json")
try:
r = http.post(schedule_url, data={"project": "opennem", "spider": spider_name})
except Exception as e:
logger.error("Error getting {}: {}".format(schedule_url, e))
return False
if not r.ok:
logger.error("Error: {}".format(r.status_code))
return False
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Queued spider {} with task: {}".format(spider_name, resp["jobid"]))
return True
def job_cancel_state(state: str = "pending") -> bool:
jobs = get_jobs()
if state not in jobs:
logger.info("Invalid state or no jobs in state {}".format(state))
return False
pending_jobs = jobs[state]
for job in pending_jobs:
job_id = job["id"]
logger.info("Cancelling {}".format(job_id))
job_cancel(job_id)
return True
def job_schedule_all(matches: str = None) -> List[str]:
spiders = get_spiders()
spider_scheduled = []
for s in spiders:
if matches and matches != s:
continue
job_schedule(s)
spider_scheduled.append(s)
return spider_scheduled
| #!/usr/bin/env python
"""
Srapyd control methods
"""
import logging
from typing import Any, Dict, List
from urllib.parse import urljoin
from opennem.settings import settings
from opennem.utils.http import http
from opennem.utils.scrapy import get_spiders
logger = logging.getLogger("scrapyd.client")
def get_jobs() -> Dict[str, Any]:
job_url = urljoin(
settings.scrapyd_url,
"listjobs.json?project={}".format(settings.scrapyd_project_name),
)
jobs = http.get(job_url).json()
return jobs
def job_cancel(id: str) -> bool:
cancel_job_url = urljoin(settings.scrapyd_url, "cancel.json")
r = http.post(cancel_job_url, data={"project": "opennem", "job": id})
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Cancelled job: {}".format(resp["jobid"]))
return True
def job_schedule(spider_name: str) -> bool:
schedule_url = urljoin(settings.scrapyd_url, "schedule.json")
try:
r = http.post(schedule_url, data={"project": "opennem", "spider": spider_name})
except Exception as e:
logger.error("Error getting {}: {}".format(schedule_url, e))
return False
if not r.ok:
logger.error("Error: {}".format(r.status_code))
return False
resp = r.json()
if resp["status"] == "error":
logger.error("Error: {}".format(resp["message"]))
return False
logger.info("Queued spider {} with task: {}".format(spider_name, resp["jobid"]))
return True
def job_cancel_state(state: str = "pending") -> bool:
jobs = get_jobs()
if state not in jobs:
logger.info("Invalid state or no jobs in state {}".format(state))
return False
pending_jobs = jobs[state]
for job in pending_jobs:
job_id = job["id"]
logger.info("Cancelling {}".format(job_id))
job_cancel(job_id)
return True
def job_schedule_all(matches: str = None) -> List[str]:
spiders = get_spiders()
spider_scheduled = []
for s in spiders:
if matches and matches != s:
continue
job_schedule(s)
spider_scheduled.append(s)
return spider_scheduled | en | 0.210886 | #!/usr/bin/env python Srapyd control methods | 2.381094 | 2 |
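# A hedged usage sketch of the scrapyd helpers above (the spider name passed
# to `matches` is a placeholder, not necessarily a real opennem spider):
scheduled = job_schedule_all(matches="au_nem_current")
print("scheduled:", scheduled)
jobs = get_jobs()
print(len(jobs.get("pending", [])), "jobs pending")
job_cancel_state("pending")   # cancels anything still queued; False if none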
src/abaqus/Material/Elastic/Linear/Elastic.py | Haiiliin/PyAbaqus | 7 | 8688 | from abaqusConstants import *
from .FailStrain import FailStrain
from .FailStress import FailStress
class Elastic:
"""The Elastic object specifies elastic material properties.
Notes
-----
This object can be accessed by:
.. code-block:: python
import material
mdb.models[name].materials[name].elastic
import odbMaterial
session.odbs[name].materials[name].elastic
The table data for this object are:
- If *type*=ISOTROPIC, the table data specify the following:
- The Young's modulus, E.
- The Poisson's ratio, v.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHEAR, the table data specify the following:
- The shear modulus,G.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENGINEERING_CONSTANTS, the table data specify the following:
- E1.
- E2.
- E3.
- v12.
- v13.
- v23.
- G12.
- G13.
- G23.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=LAMINA, the table data specify the following:
- E1.
- E2.
- v12.
- G12.
- G13. This shear modulus is needed to define transverse shear behavior in shells.
- G23. This shear modulus is needed to define transverse shear behavior in shells.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ORTHOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1212.
- D1313.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ANISOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1112.
- D2212.
- D3312.
- D1212.
- D1113.
- D2213.
- D3313.
- D1213.
- D1313.
- D1123.
- D2223.
- D3323.
- D1223.
- D1323.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=TRACTION, the table data specify the following:
- EE for warping elements; Enn for cohesive elements.
- G1 for warping elements; Ess for cohesive elements.
- G2 for warping elements; Ett for cohesive elements.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=BILAMINA, the table data specify the following:
- E1+.
- E2+.
- v12+.
- G12.
- E1-.
- E2-.
- v112-.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHORT_FIBER, there is no table data.
The corresponding analysis keywords are:
- ELASTIC
"""
# A FailStress object.
failStress: FailStress = FailStress(((),))
# A FailStrain object.
failStrain: FailStrain = FailStrain(((),))
def __init__(self, table: tuple, type: SymbolicConstant = ISOTROPIC, noCompression: Boolean = OFF,
noTension: Boolean = OFF, temperatureDependency: Boolean = OFF, dependencies: int = 0,
moduli: SymbolicConstant = LONG_TERM):
"""This method creates an Elastic object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].materials[name].Elastic
session.odbs[name].materials[name].Elastic
Parameters
----------
table
A sequence of sequences of Floats specifying the items described below.
type
A SymbolicConstant specifying the type of elasticity data provided. Possible values are:
- ISOTROPIC
- ORTHOTROPIC
- ANISOTROPIC
- ENGINEERING_CONSTANTS
- LAMINA
- TRACTION
- COUPLED_TRACTION
- SHORT_FIBER
- SHEAR
- BILAMINA
The default value is ISOTROPIC.
noCompression
A Boolean specifying whether compressive stress is allowed. The default value is OFF.
noTension
A Boolean specifying whether tensile stress is allowed. The default value is OFF.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
moduli
A SymbolicConstant specifying the time-dependence of the elastic material constants.
Possible values are INSTANTANEOUS and LONG_TERM. The default value is LONG_TERM.
Returns
-------
An Elastic object.
Raises
------
RangeError
"""
pass
def setValues(self):
"""This method modifies the Elastic object.
Raises
------
RangeError
"""
pass
| from abaqusConstants import *
from .FailStrain import FailStrain
from .FailStress import FailStress
class Elastic:
"""The Elastic object specifies elastic material properties.
Notes
-----
This object can be accessed by:
.. code-block:: python
import material
mdb.models[name].materials[name].elastic
import odbMaterial
session.odbs[name].materials[name].elastic
The table data for this object are:
- If *type*=ISOTROPIC, the table data specify the following:
- The Young's modulus, E.
- The Poisson's ratio, v.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHEAR, the table data specify the following:
- The shear modulus,G.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ENGINEERING_CONSTANTS, the table data specify the following:
- E1.
- E2.
- E3.
- v12.
- v13.
- v23.
- G12.
- G13.
- G23.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=LAMINA, the table data specify the following:
- E1.
- E2.
- v12.
- G12.
- G13. This shear modulus is needed to define transverse shear behavior in shells.
- G23. This shear modulus is needed to define transverse shear behavior in shells.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ORTHOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1212.
- D1313.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=ANISOTROPIC, the table data specify the following:
- D1111.
- D1122.
- D2222.
- D1133.
- D2233.
- D3333.
- D1112.
- D2212.
- D3312.
- D1212.
- D1113.
- D2213.
- D3313.
- D1213.
- D1313.
- D1123.
- D2223.
- D3323.
- D1223.
- D1323.
- D2323.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=TRACTION, the table data specify the following:
- EE for warping elements; Enn for cohesive elements.
- G1 for warping elements; Ess for cohesive elements.
- G2 for warping elements; Ett for cohesive elements.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=BILAMINA, the table data specify the following:
- E1+.
- E2+.
- v12+.
- G12.
- E1-.
- E2-.
- v112-.
- Temperature, if the data depend on temperature.
- Value of the first field variable, if the data depend on field variables.
- Value of the second field variable.
- Etc.
- If *type*=SHORT_FIBER, there is no table data.
The corresponding analysis keywords are:
- ELASTIC
"""
# A FailStress object.
failStress: FailStress = FailStress(((),))
# A FailStrain object.
failStrain: FailStrain = FailStrain(((),))
def __init__(self, table: tuple, type: SymbolicConstant = ISOTROPIC, noCompression: Boolean = OFF,
noTension: Boolean = OFF, temperatureDependency: Boolean = OFF, dependencies: int = 0,
moduli: SymbolicConstant = LONG_TERM):
"""This method creates an Elastic object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].materials[name].Elastic
session.odbs[name].materials[name].Elastic
Parameters
----------
table
A sequence of sequences of Floats specifying the items described below.
type
A SymbolicConstant specifying the type of elasticity data provided. Possible values are:
- ISOTROPIC
- ORTHOTROPIC
- ANISOTROPIC
- ENGINEERING_CONSTANTS
- LAMINA
- TRACTION
- COUPLED_TRACTION
- SHORT_FIBER
- SHEAR
- BILAMINA
The default value is ISOTROPIC.
noCompression
A Boolean specifying whether compressive stress is allowed. The default value is OFF.
noTension
A Boolean specifying whether tensile stress is allowed. The default value is OFF.
temperatureDependency
A Boolean specifying whether the data depend on temperature. The default value is OFF.
dependencies
An Int specifying the number of field variable dependencies. The default value is 0.
moduli
A SymbolicConstant specifying the time-dependence of the elastic material constants.
Possible values are INSTANTANEOUS and LONG_TERM. The default value is LONG_TERM.
Returns
-------
An Elastic object.
Raises
------
RangeError
"""
pass
def setValues(self):
"""This method modifies the Elastic object.
Raises
------
RangeError
"""
pass
| en | 0.423837 | The Elastic object specifies elastic material properties. Notes ----- This object can be accessed by: .. code-block:: python import material mdb.models[name].materials[name].elastic import odbMaterial session.odbs[name].materials[name].elastic The table data for this object are: - If *type*=ISOTROPIC, the table data specify the following: - The Young's modulus, E. - The Poisson's ratio, v. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=SHEAR, the table data specify the following: - The shear modulus,G. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=ENGINEERING_CONSTANTS, the table data specify the following: - E1. - E2. - E3. - v12. - v13. - v23. - G12. - G13. - G23. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=LAMINA, the table data specify the following: - E1. - E2. - v12. - G12. - G13. This shear modulus is needed to define transverse shear behavior in shells. - G23. This shear modulus is needed to define transverse shear behavior in shells. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=ORTHOTROPIC, the table data specify the following: - D1111. - D1122. - D2222. - D1133. - D2233. - D3333. - D1212. - D1313. - D2323. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=ANISOTROPIC, the table data specify the following: - D1111. - D1122. - D2222. - D1133. - D2233. - D3333. - D1112. - D2212. - D3312. - D1212. - D1113. - D2213. - D3313. - D1213. - D1313. - D1123. - D2223. - D3323. - D1223. - D1323. - D2323. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=TRACTION, the table data specify the following: - EE for warping elements; Enn for cohesive elements. - G1 for warping elements; Ess for cohesive elements. - G2 for warping elements; Ett for cohesive elements. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=BILAMINA, the table data specify the following: - E1+. - E2+. - v12+. - G12. - E1-. - E2-. - v112-. - Temperature, if the data depend on temperature. - Value of the first field variable, if the data depend on field variables. - Value of the second field variable. - Etc. - If *type*=SHORT_FIBER, there is no table data. The corresponding analysis keywords are: - ELASTIC # A FailStress object. # A FailStrain object. This method creates an Elastic object. Notes ----- This function can be accessed by: .. code-block:: python mdb.models[name].materials[name].Elastic session.odbs[name].materials[name].Elastic Parameters ---------- table A sequence of sequences of Floats specifying the items described below. type A SymbolicConstant specifying the type of elasticity data provided. 
Possible values are: - ISOTROPIC - ORTHOTROPIC - ANISOTROPIC - ENGINEERING_CONSTANTS - LAMINA - TRACTION - COUPLED_TRACTION - SHORT_FIBER - SHEAR - BILAMINA The default value is ISOTROPIC. noCompression A Boolean specifying whether compressive stress is allowed. The default value is OFF. noTension A Boolean specifying whether tensile stress is allowed. The default value is OFF. temperatureDependency A Boolean specifying whether the data depend on temperature. The default value is OFF. dependencies An Int specifying the number of field variable dependencies. The default value is 0. moduli A SymbolicConstant specifying the time-dependence of the elastic material constants. Possible values are INSTANTANEOUS and LONG_TERM. The default value is LONG_TERM. Returns ------- An Elastic object. Raises ------ RangeError This method modifies the Elastic object. Raises ------ RangeError | 2.776555 | 3 |
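# A short usage sketch based on the access path in the docstring above
# (model/material names and the property values are placeholders; this runs
# inside Abaqus/CAE scripting where `mdb` and the symbolic constants exist):
from abaqusConstants import ISOTROPIC
mdb.models['Model-1'].Material(name='Steel')
mdb.models['Model-1'].materials['Steel'].Elastic(
    type=ISOTROPIC,
    table=((210000.0, 0.3),))   # one (E, v) row for an ISOTROPIC definition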
apps/pyscrabble/pyscrabble-hatchet/setup.py | UWSysLab/diamond | 19 | 8689 | <reponame>UWSysLab/diamond<filename>apps/pyscrabble/pyscrabble-hatchet/setup.py
# setup.py for pyscrabble
from distutils.core import setup
try:
import py2exe
HAS_PY2EXE = True
except ImportError:
HAS_PY2EXE = False
import glob
import os
import pkg_resources
import sys
from pyscrabble.constants import VERSION
from pyscrabble import util
from pyscrabble import dist
def fix_path(item):
if type(item) in (list, tuple):
if 'config' in item[0]:
return (item[0].replace('config', dist.get_app_data_dir()), item[1])
else:
return (item[0].replace('resources/', 'share/pyscrabble/'), item[1])
else:
return item
kwargs = {
'name': 'pyscrabble',
'version': VERSION,
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'http://pyscrabble.sourceforge.net',
'data_files': dist.getDataFiles(),
'packages': ['pyscrabble', 'pyscrabble.command', 'pyscrabble.game', 'pyscrabble.gui', 'pyscrabble.net']
}
if HAS_PY2EXE and 'py2exe' in sys.argv:
#eggpacks = pkg_resources.require("nevow")
#for egg in eggpacks:
# if os.path.isdir(egg.location):
# sys.path.insert(0, egg.location)
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com",p)
for extra in ["win32com.shell"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.addPackagePath(extra, p)
except ImportError:
print 'import error'
kwargs['py_modules'] = ['pyscrabble-main', 'server_console', 'db_upgrade']
kwargs['options'] = {
"py2exe": {
"packages": "encodings, nevow",
"includes": "pango,atk,gobject,decimal,dumbdbm,dbhash,xml.sax.expatreader",
"dll_excludes": ["iconv.dll","intl.dll","libatk-1.0-0.dll",
"libgdk_pixbuf-2.0-0.dll","libgdk-win32-2.0-0.dll",
"libglib-2.0-0.dll","libgmodule-2.0-0.dll",
"libgobject-2.0-0.dll","libgthread-2.0-0.dll",
"libgtk-win32-2.0-0.dll","libpango-1.0-0.dll",
"libpangowin32-1.0-0.dll"],
}
}
kwargs['windows'] = [{
"script": "pyscrabble-main.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['console'] = [{
"script": "server_service.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}, {
"script": "server_console.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['service'] = ['server_service']
kwargs['data_files'] += [('.', ['CHANGELOG.txt'])]
kwargs['data_files'] += [('.', ['LICENSE.txt'])]
#for egg in eggpacks:
# kwargs['data_files'] += dist.getResourceDirs(egg.location, ensureLower=False, basePath=None, outdir='extra')
else:
kwargs['scripts'] = ['pyscrabble-main.py', 'server_console.py', 'db_upgrade.py']
kwargs['data_files'] = [fix_path(x) for x in kwargs['data_files']]
kwargs['cmdclass'] = {'install_lib': dist.InstallLib, 'install_scripts' : dist.InstallScripts}
setup(**kwargs) | # setup.py for pyscrabble
from distutils.core import setup
try:
import py2exe
HAS_PY2EXE = True
except ImportError:
HAS_PY2EXE = False
import glob
import os
import pkg_resources
import sys
from pyscrabble.constants import VERSION
from pyscrabble import util
from pyscrabble import dist
def fix_path(item):
if type(item) in (list, tuple):
if 'config' in item[0]:
return (item[0].replace('config', dist.get_app_data_dir()), item[1])
else:
return (item[0].replace('resources/', 'share/pyscrabble/'), item[1])
else:
return item
kwargs = {
'name': 'pyscrabble',
'version': VERSION,
'author': '<NAME>',
'author_email': '<EMAIL>',
'url': 'http://pyscrabble.sourceforge.net',
'data_files': dist.getDataFiles(),
'packages': ['pyscrabble', 'pyscrabble.command', 'pyscrabble.game', 'pyscrabble.gui', 'pyscrabble.net']
}
if HAS_PY2EXE and 'py2exe' in sys.argv:
#eggpacks = pkg_resources.require("nevow")
#for egg in eggpacks:
# if os.path.isdir(egg.location):
# sys.path.insert(0, egg.location)
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com",p)
for extra in ["win32com.shell"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.addPackagePath(extra, p)
except ImportError:
print 'import error'
kwargs['py_modules'] = ['pyscrabble-main', 'server_console', 'db_upgrade']
kwargs['options'] = {
"py2exe": {
"packages": "encodings, nevow",
"includes": "pango,atk,gobject,decimal,dumbdbm,dbhash,xml.sax.expatreader",
"dll_excludes": ["iconv.dll","intl.dll","libatk-1.0-0.dll",
"libgdk_pixbuf-2.0-0.dll","libgdk-win32-2.0-0.dll",
"libglib-2.0-0.dll","libgmodule-2.0-0.dll",
"libgobject-2.0-0.dll","libgthread-2.0-0.dll",
"libgtk-win32-2.0-0.dll","libpango-1.0-0.dll",
"libpangowin32-1.0-0.dll"],
}
}
kwargs['windows'] = [{
"script": "pyscrabble-main.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['console'] = [{
"script": "server_service.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}, {
"script": "server_console.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['service'] = ['server_service']
kwargs['data_files'] += [('.', ['CHANGELOG.txt'])]
kwargs['data_files'] += [('.', ['LICENSE.txt'])]
#for egg in eggpacks:
# kwargs['data_files'] += dist.getResourceDirs(egg.location, ensureLower=False, basePath=None, outdir='extra')
else:
kwargs['scripts'] = ['pyscrabble-main.py', 'server_console.py', 'db_upgrade.py']
kwargs['data_files'] = [fix_path(x) for x in kwargs['data_files']]
kwargs['cmdclass'] = {'install_lib': dist.InstallLib, 'install_scripts' : dist.InstallScripts}
setup(**kwargs) | en | 0.424416 | # setup.py for pyscrabble #eggpacks = pkg_resources.require("nevow") #for egg in eggpacks: # if os.path.isdir(egg.location): # sys.path.insert(0, egg.location) #for egg in eggpacks: # kwargs['data_files'] += dist.getResourceDirs(egg.location, ensureLower=False, basePath=None, outdir='extra') | 2.15806 | 2 |
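# Typical invocations for the setup script above (it targets Python 2 -- note
# the `print 'import error'` statement -- and the py2exe branch only applies
# on Windows with py2exe installed):
#   python setup.py install    # regular install: scripts + share/pyscrabble data
#   python setup.py py2exe     # build the Windows executables and service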
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py | liuxiaomiao123/NeuroMathAcademy | 2 | 8690 | def integrate_exponential(a, x0, dt, T):
"""Compute solution of the differential equation xdot=a*x with
initial condition x0 for a duration T. Use time step dt for numerical
solution.
Args:
a (scalar): parameter of xdot (xdot=a*x)
x0 (scalar): initial condition (x at time 0)
dt (scalar): timestep of the simulation
T (scalar): total duration of the simulation
Returns:
ndarray, ndarray: `x` for all simulation steps and the time `t` at each step
"""
# Initialize variables
t = np.arange(0, T, dt)
x = np.zeros_like(t, dtype=complex)
x[0] = x0
# Step through system and integrate in time
for k in range(1, len(t)):
# for each point in time, compute xdot = a*x
xdot = (a*x[k-1])
# update x by adding xdot scaled by dt
x[k] = x[k-1] + xdot * dt
return x, t
# choose parameters
a = -0.5 # parameter in f(x)
T = 10 # total Time duration
dt = 0.001 # timestep of our simulation
x0 = 1. # initial condition of x at time 0
x, t = integrate_exponential(a, x0, dt, T)
with plt.xkcd():
fig = plt.figure(figsize=(8, 6))
plt.plot(t, x.real)
plt.xlabel('Time (s)')
plt.ylabel('x') | def integrate_exponential(a, x0, dt, T):
"""Compute solution of the differential equation xdot=a*x with
initial condition x0 for a duration T. Use time step dt for numerical
solution.
Args:
a (scalar): parameter of xdot (xdot=a*x)
x0 (scalar): initial condition (x at time 0)
dt (scalar): timestep of the simulation
T (scalar): total duration of the simulation
Returns:
ndarray, ndarray: `x` for all simulation steps and the time `t` at each step
"""
# Initialize variables
t = np.arange(0, T, dt)
x = np.zeros_like(t, dtype=complex)
x[0] = x0
# Step through system and integrate in time
for k in range(1, len(t)):
# for each point in time, compute xdot = a*x
xdot = (a*x[k-1])
# update x by adding xdot scaled by dt
x[k] = x[k-1] + xdot * dt
return x, t
# choose parameters
a = -0.5 # parameter in f(x)
T = 10 # total Time duration
dt = 0.001 # timestep of our simulation
x0 = 1. # initial condition of x at time 0
x, t = integrate_exponential(a, x0, dt, T)
with plt.xkcd():
fig = plt.figure(figsize=(8, 6))
plt.plot(t, x.real)
plt.xlabel('Time (s)')
plt.ylabel('x') | en | 0.695171 | Compute solution of the differential equation xdot=a*x with initial condition x0 for a duration T. Use time step dt for numerical solution. Args: a (scalar): parameter of xdot (xdot=a*x) x0 (scalar): initial condition (x at time 0) dt (scalar): timestep of the simulation T (scalar): total duration of the simulation Returns: ndarray, ndarray: `x` for all simulation steps and the time `t` at each step # Initialize variables # Step through system and integrate in time # for each point in time, compute xdot = a*x # update x by adding xdot scaled by dt # choose parameters # parameter in f(x) # total Time duration # timestep of our simulation # initial condition of x at time 0 | 4.054157 | 4 |
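# Sanity check for the Euler integration above: the exact solution of
# xdot = a*x is x(t) = x0 * exp(a*t), so the numerical error should shrink
# as dt is reduced (the printed value is only illustrative).
x_exact = x0 * np.exp(a * t)
max_err = np.max(np.abs(x.real - x_exact))
print("max |euler - exact| =", max_err)   # small for dt = 0.001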
PyTemp/gis/shapefile_to_geojson.py | SwaggerKhan/PatrolGis | 0 | 8691 | <reponame>SwaggerKhan/PatrolGis
import json
import geojson
import geopandas as gpd
class SaveToGeoJSON:
__name_counter = 0
def file_name(self):
if self.__name_counter == 0:
self.__name_counter = 1
return "./out"+str(self.__name_counter)+".json"
elif self.__name_counter == 1:
self.__name_counter = 2
return "./out"+str(self.__name_counter)+".json"
else:
self.__name_counter = 0
print("Contact developer")
def save(self, name, file_save_name):
self.shape_file = gpd.read_file(name)
self.shape_file.to_file(file_save_name, driver="GeoJSON")
class MergeGeoJSON:
__files_merge_list = ['./out1.json', './out2.json']
__poly_geojson = list()
def save(self):
for i in self.__files_merge_list:
with open(i) as geojson_data:
self.__poly_geojson.append(json.load(geojson_data))
merged = { 'firstObj ' : self.__poly_geojson[1], 'secondObj' : self.__poly_geojson[0] }
json.dumps(merged)
with open('Merged_out.json', 'w') as outfile:
json.dump(merged, outfile, indent=3)
outfile.close()
return True
| import json
import geojson
import geopandas as gpd
class SaveToGeoJSON:
__name_counter = 0
def file_name(self):
if self.__name_counter == 0:
self.__name_counter = 1
return "./out"+str(self.__name_counter)+".json"
elif self.__name_counter == 1:
self.__name_counter = 2
return "./out"+str(self.__name_counter)+".json"
else:
self.__name_counter = 0
print("Contact developer")
def save(self, name, file_save_name):
self.shape_file = gpd.read_file(name)
self.shape_file.to_file(file_save_name, driver="GeoJSON")
class MergeGeoJSON:
__files_merge_list = ['./out1.json', './out2.json']
__poly_geojson = list()
def save(self):
for i in self.__files_merge_list:
with open(i) as geojson_data:
self.__poly_geojson.append(json.load(geojson_data))
merged = { 'firstObj ' : self.__poly_geojson[1], 'secondObj' : self.__poly_geojson[0] }
json.dumps(merged)
with open('Merged_out.json', 'w') as outfile:
json.dump(merged, outfile, indent=3)
outfile.close()
return True | none | 1 | 2.798331 | 3 |
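# A rough usage sketch of the two classes above (shapefile paths are
# placeholders): convert two shapefiles into the ./out1.json and ./out2.json
# names produced by file_name(), then merge them into Merged_out.json.
converter = SaveToGeoJSON()
converter.save("parcels.shp", converter.file_name())   # -> ./out1.json
converter.save("roads.shp", converter.file_name())     # -> ./out2.json
MergeGeoJSON().save()                                   # -> Merged_out.json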
|
ssbio/databases/pdbflex.py | JoshuaMeyers/ssbio | 76 | 8692 | import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# TODO: will running with chain ID always return a single item list?
assert len(infolist) == 1
newdict = {}
for k, v in infolist[0].items():
if k == 'avgRMSD' and v:
newdict[k] = float(v)
elif k == 'maxRMSD' and v:
newdict[k] = float(v)
else:
newdict[k] = v
return newdict
def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]
return infodict
def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# infolist = [str(x) for x in infolist.strip('[]').split(',')]
return infolist | import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# TODO: will running with chain ID always return a single item list?
assert len(infolist) == 1
newdict = {}
for k, v in infolist[0].items():
if k == 'avgRMSD' and v:
newdict[k] = float(v)
elif k == 'maxRMSD' and v:
newdict[k] = float(v)
else:
newdict[k] = v
return newdict
def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]
return infodict
def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id,
chain_id)
infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir, force_rerun_flag=force_rerun)
# infolist = [str(x) for x in infolist.strip('[]').split(',')]
return infolist | en | 0.927168 | # #### PDB stats # Request flexibility data about one particular PDB. # # http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A # # pdbID of structure you are interested in # chainID of chain you are interested in # # [{"pdbID":"1a50", # "chainID":"A", # "parentClusterID":"4hn4A", # "avgRMSD":"0.538", # "maxRMSD":"2.616", # "flexibilityLabel":"Low", # "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...], # "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}] # # Note: you can omit the chainID and PDBFlex will return information for all chains. # # #### RMSD profile # Request RMSD array used for local flexibility plots # # http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A # # pdbID PDB ID of structure you are interested in # chainID Chain ID of chain you are interested in # # {"queryPDB":"1a50A", # "clusterName":"4hn4A", # "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"} # # #### PDB representatives # Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster. # # http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A # # pdbID PDB ID of structure you are interested in # chainID Chain ID of chain you are interested in # # ["2trsA","3pr2A","1kfjA"] # TODO: will running with chain ID always return a single item list? # infolist = [str(x) for x in infolist.strip('[]').split(',')] | 1.659286 | 2 |
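# A usage sketch of the three helpers above, following the API notes in the
# comments (PDB id/chain come from those examples; outdir is a placeholder):
stats = get_pdbflex_info('1a50', 'A', outdir='.')
print(stats['flexibilityLabel'], stats['avgRMSD'], stats['maxRMSD'])
profile = get_pdbflex_rmsd_profile('1a50', 'A', outdir='.')['profile']
print(len(profile), 'per-residue RMSD values')
reps = get_pdbflex_representatives('1a50', 'A', outdir='.')
print('cluster representatives:', reps)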
api/insights/insights/infrastructure/mysql/read/modify_notes.py | manisharmagarg/qymatix | 0 | 8693 | <filename>api/insights/insights/infrastructure/mysql/read/modify_notes.py
"""
Modify Notes
"""
# pylint: disable=too-few-public-methods
from ...mysql.mysql_connection import MySqlConnection
from ...mysql.orm.autogen_entities import Task
class ModifyNotes(object):
"""
ModifyNotes responsible to update the record in db
"""
def __init__(self, db_name, notes_id, title=None, comment=None):
super(ModifyNotes, self).__init__()
self.data_db = 'data_{}'.format(db_name)
self.notes_id = notes_id
self.title = title
self.comment = comment
connection = MySqlConnection(self.data_db)
self.session = connection.session()
self.results = self.modify_notes()
def modify_notes(self):
"""
function: query to update the notes record
return: updated notes Id
"""
notes_obj = self.session.query(Task). \
filter_by(id=self.notes_id).first()
notes_obj.title = self.title
notes_obj.description = self.comment
self.session.add(notes_obj)
self.session.commit()
return notes_obj.id
| <filename>api/insights/insights/infrastructure/mysql/read/modify_notes.py
"""
Modify Notes
"""
# pylint: disable=too-few-public-methods
from ...mysql.mysql_connection import MySqlConnection
from ...mysql.orm.autogen_entities import Task
class ModifyNotes(object):
"""
ModifyNotes responsible to update the record in db
"""
def __init__(self, db_name, notes_id, title=None, comment=None):
super(ModifyNotes, self).__init__()
self.data_db = 'data_{}'.format(db_name)
self.notes_id = notes_id
self.title = title
self.comment = comment
connection = MySqlConnection(self.data_db)
self.session = connection.session()
self.results = self.modify_notes()
def modify_notes(self):
"""
function: query to update the notes record
return: updated notes Id
"""
notes_obj = self.session.query(Task). \
filter_by(id=self.notes_id).first()
notes_obj.title = self.title
notes_obj.description = self.comment
self.session.add(notes_obj)
self.session.commit()
return notes_obj.id
| en | 0.663355 | Modify Notes # pylint: disable=too-few-public-methods ModifyNotes responsible to update the record in db function: query to update the notes record return: updated notes Id | 2.370709 | 2 |
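# A minimal usage sketch (database suffix and note id are placeholders): the
# constructor opens data_<db_name>, applies the update, and exposes the
# updated row id as .results.
updated_id = ModifyNotes('acme', notes_id=42,
                         title='Follow-up call',
                         comment='Call the customer next Monday').results
print(updated_id)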
desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | yetsun/hue | 5,079 | 8694 | <filename>desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py<gh_stars>1000+
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedval
class NamedValuesCaseBase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.e = namedval.NamedValues(('off', 0), ('on', 1))
def testDict(self):
assert set(self.e.items()) == set([('off', 0), ('on', 1)])
assert set(self.e.keys()) == set(['off', 'on'])
assert set(self.e) == set(['off', 'on'])
assert set(self.e.values()) == set([0, 1])
assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
assert 0 in self.e and 1 in self.e and 2 not in self.e
def testInit(self):
assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
assert namedval.NamedValues(('c', 0)) == {'c': 0}
assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
def testLen(self):
assert len(self.e) == 2
assert len(namedval.NamedValues()) == 0
def testAdd(self):
assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
def testClone(self):
assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
def testStrRepr(self):
assert str(self.e)
assert repr(self.e)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| <filename>desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py<gh_stars>1000+
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedval
class NamedValuesCaseBase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.e = namedval.NamedValues(('off', 0), ('on', 1))
def testDict(self):
assert set(self.e.items()) == set([('off', 0), ('on', 1)])
assert set(self.e.keys()) == set(['off', 'on'])
assert set(self.e) == set(['off', 'on'])
assert set(self.e.values()) == set([0, 1])
assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
assert 0 in self.e and 1 in self.e and 2 not in self.e
def testInit(self):
assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
assert namedval.NamedValues(('c', 0)) == {'c': 0}
assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
def testLen(self):
assert len(self.e) == 2
assert len(namedval.NamedValues()) == 0
def testAdd(self):
assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
def testClone(self):
assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
def testStrRepr(self):
assert str(self.e)
assert repr(self.e)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| en | 0.711205 | # # This file is part of pyasn1 software. # # Copyright (c) 2005-2019, <NAME> <<EMAIL>> # License: http://snmplabs.com/pyasn1/license.html # | 2.426044 | 2 |
setup.py | methane/pymemcache | 0 | 8695 | #!/usr/bin/env python
from setuptools import setup, find_packages
from pymemcache import __version__
setup(
name = 'pymemcache',
version = __version__,
author = '<NAME>',
author_email = '<EMAIL>',
packages = find_packages(),
tests_require = ['nose>=1.0'],
install_requires = ['six'],
description = 'A comprehensive, fast, pure Python memcached client',
long_description = open('README.md').read(),
license = 'Apache License 2.0',
url = 'https://github.com/Pinterest/pymemcache',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Database',
],
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from pymemcache import __version__
setup(
name = 'pymemcache',
version = __version__,
author = '<NAME>',
author_email = '<EMAIL>',
packages = find_packages(),
tests_require = ['nose>=1.0'],
install_requires = ['six'],
description = 'A comprehensive, fast, pure Python memcached client',
long_description = open('README.md').read(),
license = 'Apache License 2.0',
url = 'https://github.com/Pinterest/pymemcache',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Database',
],
)
| ru | 0.26433 | #!/usr/bin/env python | 1.22677 | 1 |
torch_geometric/read/ply.py | DL-85/pytorch_geometric | 2 | 8696 | import torch
from plyfile import PlyData
from torch_geometric.data import Data
def read_ply(path):
with open(path, 'rb') as f:
data = PlyData.read(f)
pos = ([torch.tensor(data['vertex'][axis]) for axis in ['x', 'y', 'z']])
pos = torch.stack(pos, dim=-1)
face = None
if 'face' in data:
faces = data['face']['vertex_indices']
faces = [torch.tensor(face, dtype=torch.long) for face in faces]
face = torch.stack(faces, dim=-1)
data = Data(pos=pos)
data.face = face
return data
| import torch
from plyfile import PlyData
from torch_geometric.data import Data
def read_ply(path):
with open(path, 'rb') as f:
data = PlyData.read(f)
pos = ([torch.tensor(data['vertex'][axis]) for axis in ['x', 'y', 'z']])
pos = torch.stack(pos, dim=-1)
face = None
if 'face' in data:
faces = data['face']['vertex_indices']
faces = [torch.tensor(face, dtype=torch.long) for face in faces]
face = torch.stack(faces, dim=-1)
data = Data(pos=pos)
data.face = face
return data
| none | 1 | 2.730108 | 3 |
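# A short usage sketch of read_ply above (the .ply path is a placeholder):
mesh = read_ply('bunny.ply')
print(mesh.pos.shape)        # [num_vertices, 3]
if mesh.face is not None:
    print(mesh.face.shape)   # [3, num_faces], since faces are stacked on dim=-1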
|
ml-agents/mlagents/trainers/brain_conversion_utils.py | ranguera/ml-agents | 2 | 8697 | from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
| from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
| none | 1 | 2.23027 | 2 |
|
mrdc_ws/src/mrdc_serial/setup.py | SoonerRobotics/MRDC22 | 0 | 8698 | <filename>mrdc_ws/src/mrdc_serial/setup.py
from setuptools import find_packages, setup
from glob import glob
import os
package_name = 'mrdc_serial'
setup(
name=package_name,
version='1.0.0',
packages=find_packages(),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'),
glob(os.path.join('launch', '*.xml')))
],
install_requires=['setuptools'],
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='The MRDC Serial package that controls communication to the arduino',
license='MIT License',
entry_points={
'console_scripts': [
'remote = mrdc_serial.remote:main',
'serial = mrdc_serial.serial:main'
],
},
)
| <filename>mrdc_ws/src/mrdc_serial/setup.py
from setuptools import find_packages, setup
from glob import glob
import os
package_name = 'mrdc_serial'
setup(
name=package_name,
version='1.0.0',
packages=find_packages(),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'),
glob(os.path.join('launch', '*.xml')))
],
install_requires=['setuptools'],
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='The MRDC Serial package that controls communication to the arduino',
license='MIT License',
entry_points={
'console_scripts': [
'remote = mrdc_serial.remote:main',
'serial = mrdc_serial.serial:main'
],
},
)
| none | 1 | 1.581099 | 2 |
|
orders/views.py | DobromirZlatkov/anteya | 0 | 8699 | <filename>orders/views.py
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from . import forms
from . import models
from custommixins import mixins
class OrderView(generic.View):
template_name = 'orders/order_create.html'
def get(self, request):
qs = models.Product.objects.none()
formset = forms.ProductFormSet(queryset=qs, prefix='formset')
order_form = forms.OrderForm(prefix='order_form')
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request):
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
if formset.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
class OrderDetails(generic.DetailView):
model = models.Order
template_name_suffix = '_details'
class OrderList(mixins.LoginRequiredMixin, mixins.AdminRequiredMixin, generic.ListView):
model = models.Order
class OrderEdit(generic.View):
template_name = 'orders/order_edit.html'
def get(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(queryset=order.products.all(), prefix='formset')
order_form = forms.OrderForm(prefix='order_form', instance=order)
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
if formset.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
| <filename>orders/views.py
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from . import forms
from . import models
from custommixins import mixins
class OrderView(generic.View):
template_name = 'orders/order_create.html'
def get(self, request):
qs = models.Product.objects.none()
formset = forms.ProductFormSet(queryset=qs, prefix='formset')
order_form = forms.OrderForm(prefix='order_form')
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request):
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
if formset.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
class OrderDetails(generic.DetailView):
model = models.Order
template_name_suffix = '_details'
class OrderList(mixins.LoginRequiredMixin, mixins.AdminRequiredMixin, generic.ListView):
model = models.Order
class OrderEdit(generic.View):
template_name = 'orders/order_edit.html'
def get(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(queryset=order.products.all(), prefix='formset')
order_form = forms.OrderForm(prefix='order_form', instance=order)
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
def post(self, request, pk):
order = models.Order.objects.get(pk=pk)
formset = forms.ProductFormSet(request.POST, prefix='formset')
order_form = forms.OrderForm(request.POST, prefix='order_form')
if formset.is_valid():
order = order_form.save()
for form in formset.forms:
product = form.save(commit=False)
order.products.add(product)
order.save()
return HttpResponseRedirect(reverse('order_details', args=(order.id,)))
else:
return render(request, self.template_name, {'formset': formset, 'order_form': order_form})
| none | 1 | 2.121984 | 2 |