code | apis | extract_api
---|---|---
stringlengths 22 to 1.05M | listlengths 1 to 3.31k | stringlengths 75 to 3.25M
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import histogram_module
import dist_module
def rgb2gray(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
# model_images - list of file names of model images
# query_images - list of file names of query images
#
# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'
# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'
#
# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain
# handles to distance and histogram functions, and to find out whether histogram function
# expects grayvalue or color image
def find_best_match(model_images, query_images, dist_type, hist_type, num_bins):
hist_isgray = histogram_module.is_grayvalue_hist(hist_type)
model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins)
query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins)
D = np.zeros((len(model_images), len(query_images)))
# compute the distance for each (model, query) pair
for j, query in enumerate(query_hists):
for i, model in enumerate(model_hists):
D[i, j] = dist_module.get_dist_by_name(model, query, dist_type)
best_match = [] # to save best matches
# for each query, find the best-matching model
for j in range(len(query_images)):
query_matches = D[:, j] # get query columns from matrix
argmin = np.argmin(query_matches) # get index with minimum distance
best_match.append(argmin) # save index for query
best_match = np.array(best_match) # array of best match for each query
return best_match, D
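# Illustrative usage sketch (the file names below are hypothetical, not part of this code base):
#
#   model_images = ['obj1.png', 'obj2.png']
#   query_images = ['query1.png']
#   best_match, D = find_best_match(model_images, query_images, dist_type='chi2', hist_type='rg', num_bins=10)
#   # best_match[j] is the index into model_images of the closest model for query j,
#   # and D[i, j] is the distance between model i and query j.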
def compute_histograms(image_list, hist_type, hist_isgray, num_bins):
image_hist = []
# Compute the histogram for each image and append it to image_hist
# ... (your code here)
for img in image_list:
img_color = np.array(Image.open(img))
# if the histogram type expects a grayvalue image, convert to gray,
# otherwise keep the RGB image
img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double')
# We compute histogram for image
hist = histogram_module.get_hist_by_name(img=img_to_process,
num_bins_gray=num_bins,
hist_name=hist_type
)
image_hist.append(hist)
return image_hist
# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_images'.
#
# Note: use the previously implemented function 'find_best_match'
# Note: use subplot command to show all the images in the same Python figure, one row per query image
def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):
plt.figure()
num_nearest = 5 # show the top-5 neighbors
# ... (your code here)
_, D = find_best_match(model_images=model_images,
query_images=query_images,
dist_type=dist_type,
hist_type=hist_type,
num_bins=num_bins
)
Q = len(query_images)
pos = 0
for j in range(Q):
query_matches = D[:, j]
best_args = np.argsort(query_matches)[:num_nearest]
query_img = query_images[j]
pos += 1
plt.subplot(Q, 6, pos)
plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255)
plt.title(f'Q{j}')
for ind in range(len(best_args)):
pos += 1
model_ind = best_args[ind]
model_img = model_images[model_ind]
plt.subplot(Q, 6, pos)
plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255)
plt.title(f'MO.{model_ind}')
plt.show()
| [
"PIL.Image.open",
"histogram_module.is_grayvalue_hist",
"histogram_module.get_hist_by_name",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.argmin",
"matplotlib.pyplot.title",
"dist_module.get_dist_by_name",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
| [((866, 911), 'histogram_module.is_grayvalue_hist', 'histogram_module.is_grayvalue_hist', (['hist_type'], {}), '(hist_type)\n', (900, 911), False, 'import histogram_module\n'), ((1708, 1728), 'numpy.array', 'np.array', (['best_match'], {}), '(best_match)\n', (1716, 1728), True, 'import numpy as np\n'), ((2953, 2965), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2963, 2965), True, 'import matplotlib.pyplot as plt\n'), ((3968, 3978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3976, 3978), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1596), 'numpy.argmin', 'np.argmin', (['query_matches'], {}), '(query_matches)\n', (1581, 1596), True, 'import numpy as np\n'), ((2292, 2395), 'histogram_module.get_hist_by_name', 'histogram_module.get_hist_by_name', ([], {'img': 'img_to_process', 'num_bins_gray': 'num_bins', 'hist_name': 'hist_type'}), '(img=img_to_process, num_bins_gray=\n num_bins, hist_name=hist_type)\n', (2325, 2395), False, 'import histogram_module\n'), ((3539, 3561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['Q', '(6)', 'pos'], {}), '(Q, 6, pos)\n', (3550, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3660), 'matplotlib.pyplot.title', 'plt.title', (['f"""Q{j}"""'], {}), "(f'Q{j}')\n", (3651, 3660), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1365), 'dist_module.get_dist_by_name', 'dist_module.get_dist_by_name', (['model', 'query', 'dist_type'], {}), '(model, query, dist_type)\n', (1340, 1365), False, 'import dist_module\n'), ((2047, 2062), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2057, 2062), False, 'from PIL import Image\n'), ((3436, 3461), 'numpy.argsort', 'np.argsort', (['query_matches'], {}), '(query_matches)\n', (3446, 3461), True, 'import numpy as np\n'), ((3823, 3845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['Q', '(6)', 'pos'], {}), '(Q, 6, pos)\n', (3834, 3845), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3962), 'matplotlib.pyplot.title', 'plt.title', (['f"""MO.{model_ind}"""'], {}), "(f'MO.{model_ind}')\n", (3943, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3612), 'PIL.Image.open', 'Image.open', (['query_img'], {}), '(query_img)\n', (3601, 3612), False, 'from PIL import Image\n'), ((3879, 3900), 'PIL.Image.open', 'Image.open', (['model_img'], {}), '(model_img)\n', (3889, 3900), False, 'from PIL import Image\n')] |
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
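# Illustrative usage (values are examples, not taken from this code base):
#
#   set_context_dtype('float64')   # accepts 'float64'/'double' or 'float32'/'float'
#   print(get_context_dtype())     # should now report the double dtype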
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim // elem_size for dim in strides]  # integer division: strides are counted in elements, not bytes
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
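# Round-trip sketch (illustrative): a NumPy array can be wrapped and unwrapped
# without copying the underlying buffer, e.g.
#
#   a = np.arange(6, dtype='float32').reshape(2, 3)
#   nd = _from_numpy(a)
#   b = _to_numpy(nd)   # shares memory with `a`
#
# Note that _from_numpy() appends both the NumPy array and the created buffer to
# the module-level `_refs` list so they are not garbage collected while the
# nd4j array is still in use.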
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
if rep_y:
try:
y = Nd4j.tile(y, *yt)
except:
y = Nd4j.tile(y, *yt)
return x, y
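# Broadcasting sketch (shapes are illustrative): for x of shape (3, 4) and y of
# shape (4,), broadcast(x, y) first left-pads y's shape to (1, 4), then tiles y
# three times along the first axis via Nd4j.tile, so both operands end up with
# shape (3, 4) before an element-wise operation is applied.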
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
def __getitem__(self, key):
return ndarray(self.numpy()[key])
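# NOTE: the numpy-based early return above makes the nd4j indexing code below
# unreachable; it is left in place, presumably as the original INDArray-backed path.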
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
def __setitem__(self, key, other):
self.numpy()[key] = _nparray(other)
return
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
def __add__(self, other):
return ndarray(self.numpy() + _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
import ops
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
| [
"ctypes.POINTER",
"numpy.array",
"ctypes.cast",
"warnings.warn",
"numpy.dtype"
]
| [((4456, 4486), 'ctypes.POINTER', 'ctypes.POINTER', (['mapping[dtype]'], {}), '(mapping[dtype])\n', (4470, 4486), False, 'import ctypes\n'), ((4501, 4530), 'ctypes.cast', 'ctypes.cast', (['address', 'Pointer'], {}), '(address, Pointer)\n', (4512, 4530), False, 'import ctypes\n'), ((2678, 2771), 'warnings.warn', 'warnings.warn', (['"""Can not set context dtype now. Set it at the beginning of your program."""'], {}), "(\n 'Can not set context dtype now. Set it at the beginning of your program.')\n", (2691, 2771), False, 'import warnings\n'), ((1752, 1767), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1760, 1767), True, 'import numpy as np\n'), ((5255, 5266), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5263, 5266), True, 'import numpy as np\n'), ((7783, 7797), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7791, 7797), True, 'import numpy as np\n'), ((4869, 4880), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4877, 4880), True, 'import numpy as np\n'), ((5312, 5323), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5320, 5323), True, 'import numpy as np\n')] |
import os
from typing import Tuple
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
walk_files,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
def load_libritts_item(
fileid: str,
path: str,
ext_audio: str,
ext_original_txt: str,
ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
utterance_id = fileid
normalized_text = utterance_id + ext_normalized_txt
normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
original_text = utterance_id + ext_original_txt
original_text = os.path.join(path, speaker_id, chapter_id, original_text)
file_audio = utterance_id + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load original text
with open(original_text) as ft:
original_text = ft.readline()
# Load normalized text
with open(normalized_text, "r") as ft:
normalized_text = ft.readline()
return (
waveform,
sample_rate,
original_text,
normalized_text,
int(speaker_id),
int(chapter_id),
utterance_id,
)
class LIBRITTS(Dataset):
"""Create a Dataset for LibriTTS.
Args:
root (str): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to download.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriTTS"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_original_txt = ".original.txt"
_ext_normalized_txt = ".normalized.txt"
_ext_audio = ".wav"
def __init__(
self,
root: str,
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/60/"
url = os.path.join(base_url, url + ext_archive)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
walker = walk_files(
self._path, suffix=self._ext_audio, prefix=False, remove_suffix=True
)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id,
chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_libritts_item(
fileid,
self._path,
self._ext_audio,
self._ext_original_txt,
self._ext_normalized_txt,
)
def __len__(self) -> int:
return len(self._walker)
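# Illustrative usage (the root path is hypothetical; download=True needs network access):
#
#   dataset = LIBRITTS(root='./data', url='dev-clean', download=True)
#   waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id = dataset[0]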
| [
"torchaudio.load",
"os.path.join",
"torchaudio.datasets.utils.extract_archive",
"os.path.isfile",
"os.path.isdir",
"os.path.basename",
"torchaudio.datasets.utils.download_url",
"torchaudio.datasets.utils.walk_files"
]
| [((1270, 1329), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'normalized_text'], {}), '(path, speaker_id, chapter_id, normalized_text)\n', (1282, 1329), False, 'import os\n'), ((1403, 1460), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'original_text'], {}), '(path, speaker_id, chapter_id, original_text)\n', (1415, 1460), False, 'import os\n'), ((1521, 1575), 'os.path.join', 'os.path.join', (['path', 'speaker_id', 'chapter_id', 'file_audio'], {}), '(path, speaker_id, chapter_id, file_audio)\n', (1533, 1575), False, 'import os\n'), ((1622, 1649), 'torchaudio.load', 'torchaudio.load', (['file_audio'], {}), '(file_audio)\n', (1637, 1649), False, 'import torchaudio\n'), ((3507, 3528), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3523, 3528), False, 'import os\n'), ((3547, 3575), 'os.path.join', 'os.path.join', (['root', 'basename'], {}), '(root, basename)\n', (3559, 3575), False, 'import os\n'), ((3647, 3688), 'os.path.join', 'os.path.join', (['folder_in_archive', 'basename'], {}), '(folder_in_archive, basename)\n', (3659, 3688), False, 'import os\n'), ((3711, 3748), 'os.path.join', 'os.path.join', (['root', 'folder_in_archive'], {}), '(root, folder_in_archive)\n', (3723, 3748), False, 'import os\n'), ((4046, 4131), 'torchaudio.datasets.utils.walk_files', 'walk_files', (['self._path'], {'suffix': 'self._ext_audio', 'prefix': '(False)', 'remove_suffix': '(True)'}), '(self._path, suffix=self._ext_audio, prefix=False, remove_suffix=True\n )\n', (4056, 4131), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n'), ((3445, 3486), 'os.path.join', 'os.path.join', (['base_url', '(url + ext_archive)'], {}), '(base_url, url + ext_archive)\n', (3457, 3486), False, 'import os\n'), ((3790, 3815), 'os.path.isdir', 'os.path.isdir', (['self._path'], {}), '(self._path)\n', (3803, 3815), False, 'import os\n'), ((4003, 4027), 'torchaudio.datasets.utils.extract_archive', 'extract_archive', (['archive'], {}), '(archive)\n', (4018, 4027), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n'), ((3840, 3863), 'os.path.isfile', 'os.path.isfile', (['archive'], {}), '(archive)\n', (3854, 3863), False, 'import os\n'), ((3942, 3986), 'torchaudio.datasets.utils.download_url', 'download_url', (['url', 'root'], {'hash_value': 'checksum'}), '(url, root, hash_value=checksum)\n', (3954, 3986), False, 'from torchaudio.datasets.utils import download_url, extract_archive, walk_files\n')] |
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
# Build the data
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]
bar_width=0.3
# Use range(len(x_data)) as the Y positions, i.e. 0, 1, 2...
plt.barh(y=range(len(x_data)), width=y_data, label='疯狂Java讲义',
color='steelblue', alpha=0.8, height=bar_width)
# Use np.arange(len(x_data)) + bar_width as the Y positions,
# i.e. bar_width, 1 + bar_width, 2 + bar_width... so this series sits right next to the first one
plt.barh(y=np.arange(len(x_data))+bar_width, width=y_data2,
label='疯狂Android讲义', color='indianred', alpha=0.8, height=bar_width)
# Show the exact values on the bars; ha controls horizontal alignment, va controls vertical alignment
for y, x in enumerate(y_data):
plt.text(x+5000, y-bar_width/2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
plt.text(x+5000, y+bar_width/2, '%s' % x, ha='center', va='bottom')
# Set the tick labels on the Y axis
plt.yticks(np.arange(len(x_data))+bar_width/2, x_data)
# Set the title
plt.title("Java与Android图书对比")
# Label both axes
plt.xlabel("销量")
plt.ylabel("年份")
# Show the legend
plt.legend()
plt.show()
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((2039, 2068), 'matplotlib.pyplot.title', 'plt.title', (['"""Java与Android图书对比"""'], {}), "('Java与Android图书对比')\n", (2048, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2100), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""销量"""'], {}), "('销量')\n", (2094, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2118), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""年份"""'], {}), "('年份')\n", (2112, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2128, 2140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2138, 2140), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2150, 2152), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1861), 'matplotlib.pyplot.text', 'plt.text', (['(x + 5000)', '(y - bar_width / 2)', "('%s' % x)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + 5000, y - bar_width / 2, '%s' % x, ha='center', va='bottom')\n", (1796, 1861), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1967), 'matplotlib.pyplot.text', 'plt.text', (['(x + 5000)', '(y + bar_width / 2)', "('%s' % x)"], {'ha': '"""center"""', 'va': '"""bottom"""'}), "(x + 5000, y + bar_width / 2, '%s' % x, ha='center', va='bottom')\n", (1902, 1967), True, 'import matplotlib.pyplot as plt\n')] |
#! /usr/bin/Python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import argparse
import sys
if len(sys.argv) != 6:
print('arguments wrong!')
print(len(sys.argv))
exit()
else:
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
# read the vector file path only after the argument-count check above
vector_file = sys.argv[1]
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
m2 = w2 / linalg.norm(w2)
m3 = w3 / linalg.norm(w3)
m4 = w4 / linalg.norm(w4)
diff1 = w1 - w2
diff2 = w3 - w4
miff1 = m1 - m2
miff2 = m3 - m4
print('-------Word Space---------')
print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4))
print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3))
print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2))
print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1))
print('------Analogy Space-------')
print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2))
print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2)))
print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2))
print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
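# Illustrative invocation (script name and vector file path are hypothetical):
#
#   python analogy.py GoogleNews-vectors-negative300.bin king man woman queen
#
# The script loads binary word2vec vectors and prints cosine/Euclidean analogy
# scores for the four words in both the word space and the analogy space.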
| [
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format",
"scipy.spatial.distance.cosine",
"numpy.linalg.norm"
]
| [((366, 425), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['vector_file'], {'binary': '(True)'}), '(vector_file, binary=True)\n', (399, 425), False, 'from gensim.models.keyedvectors import KeyedVectors\n'), ((709, 724), 'numpy.linalg.norm', 'linalg.norm', (['w1'], {}), '(w1)\n', (720, 724), False, 'from numpy import linalg\n'), ((736, 751), 'numpy.linalg.norm', 'linalg.norm', (['w2'], {}), '(w2)\n', (747, 751), False, 'from numpy import linalg\n'), ((763, 778), 'numpy.linalg.norm', 'linalg.norm', (['w3'], {}), '(w3)\n', (774, 778), False, 'from numpy import linalg\n'), ((790, 805), 'numpy.linalg.norm', 'linalg.norm', (['w4'], {}), '(w4)\n', (801, 805), False, 'from numpy import linalg\n'), ((939, 980), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m2 + m3 - m1)', 'm4'], {}), '(m2 + m3 - m1, m4)\n', (962, 980), False, 'from scipy import spatial\n'), ((1002, 1043), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m1 + m4 - m2)', 'm3'], {}), '(m1 + m4 - m2, m3)\n', (1025, 1043), False, 'from scipy import spatial\n'), ((1065, 1106), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m4 + m1 - m3)', 'm2'], {}), '(m4 + m1 - m3, m2)\n', (1088, 1106), False, 'from scipy import spatial\n'), ((1128, 1169), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['(m2 + m3 - m4)', 'm1'], {}), '(m2 + m3 - m4, m1)\n', (1151, 1169), False, 'from scipy import spatial\n'), ((1230, 1267), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['diff1', 'diff2'], {}), '(diff1, diff2)\n', (1253, 1267), False, 'from scipy import spatial\n'), ((1387, 1424), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['miff1', 'miff2'], {}), '(miff1, miff2)\n', (1410, 1424), False, 'from scipy import spatial\n'), ((1295, 1321), 'numpy.linalg.norm', 'linalg.norm', (['(diff1 - diff2)'], {}), '(diff1 - diff2)\n', (1306, 1321), False, 'from numpy import linalg\n'), ((1452, 1478), 'numpy.linalg.norm', 'linalg.norm', (['(miff1 - miff2)'], {}), '(miff1 - miff2)\n', (1463, 1478), False, 'from numpy import linalg\n'), ((1321, 1339), 'numpy.linalg.norm', 'linalg.norm', (['diff1'], {}), '(diff1)\n', (1332, 1339), False, 'from numpy import linalg\n'), ((1340, 1358), 'numpy.linalg.norm', 'linalg.norm', (['diff2'], {}), '(diff2)\n', (1351, 1358), False, 'from numpy import linalg\n'), ((1478, 1496), 'numpy.linalg.norm', 'linalg.norm', (['miff1'], {}), '(miff1)\n', (1489, 1496), False, 'from numpy import linalg\n'), ((1497, 1515), 'numpy.linalg.norm', 'linalg.norm', (['miff2'], {}), '(miff2)\n', (1508, 1515), False, 'from numpy import linalg\n')] |
import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
of the normalized Laplacian matrix then it uses sweep cut to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
RETURNS
------
p: Eigenvector or eigenvector matrix that
corresponds to the second smallest eigenvalue of the
normalized Laplacian matrix, plus further eigenvectors when dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
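# Minimal usage sketch (assumes `g` exposes `adjacency_matrix` as a scipy sparse
# matrix and `dn_sqrt` as the vector of inverse square roots of node degrees, as
# used above):
#
#   F, eig_vals = eig2_nL(g)                                    # embedding of the whole graph
#   (nodes, f), eig_val = eig2nL_subgraph(g, ref_nodes=[0, 1, 2, 3])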
| [
"numpy.real",
"scipy.sparse.identity",
"scipy.sparse.linalg.eigsh"
]
| [((1613, 1667), 'scipy.sparse.linalg.eigsh', 'splinalg.eigsh', (['L'], {'which': '"""SM"""', 'k': '(1 + dim)', 'tol': 'tol_eigs'}), "(L, which='SM', k=1 + dim, tol=tol_eigs)\n", (1627, 1667), True, 'import scipy.sparse.linalg as splinalg\n'), ((1677, 1694), 'numpy.real', 'np.real', (['p[:, 1:]'], {}), '(p[:, 1:])\n', (1684, 1694), True, 'import numpy as np\n'), ((2286, 2338), 'scipy.sparse.linalg.eigsh', 'splinalg.eigsh', (['L_sub'], {'which': '"""SM"""', 'k': '(1)', 'tol': 'tol_eigs'}), "(L_sub, which='SM', k=1, tol=tol_eigs)\n", (2300, 2338), True, 'import scipy.sparse.linalg as splinalg\n'), ((1514, 1535), 'scipy.sparse.identity', 'sp.sparse.identity', (['n'], {}), '(n)\n', (1532, 1535), True, 'import scipy as sp\n'), ((2192, 2216), 'scipy.sparse.identity', 'sp.sparse.identity', (['nref'], {}), '(nref)\n', (2210, 2216), True, 'import scipy as sp\n')] |
# Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
vdoXA is an open-source python package for trimming the videos.
It is built as a subsystem for < XXXXX Not to be named XXXXX > project.
Originally inspired by my colleague's work, I thought of improving the
concept and building a tool to simplify the process. I hope it comes with
strong support for continuous updates, reliable functions and overall
ease of use.
Read complete documentation at: <https://github.com/xames3/vdoxa>.
"""
from setuptools import find_packages, setup
from vdoxa.vars import dev
doclines = __doc__.split('\n')
def use_readme() -> str:
"""Use `README.md` for parsing long description."""
with open('README.md') as file:
return file.read()
with open('requirements.txt', 'r') as requirements:
required_packages = [package.rstrip() for package in requirements]
setup(
name=dev.PROJECT_NAME,
version=dev.PROJECT_VERSION,
url=dev.PROJECT_LINK,
download_url=dev.PROJECT_LINK,
author=dev.AUTHOR,
author_email=dev.AUTHOR_EMAIL,
maintainer=dev.AUTHOR,
maintainer_email=dev.AUTHOR_EMAIL,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
],
license=dev.PROJECT_LICENSE,
description=f'{doclines[1]}',
long_description=use_readme(),
long_description_content_type='text/markdown',
keywords='opencv2 cv2 moviepy',
zip_safe=False,
install_requires=required_packages,
python_requires='~=3.6',
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'vdoxa = vdoxa.parser:main',
],
}
)
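# Typical packaging commands (shown for illustration; run from the project root):
#
#   python setup.py sdist bdist_wheel
#   pip install .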
| [
"setuptools.find_packages"
]
| [((2309, 2324), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2322, 2324), False, 'from setuptools import find_packages, setup\n')] |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import logging
import sys
class LOG:
"""
Custom logger class that acts like logging.Logger
The logger name is automatically generated by the module of the caller
Usage:
>>> LOG.debug('My message: %s', debug_str)
13:12:43.673 - :<module>:1 - DEBUG - My message: hi
>>> LOG('custom_name').debug('Another message')
13:13:10.462 - custom_name - DEBUG - Another message
"""
base_path = "stdout"
fmt = '%(asctime)s.%(msecs)03d - ' \
'%(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(fmt, datefmt)
name = 'little_questions'
level = "DEBUG"
_loggers = {}
@classmethod
def set_level(cls, level="INFO"):
cls.level = level
for n in cls._loggers:
cls._loggers[n].setLevel(cls.level)
@classmethod
def create_logger(cls, name):
if name in cls._loggers:
return cls._loggers[name]
logger = logging.getLogger(name)
logger.propagate = False
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(cls.formatter)
logger.addHandler(stdout_handler)
logger.setLevel(cls.level)
cls._loggers[name] = logger
return logger
@classmethod
def _log(cls):
name = ""
if cls.name is not None:
name = cls.name + " - "
# Stack:
# [0] - _log()
# [1] - debug(), info(), warning(), or error()
# [2] - caller
stack = inspect.stack()
# Record:
# [0] - frame object
# [1] - filename
# [2] - line number
# [3] - function
# ...
record = stack[2]
name += record[3] + ':' + str(record[2])
logger = cls.create_logger(name)
return logger
@classmethod
def info(cls, *args, **kwargs):
cls._log().info(*args, **kwargs)
@classmethod
def debug(cls, *args, **kwargs):
cls._log().debug(*args, **kwargs)
@classmethod
def warning(cls, *args, **kwargs):
cls._log().warning(*args, **kwargs)
@classmethod
def error(cls, *args, **kwargs):
cls._log().error(*args, **kwargs)
@classmethod
def exception(cls, *args, **kwargs):
cls._log().exception(*args, **kwargs)
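# Illustrative usage beyond the docstring example:
#
#   LOG.set_level("INFO")                 # applies to existing and future loggers
#   LOG.info("processing %s items", 42)   # logger name is derived from the caller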
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"inspect.stack"
]
| [((1186, 1217), 'logging.Formatter', 'logging.Formatter', (['fmt', 'datefmt'], {}), '(fmt, datefmt)\n', (1203, 1217), False, 'import logging\n'), ((1587, 1610), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1604, 1610), False, 'import logging\n'), ((1669, 1702), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1690, 1702), False, 'import logging\n'), ((2148, 2163), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2161, 2163), False, 'import inspect\n')] |
#!/usr/bin/env python
import sys
import re
from subprocess import Popen, PIPE
import argparse
from pbxproj import XcodeProject, TreeType
from pbxproj import FileOptions
def main():
parser = argparse.ArgumentParser(description="MpireNxusMeasurement post build iOS script")
parser.add_argument('ios_project_path', help="path to the folder of the iOS project generated by unity3d")
with open('MpireNxusMeasurementPostBuildiOSLog.txt', 'w') as fileLog:
# Log function with file injected.
LogFunc = LogInput(fileLog)
# Path of the Xcode SDK on the system.
xcode_sdk_path = get_xcode_sdk_path(LogFunc)
# Path for unity iOS Xcode project and framework on the system.
unity_xcode_project_path, framework_path = get_paths(LogFunc, parser, xcode_sdk_path)
# Edit the Xcode project using mod_pbxproj:
# - Add the adSupport framework library.
# - Add the iAd framework library.
# - Change the compilation flags of the adjust project files to support non-ARC.
edit_unity_xcode_project(LogFunc, unity_xcode_project_path, framework_path)
# Removed.
# Change the Xcode project directly:
# - Allow objective-c exceptions
# rewrite_unity_xcode_project(LogFunc, unity_xcode_project_path)
sys.exit(0)
def LogInput(writeObject):
def Log(message, *args):
messageNLine = (message if message else "None") + "\n"
writeObject.write(messageNLine.format(*args))
return Log
def get_paths(Log, parser, xcode_sdk_path):
args, ignored_args = parser.parse_known_args()
ios_project_path = args.ios_project_path
unity_xcode_project_path = ios_project_path + "/Unity-iPhone.xcodeproj/project.pbxproj"
Log("Unity3d Xcode project path: {0}", unity_xcode_project_path)
framework_path = xcode_sdk_path + "/System/Library/Frameworks/"
Log("framework path: {0}", framework_path)
return unity_xcode_project_path, framework_path
def edit_unity_xcode_project(Log, unity_xcode_project_path, framework_path):
# load unity iOS pbxproj project file
unity_XcodeProject = XcodeProject.load(unity_xcode_project_path)
frameworks = unity_XcodeProject.get_or_create_group('Frameworks')
file_options_security_framework = FileOptions(embed_framework=False, weak=True)
unity_XcodeProject.add_file(framework_path + "Security.framework", parent=frameworks, tree='SDKROOT', force=False, file_options=file_options_security_framework)
Log("added Security framework")
# Add -ObjC to "Other Linker Flags" project settings.
unity_XcodeProject.add_other_ldflags('-ObjC')
# Save changes.
unity_XcodeProject.save()
def rewrite_unity_xcode_project(Log, unity_xcode_project_path):
unity_xcode_lines = []
# Allow objective-c exceptions
re_objc_excep = re.compile(r"\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*")
with open(unity_xcode_project_path) as upf:
for line in upf:
if re_objc_excep.match(line):
#Log("matched line: {0}", re_objc_excep.match(line).group())
line = line.replace("NO","YES")
Log("Objective-c exceptions enabled")
unity_xcode_lines.append(line)
with open(unity_xcode_project_path, "w+") as upf:
upf.writelines(unity_xcode_lines)
def get_xcode_sdk_path(Log):
# Output all info from Xcode.
# universal_newlines=True ensures `out` is text (not bytes) under Python 3
proc = Popen(["xcodebuild", "-version", "-sdk"], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = proc.communicate()
if proc.returncode not in [0, 66]:
Log("Could not retrieve Xcode sdk path. code: {0}, err: {1}", proc.returncode, err)
return None
match = re.search("iPhoneOS.*?Path: (?P<sdk_path>.*?)\n", out, re.DOTALL)
xcode_sdk_path = match.group('sdk_path') if match else None
Log("Xcode sdk path: {0}", xcode_sdk_path)
return xcode_sdk_path
if __name__ == "__main__":
main()
| [
"pbxproj.FileOptions",
"argparse.ArgumentParser",
"re.compile",
"subprocess.Popen",
"pbxproj.XcodeProject.load",
"sys.exit",
"re.search"
]
| [((197, 283), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MpireNxusMeasurement post build iOS script"""'}), "(description=\n 'MpireNxusMeasurement post build iOS script')\n", (220, 283), False, 'import argparse\n'), ((1328, 1339), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1336, 1339), False, 'import sys\n'), ((2146, 2189), 'pbxproj.XcodeProject.load', 'XcodeProject.load', (['unity_xcode_project_path'], {}), '(unity_xcode_project_path)\n', (2163, 2189), False, 'from pbxproj import XcodeProject, TreeType\n'), ((2300, 2345), 'pbxproj.FileOptions', 'FileOptions', ([], {'embed_framework': '(False)', 'weak': '(True)'}), '(embed_framework=False, weak=True)\n', (2311, 2345), False, 'from pbxproj import FileOptions\n'), ((2860, 2913), 're.compile', 're.compile', (['"""\\\\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*"""'], {}), "('\\\\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*')\n", (2870, 2913), False, 'import re\n'), ((3422, 3489), 'subprocess.Popen', 'Popen', (["['xcodebuild', '-version', '-sdk']"], {'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['xcodebuild', '-version', '-sdk'], stdout=PIPE, stderr=PIPE)\n", (3427, 3489), False, 'from subprocess import Popen, PIPE\n'), ((3693, 3758), 're.search', 're.search', (['"""iPhoneOS.*?Path: (?P<sdk_path>.*?)\n"""', 'out', 're.DOTALL'], {}), "('iPhoneOS.*?Path: (?P<sdk_path>.*?)\\n', out, re.DOTALL)\n", (3702, 3758), False, 'import re\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See README.md for documentation
import typing
import argparse
import base64
import hashlib
import hmac
import json
import os
import subprocess
import sys
import time
import requests
import ghapp_token
NAMESPACE = "pipelines-as-code"
SECRET_NAME = "pipelines-as-code-secret"
ELNAME = "pipelines-as-code"
EXPIRE_MINUTES_AS_SECONDS = (
int(os.environ.get("GITHUBAPP_TOKEN_EXPIRATION_MINUTES", 10)) * 60
)
def get_controller_route():
elroute = subprocess.run(
f"kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"https://"
+ json.loads(elroute.stdout)["items"][0]["status"]["ingress"][0]["host"]
)
def get_controller_ingress():
elroute = subprocess.run(
f"kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"http://" + json.loads(elroute.stdout)["items"][0]["spec"]["rules"][0]["host"]
)
def get_token_secret(
github_api_url=ghapp_token.GITHUB_API_URL, expiration_time=EXPIRE_MINUTES_AS_SECONDS
):
secret = subprocess.run(
f"kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json",
shell=True,
check=True,
capture_output=True,
)
jeez = json.loads(secret.stdout)
private_key = base64.b64decode(jeez["data"]["github-private-key"])
app_id = base64.b64decode(jeez["data"]["github-application-id"])
webhook_secret = base64.b64decode(jeez["data"]["webhook.secret"]).decode()
if not private_key or not app_id or not webhook_secret:
print(
f"private_key={private_key[1:10]} or app_id={app_id} or webhook_secret={webhook_secret} are empty"
)
sys.exit(1)
gh = ghapp_token.GitHub(
private_key,
app_id,
expiration_time,
github_api_url,
)
return gh.token, webhook_secret, app_id
def _request_app_delivery(token, iid=None, api_url=ghapp_token.GITHUB_API_URL):
url = f"{api_url}/app/hook/deliveries"
if iid:
url += f"/{iid}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_installed(
token: str,
owner_repo: str,
iid: typing.Union[int, None] = None,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks"
if iid:
url += f"/{iid}/deliveries"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_reattempt(
token: str,
owner_repo: str,
iid: int,
delivery_id: int,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks/{iid}/deliveries/{delivery_id}/attempts"
print(url)
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("POST", url, headers=headers)
def ask_which(token: str, api_url: str, last: bool, deliveries: dict) -> int:
dico = []
i = 1
if "message" in deliveries:
print(deliveries)
sys.exit(0)
for delivery in deliveries:
print(
f"{i}) Action={delivery['action']} Event={delivery['event']} Delivered at {delivery['delivered_at']}"
)
dico.append(delivery["id"])
if i == 10:
break
i += 1
chosen = input("Choose a delivery: ")
# return _request_app_delivery(token, dico[int(chosen) - 1], api_url=api_url).json()
return int(chosen) - 1
def webhook_get_delivery(
token: str,
owner_repo: str,
last: bool = False,
api_url: str = ghapp_token.GITHUB_API_URL,
) -> str:
r = _request_webhooks_installed(token, api_url=api_url, owner_repo=owner_repo)
r.raise_for_status()
webhooks = r.json()
if len(webhooks) == 1:
webhook_id = int(webhooks[0]["id"])
elif len(webhooks) > 1:
cnt = 1
for wh in webhooks:
print(f"{cnt}) {wh['name']} - {wh['config']['url']} ")
cnt += 1
chosen = input("Choose a delivery: ")
webhook_id = int(webhooks[int(chosen) - 1]["id"])
else:
print("could not find any webhook configuration on your repo {}")
sys.exit(1)
r = _request_webhooks_installed(
token, api_url=api_url, owner_repo=owner_repo, iid=webhook_id
)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
delivery_id = deliveries[0]["id"]
else:
chosen = ask_which(token, api_url, last, r.json())
delivery_id = deliveries[chosen]["id"]
r = _request_webhooks_reattempt(
token=token,
owner_repo=owner_repo,
iid=webhook_id,
api_url=api_url,
delivery_id=delivery_id,
)
r.raise_for_status()
print(f"Delivery has been replayed, you can replay directly it with: ")
s = f"http POST {api_url}/repos/{owner_repo}/hooks/{webhook_id}/deliveries/{delivery_id}/attempts"
s += f' Authorization:"Bearer { os.environ.get("PASS_TOKEN", "$TOKEN") }"'
s += " Accept:application/vnd.github.v3+json"
print(s)
return s
def app_get_delivery(
token: str, last: bool = False, api_url: str = ghapp_token.GITHUB_API_URL
) -> dict:
r = _request_app_delivery(token, api_url=api_url)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
return _request_app_delivery(token, deliveries[0]["id"], api_url=api_url).json()
chosen = ask_which(token, api_url, last, deliveries)
return _request_app_delivery(
token, deliveries[chosen]["id"], api_url=api_url
).json()
def save_script(target: str, el_route: str, headers: dict, payload: str):
s = f"""#!/usr/bin/env python3
import requests
import sys
payload = \"\"\"{json.dumps(payload)}\"\"\"
headers={headers}
el_route = "http://localhost:8080" if (len(sys.argv) > 1 and sys.argv[1] == "-l") else "{el_route}"
r = requests.request("POST",el_route,data=payload.encode("utf-8"),headers=headers)
r.raise_for_status()
print("Request has been replayed on " + el_route)
"""
with open(target, "w") as fp:
fp.write(s)
os.chmod(target, 0o755)
print(f"Request saved to {target}")
def main(args):
el = args.eroute
if not el:
try:
el = get_controller_route()
except subprocess.CalledProcessError:
try:
el = get_controller_ingress()
except subprocess.CalledProcessError:
print("Could not find an ingress or route")
sys.exit(1)
if args.webhook_repo:
token, webhook_secret = args.webhook_token, args.webhook_secret
replays = webhook_get_delivery(
token,
last=args.last_event,
api_url=args.api_url,
owner_repo=args.webhook_repo,
)
if args.save:
open(args.save, "w").write(f"""#!/usr/bin/env bash\n{replays}\n""")
os.chmod(args.save, 0o755)
print(f"Saved to {args.save}")
sys.exit(0)
else:
token, webhook_secret, app_id = get_token_secret(github_api_url=args.api_url)
delivery = app_get_delivery(token, args.last_event, args.api_url)
jeez = delivery["request"]["payload"]
headers = delivery["request"]["headers"]
payload = json.dumps(jeez)
esha256 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
esha1 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha1,
).hexdigest()
print("Replay event for repo " + jeez["repository"]["full_name"])
headers.update(
{
"X-Hub-Signature": "sha1=" + esha1,
"X-Hub-Signature-256": "sha256=" + esha256,
}
)
if args.save:
save_script(args.save, el, headers, jeez)
sys.exit(0)
for _ in range(args.retry):
try:
r = requests.request(
"POST", el, data=payload.encode("utf-8"), headers=headers
)
except requests.exceptions.ConnectionError:
print(f"sleeping until {el} is up")
time.sleep(5)
continue
print(f"Payload has been replayed on {el}: {r}")
return
print("You have reached the maximum number of retries")
def parse_args():
parser = argparse.ArgumentParser(description="Replay a webhook")
parser.add_argument(
"--installation-id",
"-i",
default=os.environ.get("INSTALLATION_ID"),
help="Installation ID",
)
parser.add_argument(
"--controller-route",
"-e",
dest="eroute",
help="Route hostname (default to detect on openshift/ingress)",
default=os.environ.get("EL_ROUTE"),
)
parser.add_argument("--last-event", "-L", action="store_true")
parser.add_argument(
"--webhook-repo", "-w", help="Use a webhook-repo instead of app"
)
parser.add_argument("--webhook-token", "-t", help="Use this token")
parser.add_argument("--webhook-secret", "-S", help="Use this webhook secret")
parser.add_argument(
"--save", "-s", help="save the request to a shell script to replay easily"
)
parser.add_argument(
"-a",
"--api-url",
help="Github API URL",
default=os.environ.get("GITHUB_API_URL", ghapp_token.GITHUB_API_URL),
)
parser.add_argument(
"--retry",
type=int,
default=1,
help="how many time to try to contact the el route",
)
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
| [
"json.loads",
"ghapp_token.GitHub",
"argparse.ArgumentParser",
"subprocess.run",
"json.dumps",
"base64.b64decode",
"requests.request",
"os.chmod",
"os.environ.get",
"time.sleep",
"sys.exit"
]
| [((1077, 1229), 'subprocess.run', 'subprocess.run', (['f"""kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json"""'], {'shell': '(True)', 'check': '(True)', 'capture_output': '(True)'}), "(\n    f'kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json'\n    , shell=True, check=True, capture_output=True)\n", (1091, 1229), False, 'import subprocess\n'), ((1424, 1578), 'subprocess.run', 'subprocess.run', (['f"""kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json"""'], {'shell': '(True)', 'check': '(True)', 'capture_output': '(True)'}), "(\n    f'kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json'\n    , shell=True, check=True, capture_output=True)\n", (1438, 1578), False, 'import subprocess\n'), ((1843, 1965), 'subprocess.run', 'subprocess.run', (['f"""kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json"""'], {'shell': '(True)', 'check': '(True)', 'capture_output': '(True)'}), "(f'kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json',\n    shell=True, check=True, capture_output=True)\n", (1857, 1965), False, 'import subprocess\n'), ((2012, 2037), 'json.loads', 'json.loads', (['secret.stdout'], {}), '(secret.stdout)\n', (2022, 2037), False, 'import json\n'), ((2056, 2108), 'base64.b64decode', 'base64.b64decode', (["jeez['data']['github-private-key']"], {}), "(jeez['data']['github-private-key'])\n", (2072, 2108), False, 'import base64\n'), ((2122, 2177), 'base64.b64decode', 'base64.b64decode', (["jeez['data']['github-application-id']"], {}), "(jeez['data']['github-application-id'])\n", (2138, 2177), False, 'import base64\n'), ((2483, 2555), 'ghapp_token.GitHub', 'ghapp_token.GitHub', (['private_key', 'app_id', 'expiration_time', 'github_api_url'], {}), '(private_key, app_id, expiration_time, github_api_url)\n', (2501, 2555), False, 'import ghapp_token\n'), ((2930, 2975), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers'}), "('GET', url, headers=headers)\n", (2946, 2975), False, 'import requests\n'), ((3364, 3409), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers'}), "('GET', url, headers=headers)\n", (3380, 3409), False, 'import requests\n'), ((3800, 3846), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'headers': 'headers'}), "('POST', url, headers=headers)\n", (3816, 3846), False, 'import requests\n'), ((7211, 7232), 'os.chmod', 'os.chmod', (['target', '(493)'], {}), '(target, 493)\n', (7219, 7232), False, 'import os\n'), ((8381, 8397), 'json.dumps', 'json.dumps', (['jeez'], {}), '(jeez)\n', (8391, 8397), False, 'import json\n'), ((9490, 9545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Replay a webhook"""'}), "(description='Replay a webhook')\n", (9513, 9545), False, 'import argparse\n'), ((968, 1024), 'os.environ.get', 'os.environ.get', (['"""GITHUBAPP_TOKEN_EXPIRATION_MINUTES"""', '(10)'], {}), "('GITHUBAPP_TOKEN_EXPIRATION_MINUTES', 10)\n", (982, 1024), False, 'import os\n'), ((2461, 2472), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2469, 2472), False, 'import sys\n'), ((4017, 4028), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4025, 4028), False, 'import sys\n'), ((5405, 5416), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5413, 5416), False, 'import sys\n'), ((6419, 6430), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6427, 6430), False, 'import sys\n'), ((8098, 8109), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8106, 8109), False, 'import sys\n'), ((8998, 9009), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9006, 9009), False, 'import sys\n'), ((2199, 2247), 'base64.b64decode', 'base64.b64decode', (["jeez['data']['webhook.secret']"], {}), "(jeez['data']['webhook.secret'])\n", (2215, 2247), False, 'import base64\n'), ((5152, 5163), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5160, 5163), False, 'import sys\n'), ((6006, 6044), 'os.environ.get', 'os.environ.get', (['"""PASS_TOKEN"""', '"""$TOKEN"""'], {}), "('PASS_TOKEN', '$TOKEN')\n", (6020, 6044), False, 'import os\n'), ((6859, 6878), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (6869, 6878), False, 'import json\n'), ((8020, 8044), 'os.chmod', 'os.chmod', (['args.save', '(493)'], {}), '(args.save, 493)\n', (8028, 8044), False, 'import os\n'), ((9630, 9663), 'os.environ.get', 'os.environ.get', (['"""INSTALLATION_ID"""'], {}), "('INSTALLATION_ID')\n", (9644, 9663), False, 'import os\n'), ((9883, 9909), 'os.environ.get', 'os.environ.get', (['"""EL_ROUTE"""'], {}), "('EL_ROUTE')\n", (9897, 9909), False, 'import os\n'), ((10465, 10525), 'os.environ.get', 'os.environ.get', (['"""GITHUB_API_URL"""', 'ghapp_token.GITHUB_API_URL'], {}), "('GITHUB_API_URL', ghapp_token.GITHUB_API_URL)\n", (10479, 10525), False, 'import os\n'), ((9289, 9302), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (9299, 9302), False, 'import time\n'), ((7617, 7628), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7625, 7628), False, 'import sys\n'), ((1301, 1327), 'json.loads', 'json.loads', (['elroute.stdout'], {}), '(elroute.stdout)\n', (1311, 1327), False, 'import json\n'), ((1641, 1667), 'json.loads', 'json.loads', (['elroute.stdout'], {}), '(elroute.stdout)\n', (1651, 1667), False, 'import json\n')]
import torch
import torch.nn as nn
import torch.nn.functional as F
class PGDModel(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDModel, self).__init__()
self.basic_net = basic_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0]
x = x.detach() + self.step_size * torch.sign(grad.detach())
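            # project back onto the L-infinity ball of radius epsilon around the clean inputs, then clip to the valid [0, 1] range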
x = torch.min(torch.max(x, inputs.detach() - self.epsilon),
inputs.detach() + self.epsilon)
x = torch.clamp(x, 0, 1)
return self.basic_net(x)
class PGDL2Model(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDL2Model, self).__init__()
self.basic_net = basic_net
self.epsilon = config['epsilon']
self.rand = config['random_start']
self.step_size = config['step_size']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).normal_(0, self.step_size)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0].detach()
grad_norm = grad.view(x.size(0), -1).norm(2, 1)
delta = self.step_size * grad / grad_norm.view(x.size(0), 1, 1, 1)
x = x.detach() + delta
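            # project the accumulated perturbation back onto the L2 ball of radius epsilon around the clean inputs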
diff = (x - inputs).view(x.size(0), -1).renorm(2, 0, self.epsilon)
x = diff.view(x.size()) + inputs
x.clamp_(0, 1)
return self.basic_net(x)
| [
"torch.enable_grad",
"torch.autograd.grad",
"torch.nn.functional.cross_entropy",
"torch.zeros_like",
"torch.clamp"
]
| [((1317, 1337), 'torch.clamp', 'torch.clamp', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (1328, 1337), False, 'import torch\n'), ((911, 930), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (928, 930), False, 'import torch\n'), ((998, 1047), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {'reduction': '"""sum"""'}), "(logits, targets, reduction='sum')\n", (1013, 1047), True, 'import torch.nn.functional as F\n'), ((1067, 1095), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'x'], {}), '(loss, x)\n', (1086, 1095), False, 'import torch\n'), ((2210, 2229), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (2227, 2229), False, 'import torch\n'), ((2297, 2346), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {'reduction': '"""sum"""'}), "(logits, targets, reduction='sum')\n", (2312, 2346), True, 'import torch.nn.functional as F\n'), ((765, 784), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (781, 784), False, 'import torch\n'), ((2074, 2093), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (2090, 2093), False, 'import torch\n'), ((2366, 2394), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'x'], {}), '(loss, x)\n', (2385, 2394), False, 'import torch\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKMSCryptoKeyVersionResult',
'AwaitableGetKMSCryptoKeyVersionResult',
'get_kms_crypto_key_version',
'get_kms_crypto_key_version_output',
]
@pulumi.output_type
class GetKMSCryptoKeyVersionResult:
"""
A collection of values returned by getKMSCryptoKeyVersion.
"""
def __init__(__self__, algorithm=None, crypto_key=None, id=None, name=None, protection_level=None, public_keys=None, state=None, version=None):
if algorithm and not isinstance(algorithm, str):
raise TypeError("Expected argument 'algorithm' to be a str")
pulumi.set(__self__, "algorithm", algorithm)
if crypto_key and not isinstance(crypto_key, str):
raise TypeError("Expected argument 'crypto_key' to be a str")
pulumi.set(__self__, "crypto_key", crypto_key)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protection_level and not isinstance(protection_level, str):
raise TypeError("Expected argument 'protection_level' to be a str")
pulumi.set(__self__, "protection_level", protection_level)
if public_keys and not isinstance(public_keys, list):
raise TypeError("Expected argument 'public_keys' to be a list")
pulumi.set(__self__, "public_keys", public_keys)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def algorithm(self) -> str:
"""
The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> str:
return pulumi.get(self, "crypto_key")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectionLevel")
def protection_level(self) -> str:
"""
The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.
"""
return pulumi.get(self, "protection_level")
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> Sequence['outputs.GetKMSCryptoKeyVersionPublicKeyResult']:
"""
If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below.
"""
return pulumi.get(self, "public_keys")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def version(self) -> Optional[int]:
return pulumi.get(self, "version")
class AwaitableGetKMSCryptoKeyVersionResult(GetKMSCryptoKeyVersionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKMSCryptoKeyVersionResult(
algorithm=self.algorithm,
crypto_key=self.crypto_key,
id=self.id,
name=self.name,
protection_level=self.protection_level,
public_keys=self.public_keys,
state=self.state,
version=self.version)
def get_kms_crypto_key_version(crypto_key: Optional[str] = None,
version: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKMSCryptoKeyVersionResult:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
__args__ = dict()
__args__['cryptoKey'] = crypto_key
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion', __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult).value
return AwaitableGetKMSCryptoKeyVersionResult(
algorithm=__ret__.algorithm,
crypto_key=__ret__.crypto_key,
id=__ret__.id,
name=__ret__.name,
protection_level=__ret__.protection_level,
public_keys=__ret__.public_keys,
state=__ret__.state,
version=__ret__.version)
@_utilities.lift_output_func(get_kms_crypto_key_version)
def get_kms_crypto_key_version_output(crypto_key: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[Optional[int]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKMSCryptoKeyVersionResult]:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
...
| [
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
]
| [((2465, 2496), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""cryptoKey"""'}), "(name='cryptoKey')\n", (2478, 2496), False, 'import pulumi\n'), ((3044, 3081), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""protectionLevel"""'}), "(name='protectionLevel')\n", (3057, 3081), False, 'import pulumi\n'), ((3448, 3480), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""publicKeys"""'}), "(name='publicKeys')\n", (3461, 3480), False, 'import pulumi\n'), ((947, 991), 'pulumi.set', 'pulumi.set', (['__self__', '"""algorithm"""', 'algorithm'], {}), "(__self__, 'algorithm', algorithm)\n", (957, 991), False, 'import pulumi\n'), ((1133, 1179), 'pulumi.set', 'pulumi.set', (['__self__', '"""crypto_key"""', 'crypto_key'], {}), "(__self__, 'crypto_key', crypto_key)\n", (1143, 1179), False, 'import pulumi\n'), ((1297, 1327), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1307, 1327), False, 'import pulumi\n'), ((1451, 1485), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1461, 1485), False, 'import pulumi\n'), ((1645, 1703), 'pulumi.set', 'pulumi.set', (['__self__', '"""protection_level"""', 'protection_level'], {}), "(__self__, 'protection_level', protection_level)\n", (1655, 1703), False, 'import pulumi\n'), ((1850, 1898), 'pulumi.set', 'pulumi.set', (['__self__', '"""public_keys"""', 'public_keys'], {}), "(__self__, 'public_keys', public_keys)\n", (1860, 1898), False, 'import pulumi\n'), ((2025, 2061), 'pulumi.set', 'pulumi.set', (['__self__', '"""state"""', 'state'], {}), "(__self__, 'state', state)\n", (2035, 2061), False, 'import pulumi\n'), ((2194, 2234), 'pulumi.set', 'pulumi.set', (['__self__', '"""version"""', 'version'], {}), "(__self__, 'version', version)\n", (2204, 2234), False, 'import pulumi\n'), ((2415, 2444), 'pulumi.get', 'pulumi.get', (['self', '"""algorithm"""'], {}), "(self, 'algorithm')\n", (2425, 2444), False, 'import pulumi\n'), ((2545, 2575), 'pulumi.get', 'pulumi.get', (['self', '"""crypto_key"""'], {}), "(self, 'crypto_key')\n", (2555, 2575), False, 'import pulumi\n'), ((2741, 2763), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (2751, 2763), False, 'import pulumi\n'), ((2999, 3023), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (3009, 3023), False, 'import pulumi\n'), ((3391, 3427), 'pulumi.get', 'pulumi.get', (['self', '"""protection_level"""'], {}), "(self, 'protection_level')\n", (3401, 3427), False, 'import pulumi\n'), ((3815, 3846), 'pulumi.get', 'pulumi.get', (['self', '"""public_keys"""'], {}), "(self, 'public_keys')\n", (3825, 3846), False, 'import pulumi\n'), ((4196, 4221), 'pulumi.get', 'pulumi.get', (['self', '"""state"""'], {}), "(self, 'state')\n", (4206, 4221), False, 'import pulumi\n'), ((4311, 4338), 'pulumi.get', 'pulumi.get', (['self', '"""version"""'], {}), "(self, 'version')\n", (4321, 4338), False, 'import pulumi\n'), ((6365, 6387), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (6385, 6387), False, 'import pulumi\n'), ((6479, 6616), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion"""', '__args__'], {'opts': 'opts', 'typ': 'GetKMSCryptoKeyVersionResult'}), "('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion',\n __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult)\n", (6500, 6616), False, 'import pulumi\n')] |
#!/usr/bin/env python3
import itertools
# Constants
NUMBERS = range(0, 10)
# Main Execution
def main():
count = 0
for length in range(0, len(NUMBERS) + 1):
for subset in itertools.combinations(NUMBERS, length):
if sum(subset) % 3 == 0:
count += 1
print(count)
if __name__ == '__main__':
main()
| [
"itertools.combinations"
]
| [((191, 230), 'itertools.combinations', 'itertools.combinations', (['NUMBERS', 'length'], {}), '(NUMBERS, length)\n', (213, 230), False, 'import itertools\n')] |
# -*- coding: utf-8 -*-
from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d
# about hashlib ==> get_md5, get_sha, get_sha3 || default mode=256
s = "test_information" # 这里只能丢字符串
print(get_md5(s)) # 5414ffd88fcb58417e64ecec51bb3a6b
print(get_md5(s, upper=True)) # 5414FFD88FCB58417E64ECEC51BB3A6B
print(get_md5(s, to_bin=True)) # b'T\x14\xff\xd8\x8f\xcbXA~d\xec\xecQ\xbb:k' # 转成二进制的需求没什么用但是可以保留
print(get_sha(s)) # d09869fdf901465c8566f0e2debfa3f6a3d878a8157e199c7c4c6dd755617f33
print(get_sha(s, to_bin=True)) # b'\xd0\x98i\xfd\xf9\x01F\\\x85f\xf0\xe2\xde\xbf\xa3\xf6\xa3\xd8x\xa8\x15~\x19\x9c|Lm\xd7Ua\x7f3'
print(get_sha(s, mode=1)) # ada5dfdf0c9a76a84958310b838a70b6fd6d01f6 # default mode=256 // mode: 1 224 256 384 512
print(get_sha3(s)) # 9c539ca35c6719f546e67837ff37fe7791e53fe40715cd4da0167c78c9adc2e8
print(get_sha3(s, to_bin=True)) # b'\x9cS\x9c\xa3\\g\x19\xf5F\xe6x7\xff7\xfew\x91\xe5?\xe4\x07\x15\xcdM\xa0\x16|x\xc9\xad\xc2\xe8'
print(get_sha3(s, mode=1)) # return "" // SUPPORT: sha3_224 sha3_256 sha3_384 sha3_512// only need inputting: 224 256 384 512 # default mode=256 // mode: 224 256 384 512
print(get_sha3(s, mode=384)) # 95c09e20a139843eae877a64cd95d6a629b3c9ff383b5460557aab2612682d4228d05fe41606a79acf5ae1c4de35160c
# about base64 ==> get_b64e, get_b64d
res_b64_encode = get_b64e(s)
print(res_b64_encode) # dGVzdF9pbmZvcm1hdGlvbg==
res_b64_bin = get_b64e(s, to_bin=True)
print(res_b64_bin) # b'dGVzdF9pbmZvcm1hdGlvbg=='
res_b32_encode = get_b64e(s, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_encode) # ORSXG5C7NFXGM33SNVQXI2LPNY======
res_b64_decode = get_b64d(res_b64_encode)
print(res_b64_decode) # test_information
res_b32_decode = get_b64d(res_b32_encode, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_decode) # test_information
| [
"lite_tools.get_b64d",
"lite_tools.get_md5",
"lite_tools.get_sha",
"lite_tools.get_sha3",
"lite_tools.get_b64e"
]
| [((1394, 1405), 'lite_tools.get_b64e', 'get_b64e', (['s'], {}), '(s)\n', (1402, 1405), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1493, 1517), 'lite_tools.get_b64e', 'get_b64e', (['s'], {'to_bin': '(True)'}), '(s, to_bin=True)\n', (1501, 1517), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1599, 1619), 'lite_tools.get_b64e', 'get_b64e', (['s'], {'mode': '(32)'}), '(s, mode=32)\n', (1607, 1619), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1747, 1771), 'lite_tools.get_b64d', 'get_b64d', (['res_b64_encode'], {}), '(res_b64_encode)\n', (1755, 1771), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1842, 1875), 'lite_tools.get_b64d', 'get_b64d', (['res_b32_encode'], {'mode': '(32)'}), '(res_b32_encode, mode=32)\n', (1850, 1875), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((207, 217), 'lite_tools.get_md5', 'get_md5', (['s'], {}), '(s)\n', (214, 217), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((275, 297), 'lite_tools.get_md5', 'get_md5', (['s'], {'upper': '(True)'}), '(s, upper=True)\n', (282, 297), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((343, 366), 'lite_tools.get_md5', 'get_md5', (['s'], {'to_bin': '(True)'}), '(s, to_bin=True)\n', (350, 366), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((444, 454), 'lite_tools.get_sha', 'get_sha', (['s'], {}), '(s)\n', (451, 454), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((544, 567), 'lite_tools.get_sha', 'get_sha', (['s'], {'to_bin': '(True)'}), '(s, to_bin=True)\n', (551, 567), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((676, 694), 'lite_tools.get_sha', 'get_sha', (['s'], {'mode': '(1)'}), '(s, mode=1)\n', (683, 694), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((801, 812), 'lite_tools.get_sha3', 'get_sha3', (['s'], {}), '(s)\n', (809, 812), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((901, 925), 'lite_tools.get_sha3', 'get_sha3', (['s'], {'to_bin': '(True)'}), '(s, to_bin=True)\n', (909, 925), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1033, 1052), 'lite_tools.get_sha3', 'get_sha3', (['s'], {'mode': '(1)'}), '(s, mode=1)\n', (1041, 1052), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n'), ((1210, 1231), 'lite_tools.get_sha3', 'get_sha3', (['s'], {'mode': '(384)'}), '(s, mode=384)\n', (1218, 1231), False, 'from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d\n')] |
# Fast large file synchronization inspired by rsync.
#
# Author: <NAME> <<EMAIL>>
# Last Change: March 6, 2020
# URL: https://pdiffcopy.readthedocs.io
"""Parallel hashing of files using :mod:`multiprocessing` and :mod:`pdiffcopy.mp`."""
# Standard library modules.
import functools
import hashlib
import os
# External dependencies.
from six.moves import range
# Modules included in our package.
from pdiffcopy.mp import WorkerPool
# Public identifiers that require documentation.
__all__ = ("compute_hashes", "hash_worker")
def compute_hashes(filename, block_size, method, concurrency):
"""Compute checksums of a file in blocks (parallel)."""
with WorkerPool(
concurrency=concurrency,
generator_fn=functools.partial(range, 0, os.path.getsize(filename), block_size),
worker_fn=functools.partial(hash_worker, block_size=block_size, filename=filename, method=method),
) as pool:
for offset, digest in pool:
yield offset, digest
def hash_worker(offset, block_size, filename, method):
"""Worker function to be run in child processes."""
with open(filename, "rb") as handle:
handle.seek(offset)
context = hashlib.new(method)
context.update(handle.read(block_size))
return offset, context.hexdigest()
| [
"os.path.getsize",
"functools.partial",
"hashlib.new"
]
| [((1188, 1207), 'hashlib.new', 'hashlib.new', (['method'], {}), '(method)\n', (1199, 1207), False, 'import hashlib\n'), ((815, 906), 'functools.partial', 'functools.partial', (['hash_worker'], {'block_size': 'block_size', 'filename': 'filename', 'method': 'method'}), '(hash_worker, block_size=block_size, filename=filename,\n method=method)\n', (832, 906), False, 'import functools\n'), ((757, 782), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (772, 782), False, 'import os\n')] |
import pytest
from moz_library.rental_books import RentalBooks
class TestRentalBooks:
@pytest.fixture()
def books1(self):
return RentalBooks()
def test_can_extend_period_1(self, books1):
assert books1._can_extend_period("延長できません") is False
def test_can_extend_period_2(self, books1):
assert books1._can_extend_period("すでに延長されています") is False
def test_can_extend_period_3(self, books1):
assert books1._can_extend_period("それ以外") is True
| [
"pytest.fixture",
"moz_library.rental_books.RentalBooks"
]
| [((93, 109), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (107, 109), False, 'import pytest\n'), ((147, 160), 'moz_library.rental_books.RentalBooks', 'RentalBooks', ([], {}), '()\n', (158, 160), False, 'from moz_library.rental_books import RentalBooks\n')] |
#!/usr/bin/python3
## write2cly.py - reads json (generated by sml_reader.py) from stdin
## - writes values to Corlysis time series InfluxDB
##
## Writes data from smart meter to time series database (InfluxDB)
## at Corlysis.com [1]. You need to configure your database and token
## in the config section.
##
## [1] https://corlysis.com/
##==== license section ========
## This code is under MIT License: Copyright (C) 2019 <NAME>
## License details see https://choosealicense.com/licenses/mit/
##==== config section ========
# define corlysis settings here - set db and token at least
cly_base_url = 'https://corlysis.com:8086/write'
cly_parameters = {
"db": "energy",
"u" : "token",
"p" : "placeyourtokenhere",
"precision": "ms"}
# assign readable field names
config = {
"1.8.0": "Bezug",
"2.8.0": "Einspeisung",
"16.7.0": "Wirkleistung"
}
##==== code section ==== no need to change lines below ====
##-- import libraries
import json, sys, requests
import requests
import time
# load json from stdin
try:
myjson = json.load(sys.stdin)
except:
sys.stderr.write('!! error loading json')
exit(1)
# decode json
try:
line = "meter_data "
# add each meter value to line
for obis in myjson['data']:
key = config[obis] # set human readable field name
value = myjson['data'][obis] # get value from smart meter
line += key + '=' + str(value) + ',' # add key=value to insert line
# cut off last comma
line = line[:-1]
# add timestamp as unix timestamp in ms
line += ' ' + str(int(time.time()*1000)) #+ '\n'
# post data into time series database; http response should be 204
r = requests.post(cly_base_url, params=cly_parameters, data=line)
if r.status_code != 204 :
        sys.stderr.write(str(r.status_code))
        sys.stderr.write(str(r.content))
# catch if input is not valid json
except:
sys.stderr.write('!!error: no data block in json')
exit(2)
| [
"json.load",
"sys.stderr.write",
"requests.post",
"time.time"
]
| [((1073, 1093), 'json.load', 'json.load', (['sys.stdin'], {}), '(sys.stdin)\n', (1082, 1093), False, 'import json, sys, requests\n'), ((1669, 1730), 'requests.post', 'requests.post', (['cly_base_url'], {'params': 'cly_parameters', 'data': 'line'}), '(cly_base_url, params=cly_parameters, data=line)\n', (1682, 1730), False, 'import requests\n'), ((1104, 1145), 'sys.stderr.write', 'sys.stderr.write', (['"""!! error loading json"""'], {}), "('!! error loading json')\n", (1120, 1145), False, 'import json, sys, requests\n'), ((1763, 1794), 'sys.stderr.write', 'sys.stderr.write', (['r.status_code'], {}), '(r.status_code)\n', (1779, 1794), False, 'import json, sys, requests\n'), ((1799, 1826), 'sys.stderr.write', 'sys.stderr.write', (['r.content'], {}), '(r.content)\n', (1815, 1826), False, 'import json, sys, requests\n'), ((1872, 1922), 'sys.stderr.write', 'sys.stderr.write', (['"""!!error: no data block in json"""'], {}), "('!!error: no data block in json')\n", (1888, 1922), False, 'import json, sys, requests\n'), ((1566, 1577), 'time.time', 'time.time', ([], {}), '()\n', (1575, 1577), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import numpy as np
# Generic data augmentation
class Augmenter:
""" Generic data augmentation class with chained operations
"""
def __init__(self, ops=[]):
if not isinstance(ops, list):
print("Error: ops must be a list of functions")
quit()
self.ops = ops
def add(self, op):
self.ops.append(op)
def augment(self, img):
aug = img.copy()
for op in self.ops:
aug = op(aug)
return aug
def __call__(self, img):
return self.augment(img)
##########
# Images #
##########
def horizontal_flip(p=0.5):
def fc(img):
if random.random() < p:
return img[..., ::-1]
else:
return img
return fc
def vertical_flip(p=0.5):
def fc(img):
if random.random() < p:
return img[..., ::-1, :]
else:
return img
return fc
def gaussian_noise(p=0.5, mean=0, sigma=0.02):
def fc(img):
if random.random() < p:
gauss = np.random.normal(mean, sigma, img.shape).astype(np.float32)
return img + gauss
else:
return img
return fc
def black_vstripe(p=0.5, size=10):
def fc(img):
if random.random() < p:
j = int(random.random() * (img.shape[1]-size))
img[..., j:j+size] = 0
return img
else:
return img
return fc
def black_hstripe(p=0.5, size=10):
def fc(img):
if random.random() < p:
j = int(random.random() * (img.shape[0]-size))
img[..., j:j+size, :] = 0
return img
else:
return img
return fc
def default_augmenter(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, vertical flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
print("Using default image augmenter")
return Augmenter([ horizontal_flip(p), gaussian_noise(p, mean, sigma), black_hstripe(p, size=strip_size), black_vstripe(p, size=strip_size) ])
##########
# Videos #
##########
def horizontal_flip_vid(p=0.5):
def fc(vid):
if random.random() < p:
return vid[..., ::-1]
else:
return vid
return fc
def black_vstripe_vid(p=0.5, size=10):
def fc(batch):
if random.random() < p:
j = int(random.random() * (batch.shape[-1]-size))
batch[..., j:j+size] = 0
return batch
else:
return batch
return fc
def black_hstripe_vid(p=0.5, size=10):
def fc(batch):
if random.random() < p:
j = int(random.random() * (batch.shape[-2]-size))
batch[..., j:j+size, :] = 0
return batch
else:
return batch
return fc
def default_augmenter_vid(p=0.5, strip_size=3, mean=0, sigma=0.02):
"""Default data augmentation with horizontal flip, gaussian noise, black hstripe, and black vstripe.
Returns:
Augmenter object. Use as: aug.augment(img)
"""
return Augmenter([ horizontal_flip_vid(p), gaussian_noise(p, mean, sigma), black_hstripe_vid(p, size=strip_size), black_vstripe_vid(p, size=strip_size) ])
| [
"numpy.random.normal",
"random.random"
]
| [((712, 727), 'random.random', 'random.random', ([], {}), '()\n', (725, 727), False, 'import random\n'), ((873, 888), 'random.random', 'random.random', ([], {}), '()\n', (886, 888), False, 'import random\n'), ((1058, 1073), 'random.random', 'random.random', ([], {}), '()\n', (1071, 1073), False, 'import random\n'), ((1305, 1320), 'random.random', 'random.random', ([], {}), '()\n', (1318, 1320), False, 'import random\n'), ((1558, 1573), 'random.random', 'random.random', ([], {}), '()\n', (1571, 1573), False, 'import random\n'), ((2299, 2314), 'random.random', 'random.random', ([], {}), '()\n', (2312, 2314), False, 'import random\n'), ((2475, 2490), 'random.random', 'random.random', ([], {}), '()\n', (2488, 2490), False, 'import random\n'), ((2743, 2758), 'random.random', 'random.random', ([], {}), '()\n', (2756, 2758), False, 'import random\n'), ((1099, 1139), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', 'img.shape'], {}), '(mean, sigma, img.shape)\n', (1115, 1139), True, 'import numpy as np\n'), ((1346, 1361), 'random.random', 'random.random', ([], {}), '()\n', (1359, 1361), False, 'import random\n'), ((1599, 1614), 'random.random', 'random.random', ([], {}), '()\n', (1612, 1614), False, 'import random\n'), ((2516, 2531), 'random.random', 'random.random', ([], {}), '()\n', (2529, 2531), False, 'import random\n'), ((2784, 2799), 'random.random', 'random.random', ([], {}), '()\n', (2797, 2799), False, 'import random\n')] |
import logging
import json
import asyncio
from google.protobuf import json_format
from umbra.common.protobuf.umbra_grpc import MonitorBase
from umbra.common.protobuf.umbra_pb2 import Instruction, Snapshot
from umbra.monitor.tools import Tools
logger = logging.getLogger(__name__)
logging.getLogger("hpack").setLevel(logging.WARNING)
class Monitor(MonitorBase):
def __init__(self, info):
self.tools = Tools()
async def Listen(self, stream):
logging.debug("Instruction Received")
instruction: Instruction = await stream.recv_message()
instruction_dict = json_format.MessageToDict(instruction, preserving_proto_field_name=True)
snapshot_dict = await self.tools.handle(instruction_dict)
snapshot = json_format.ParseDict(snapshot_dict, Snapshot())
await stream.send_message(snapshot)
| [
"logging.getLogger",
"umbra.monitor.tools.Tools",
"logging.debug",
"google.protobuf.json_format.MessageToDict",
"umbra.common.protobuf.umbra_pb2.Snapshot"
]
| [((256, 283), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'import logging\n'), ((284, 310), 'logging.getLogger', 'logging.getLogger', (['"""hpack"""'], {}), "('hpack')\n", (301, 310), False, 'import logging\n'), ((418, 425), 'umbra.monitor.tools.Tools', 'Tools', ([], {}), '()\n', (423, 425), False, 'from umbra.monitor.tools import Tools\n'), ((471, 508), 'logging.debug', 'logging.debug', (['"""Instruction Received"""'], {}), "('Instruction Received')\n", (484, 508), False, 'import logging\n'), ((607, 679), 'google.protobuf.json_format.MessageToDict', 'json_format.MessageToDict', (['instruction'], {'preserving_proto_field_name': '(True)'}), '(instruction, preserving_proto_field_name=True)\n', (632, 679), False, 'from google.protobuf import json_format\n'), ((802, 812), 'umbra.common.protobuf.umbra_pb2.Snapshot', 'Snapshot', ([], {}), '()\n', (810, 812), False, 'from umbra.common.protobuf.umbra_pb2 import Instruction, Snapshot\n')] |
import numpy as np
def rot_to_angle(rot):
return np.arccos(0.5*np.trace(rot)-0.5)
def rot_to_heading(rot):
# This function calculates the heading angle of the rot matrix w.r.t. the y-axis
new_rot = rot[0:3:2, 0:3:2] # remove the mid row and column corresponding to the y-axis
new_rot = new_rot/np.linalg.det(new_rot)
return np.arctan2(new_rot[1, 0], new_rot[0, 0])
| [
"numpy.trace",
"numpy.arctan2",
"numpy.linalg.det"
]
| [((349, 389), 'numpy.arctan2', 'np.arctan2', (['new_rot[1, 0]', 'new_rot[0, 0]'], {}), '(new_rot[1, 0], new_rot[0, 0])\n', (359, 389), True, 'import numpy as np\n'), ((315, 337), 'numpy.linalg.det', 'np.linalg.det', (['new_rot'], {}), '(new_rot)\n', (328, 337), True, 'import numpy as np\n'), ((69, 82), 'numpy.trace', 'np.trace', (['rot'], {}), '(rot)\n', (77, 82), True, 'import numpy as np\n')] |
import logging
logger = logging.getLogger(__name__)
import random
import chainercv
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
from pose.hand_dataset.geometry_utils import normalize_joint_zyx
from pose.hand_dataset.image_utils import normalize_depth
# Decimal Code (R,G,B)
BASE_COLOR = {
"RED": (255, 0, 0),
"GREEN": (0, 255, 0),
"BLUE": (0, 0, 255),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"MAGENTA": (255, 0, 255),
}
def vis_image(img, ax=None):
"""
extend chainercv.visualizations.vis_image
"""
C, H, W = img.shape
if C == 1:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
        # remove channel dimension
ax.imshow(img.squeeze())
else:
ax = chainercv.visualizations.vis_image(img, ax)
return ax
def preprocess(point, ax, img):
input_point = np.asarray(point)
if input_point.ndim == 2:
input_point = np.expand_dims(point, axis=0)
H, W = None, None
if ax is None:
fig = plt.figure()
if input_point.shape[-1] == 3:
ax = fig.add_subplot(1, 1, 1, projection="3d")
else:
ax = fig.add_subplot(1, 1, 1)
if img is not None:
ax = vis_image(img, ax=ax)
_, H, W = img.shape
return input_point, ax, H, W
def vis_point(point, img=None, color=None, ax=None):
"""
Visualize points in an image, customized to our purpose.
Base implementation is taken from chainercv.visualizations.vis_image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
c = np.asarray(color) / 255. if color is not None else None
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x), (z,y,x).
# (K, N) -> (N, K)
pts = point[i].transpose() # (K,N) -> (N,K)
# resort coordinate order : yx -> xy or zyx -> xyz
pts = pts[::-1]
ax.scatter(*pts, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_edge(point, indices, img=None, color=None, ax=None):
"""
Visualize edges in an image
"""
point, ax, H, W = preprocess(point, ax, img)
n_inst = len(point)
if color is not None:
color = np.asarray(color) / 255.
else:
color = [None] * len(indices)
for i in range(n_inst):
# note that the shape of `point[i]` is (K,N) and the format of one is (y, x) or (z,y,x).
pts = point[i]
for ((s, t), c) in zip(indices, color):
            # Select the points that form the edge, i.e. a pair of points (start, target).
# Note that [::-1] does resort coordinate order: yx -> xy or zyx -> xyz
edge = pts[[s, t]].transpose()
edge = edge[::-1]
ax.plot(*edge, c=c)
if W is not None:
ax.set_xlim(left=0, right=W)
if H is not None:
ax.set_ylim(bottom=H - 1, top=0)
return ax
def vis_pose(point, indices, img=None, point_color=None, edge_color=None, ax=None):
ax = vis_point(point, img=img, color=point_color, ax=ax)
vis_edge(point, indices, img=img, color=edge_color, ax=ax)
def visualize_both(dataset, keypoint_names, edges, color_map, normalize=False):
import random
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223, projection="3d")
ax4 = fig.add_subplot(224, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
rgb_joint = normalize_joint_zyx(rgb_joint, rgb_camera, z_size)
print(example["param"])
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax3, ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_rgb(dataset, keypoint_names, edges, color_map, idx=None):
import random
if idx is None:
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax1 = fig.add_subplot(211)
ax3 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
rgb = example["rgb"]
rgb_joint = example["rgb_joint"]
rgb_camera = example["rgb_camera"]
rgb_vu = rgb_camera.zyx2vu(rgb_joint)
vis_point(rgb_vu, img=rgb, color=color, ax=ax1)
vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1)
vis_point(rgb_joint, color=color, ax=ax3)
vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3)
for ax in [ax3]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
def visualize_depth(dataset, keypoint_names, edges, color_map, normalize=False):
idx = random.randint(0, len(dataset) - 1)
logger.info("get example")
example = dataset.get_example(idx)
logger.info("Done get example")
fig = plt.figure(figsize=(5, 10))
ax2 = fig.add_subplot(211)
ax4 = fig.add_subplot(212, projection="3d")
color = [color_map[k] for k in keypoint_names]
edge_color = [color_map[s, t] for s, t in edges]
depth = example["depth"].astype(np.float32)
depth_joint = example["depth_joint"]
depth_camera = example["depth_camera"]
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True)
z_size = example["param"]["z_size"]
if normalize:
depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size)
depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size)
print(example["param"])
vis_point(depth_vu, img=depth, color=color, ax=ax2)
vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2)
vis_point(depth_joint, color=color, ax=ax4)
vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4)
for ax in [ax4]:
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.view_init(-65, -90)
plt.savefig("output.png")
plt.show()
| [
"logging.getLogger",
"chainercv.visualizations.vis_image",
"matplotlib.pyplot.savefig",
"numpy.asarray",
"matplotlib.pyplot.figure",
"pose.hand_dataset.geometry_utils.normalize_joint_zyx",
"numpy.expand_dims",
"matplotlib.pyplot.show"
]
| [((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n'), ((946, 963), 'numpy.asarray', 'np.asarray', (['point'], {}), '(point)\n', (956, 963), True, 'import numpy as np\n'), ((3562, 3588), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3572, 3588), True, 'from matplotlib import pyplot as plt\n'), ((4427, 4477), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['rgb_joint', 'rgb_camera', 'z_size'], {}), '(rgb_joint, rgb_camera, z_size)\n', (4446, 4477), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((5115, 5140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (5126, 5140), True, 'from matplotlib import pyplot as plt\n'), ((5145, 5155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5153, 5155), True, 'from matplotlib import pyplot as plt\n'), ((5434, 5461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 10)'}), '(figsize=(5, 10))\n', (5444, 5461), True, 'from matplotlib import pyplot as plt\n'), ((6155, 6180), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (6166, 6180), True, 'from matplotlib import pyplot as plt\n'), ((6185, 6195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6193, 6195), True, 'from matplotlib import pyplot as plt\n'), ((6441, 6468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 10)'}), '(figsize=(5, 10))\n', (6451, 6468), True, 'from matplotlib import pyplot as plt\n'), ((7009, 7063), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['depth_joint', 'depth_camera', 'z_size'], {}), '(depth_joint, depth_camera, z_size)\n', (7028, 7063), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((7468, 7493), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output.png"""'], {}), "('output.png')\n", (7479, 7493), True, 'from matplotlib import pyplot as plt\n'), ((7498, 7508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7506, 7508), True, 'from matplotlib import pyplot as plt\n'), ((836, 879), 'chainercv.visualizations.vis_image', 'chainercv.visualizations.vis_image', (['img', 'ax'], {}), '(img, ax)\n', (870, 879), False, 'import chainercv\n'), ((1017, 1046), 'numpy.expand_dims', 'np.expand_dims', (['point'], {'axis': '(0)'}), '(point, axis=0)\n', (1031, 1046), True, 'import numpy as np\n'), ((1102, 1114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1112, 1114), True, 'from matplotlib import pyplot as plt\n'), ((4212, 4266), 'pose.hand_dataset.geometry_utils.normalize_joint_zyx', 'normalize_joint_zyx', (['depth_joint', 'depth_camera', 'z_size'], {}), '(depth_joint, depth_camera, z_size)\n', (4231, 4266), False, 'from pose.hand_dataset.geometry_utils import normalize_joint_zyx\n'), ((689, 701), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (699, 701), True, 'from matplotlib import pyplot as plt\n'), ((1675, 1692), 'numpy.asarray', 'np.asarray', (['color'], {}), '(color)\n', (1685, 1692), True, 'import numpy as np\n'), ((2409, 2426), 'numpy.asarray', 'np.asarray', (['color'], {}), '(color)\n', (2419, 2426), True, 'import numpy as np\n')] |
import os
import skimage.io
from torch.nn import Module
import torch.nn
from torchvision.models import resnet18
from nn.speaker_dataset import Dataset # @UnusedImport
os.environ['TORCH_MODEL_ZOO'] = '../data/'
VIDTIMIT_PATH = '../data/vidtimit/'
skimage.io.use_plugin('pil')
class Net(Module):
def __init__(self):
super().__init__()
resnet = resnet18(pretrained=True)
self.features = torch.nn.Sequential(*list(resnet.children())[:-1])
self.classifier = torch.nn.Sequential(
torch.nn.Linear(512, 2)
)
# print(len(list(self.features.parameters())))
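        # freeze the first 20 parameter tensors of the ResNet backbone so only later layers and the classifier are trained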
for p in list(self.features.parameters())[:20]:
p.requires_grad = False
def forward(self, x, **kw):
# X = F.softmax(self.basenet(X))
f = self.features(x)
f = f.view(f.size(0), -1)
y = self.classifier(f)
return y
def get_speaking_detector_final():
m = torch.load('../data/speaker.pt')
m = m.eval();
return m
def get_speaking_detector(e):
m = torch.load('../data/speaker/model.e{}.pt'.format(e))
m = m.eval();
return m
| [
"torchvision.models.resnet18"
]
| [((371, 396), 'torchvision.models.resnet18', 'resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (379, 396), False, 'from torchvision.models import resnet18\n')] |
from typing import List, Tuple, Union
import numpy as np
import scipy.special
from PIL import Image, ImageFilter
class RandomBetaMorphology:
def __init__(
self, filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> None:
assert filter_size_min % 2 != 0, "Filter size must be odd"
assert filter_size_max % 2 != 0, "Filter size must be odd"
self.filter_size_min = filter_size_min
self.filter_size_max = filter_size_max
self.alpha = alpha
self.beta = beta
self.filter_sizes, self.filter_probs = self._create_filter_distribution(
filter_size_min, filter_size_max, alpha, beta
)
@staticmethod
def _create_filter_distribution(
filter_size_min: int, filter_size_max: int, alpha: float, beta: float
) -> Tuple[List[int], Union[List[float], np.ndarray]]:
n = (filter_size_max - filter_size_min) // 2 + 1
if n < 2:
return [filter_size_min], np.asarray([1.0], dtype=np.float32)
filter_sizes = []
filter_probs = []
for k in range(n):
filter_sizes.append(filter_size_min + 2 * k)
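            # unnormalized beta-binomial weight: C(n, k) * B(alpha + k, n - k + beta)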
filter_probs.append(
scipy.special.comb(n, k) * scipy.special.beta(alpha + k, n - k + beta)
)
np_filter_probs = np.asarray(filter_probs, dtype=np.float32)
np_filter_probs = filter_probs / np_filter_probs.sum()
return filter_sizes, np_filter_probs
def sample_filter_size(self):
filter_size = np.random.choice(self.filter_sizes, p=self.filter_probs)
return filter_size
def __call__(self, *args, **kwargs):
        raise NotImplementedError
def __repr__(self) -> str:
return (
f"vision.{self.__class__.__name__}("
f"filter_size_min={self.filter_size_min}, "
f"filter_size_max={self.filter_size_max}, "
f"alpha={self.alpha}, beta={self.beta})"
)
class Dilate(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 7,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MaxFilter(filter_size))
class Erode(RandomBetaMorphology):
def __init__(
self,
filter_size_min: int = 3,
filter_size_max: int = 5,
alpha: float = 1,
beta: float = 3,
) -> None:
super().__init__(filter_size_min, filter_size_max, alpha, beta)
def __call__(self, img: Image) -> Image:
filter_size = self.sample_filter_size()
return img.filter(ImageFilter.MinFilter(filter_size))
if __name__ == "__main__":
import argparse
from PIL import ImageOps
parser = argparse.ArgumentParser()
parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate")
parser.add_argument("images", type=argparse.FileType("rb"), nargs="+")
args = parser.parse_args()
transformer = Dilate() if args.operation == "dilate" else Erode()
for f in args.images:
x = Image.open(f, "r").convert("L")
x = ImageOps.invert(x)
y = transformer(x)
w, h = x.size
z = Image.new("L", (w, 2 * h))
z.paste(x, (0, 0))
z.paste(y, (0, h))
z = z.resize(size=(w // 2, h), resample=Image.BICUBIC)
z.show()
input()
| [
"argparse.FileType",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.random.choice",
"PIL.Image.new",
"PIL.ImageFilter.MinFilter",
"numpy.asarray",
"PIL.ImageOps.invert",
"PIL.ImageFilter.MaxFilter"
]
| [((2926, 2951), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2949, 2951), False, 'import argparse\n'), ((1328, 1370), 'numpy.asarray', 'np.asarray', (['filter_probs'], {'dtype': 'np.float32'}), '(filter_probs, dtype=np.float32)\n', (1338, 1370), True, 'import numpy as np\n'), ((1536, 1592), 'numpy.random.choice', 'np.random.choice', (['self.filter_sizes'], {'p': 'self.filter_probs'}), '(self.filter_sizes, p=self.filter_probs)\n', (1552, 1592), True, 'import numpy as np\n'), ((3298, 3316), 'PIL.ImageOps.invert', 'ImageOps.invert', (['x'], {}), '(x)\n', (3313, 3316), False, 'from PIL import ImageOps\n'), ((3379, 3405), 'PIL.Image.new', 'Image.new', (['"""L"""', '(w, 2 * h)'], {}), "('L', (w, 2 * h))\n", (3388, 3405), False, 'from PIL import Image, ImageFilter\n'), ((2366, 2400), 'PIL.ImageFilter.MaxFilter', 'ImageFilter.MaxFilter', (['filter_size'], {}), '(filter_size)\n', (2387, 2400), False, 'from PIL import Image, ImageFilter\n'), ((2797, 2831), 'PIL.ImageFilter.MinFilter', 'ImageFilter.MinFilter', (['filter_size'], {}), '(filter_size)\n', (2818, 2831), False, 'from PIL import Image, ImageFilter\n'), ((3077, 3100), 'argparse.FileType', 'argparse.FileType', (['"""rb"""'], {}), "('rb')\n", (3094, 3100), False, 'import argparse\n'), ((996, 1031), 'numpy.asarray', 'np.asarray', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (1006, 1031), True, 'import numpy as np\n'), ((3254, 3272), 'PIL.Image.open', 'Image.open', (['f', '"""r"""'], {}), "(f, 'r')\n", (3264, 3272), False, 'from PIL import Image, ImageFilter\n')] |
# Copyright (c) 2014, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import unittest
from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.parser import X86Parser
class X86Parser32BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_32)
def test_two_oprnd_reg_reg(self):
asm = self._parser.parse("add eax, ebx")
self.assertEqual(str(asm), "add eax, ebx")
def test_two_oprnd_reg_imm(self):
asm = self._parser.parse("add eax, 0x12345678")
self.assertEqual(str(asm), "add eax, 0x12345678")
def test_two_oprnd_reg_mem(self):
asm = self._parser.parse("add eax, [ebx + edx * 4 + 0x10]")
self.assertEqual(str(asm), "add eax, [ebx+edx*4+0x10]")
def test_two_oprnd_mem_reg(self):
asm = self._parser.parse("add [ebx + edx * 4 + 0x10], eax")
self.assertEqual(str(asm), "add [ebx+edx*4+0x10], eax")
def test_one_oprnd_reg(self):
asm = self._parser.parse("inc eax")
self.assertEqual(str(asm), "inc eax")
def test_one_oprnd_imm(self):
asm = self._parser.parse("jmp 0x12345678")
self.assertEqual(str(asm), "jmp 0x12345678")
def test_one_oprnd_mem(self):
asm = self._parser.parse("inc dword ptr [ebx+edx*4+0x10]")
self.assertEqual(str(asm), "inc dword ptr [ebx+edx*4+0x10]")
def test_zero_oprnd(self):
asm = self._parser.parse("nop")
self.assertEqual(str(asm), "nop")
# Misc
# ======================================================================== #
def test_misc_1(self):
asm = self._parser.parse("mov dword ptr [-0x21524111], ecx")
self.assertEqual(str(asm), "mov dword ptr [-0x21524111], ecx")
self.assertNotEqual(str(asm), "mov dword ptr [0xdeadbeef], ecx")
def test_misc_2(self):
asm = self._parser.parse("fucompi st(1)")
self.assertEqual(str(asm), "fucompi st1")
class X86Parser64BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_64)
def test_64_two_oprnd_reg_reg(self):
asm = self._parser.parse("add rax, rbx")
self.assertEqual(str(asm), "add rax, rbx")
def test_64_two_oprnd_reg_reg_2(self):
asm = self._parser.parse("add rax, r8")
self.assertEqual(str(asm), "add rax, r8")
def test_64_two_oprnd_reg_mem(self):
asm = self._parser.parse("add rax, [rbx + r15 * 4 + 0x10]")
self.assertEqual(str(asm), "add rax, [rbx+r15*4+0x10]")
# Misc
# ======================================================================== #
def test_misc_offset_1(self):
asm = self._parser.parse("add byte ptr [rax+0xffffff89], cl")
self.assertEqual(str(asm), "add byte ptr [rax+0xffffff89], cl")
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"unittest.main",
"barf.arch.x86.parser.X86Parser"
]
| [((4163, 4178), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4176, 4178), False, 'import unittest\n'), ((1610, 1637), 'barf.arch.x86.parser.X86Parser', 'X86Parser', (['ARCH_X86_MODE_32'], {}), '(ARCH_X86_MODE_32)\n', (1619, 1637), False, 'from barf.arch.x86.parser import X86Parser\n'), ((3386, 3413), 'barf.arch.x86.parser.X86Parser', 'X86Parser', (['ARCH_X86_MODE_64'], {}), '(ARCH_X86_MODE_64)\n', (3395, 3413), False, 'from barf.arch.x86.parser import X86Parser\n')] |
'''Analysis utility functions.
:Author: <NAME> <<EMAIL>>
:Date: 2016-03-26
:Copyright: 2016-2018, Karr Lab
:License: MIT
'''
# TODO(Arthur): IMPORTANT: refactor and replace
from matplotlib import pyplot
from matplotlib import ticker
from wc_lang import Model, Submodel
from scipy.constants import Avogadro
import numpy as np
import re
def plot(model, time = np.zeros(0),
species_counts = None, volume = np.zeros(0), extracellular_volume = np.zeros(0),
selected_species_compartments = [],
yDatas = {},
units = 'mM', title = '', fileName = ''):
#convert time to hours
time = time.copy() / 3600
#create figure
fig = pyplot.figure()
#extract data to plot
if not yDatas:
yDatas = {}
for species_compartment_id in selected_species_compartments:
#extract data
match = re.match('^(?P<speciesId>[a-z0-9\-_]+)\[(?P<compartmentId>[a-z0-9\-_]+)\]$',
species_compartment_id, re.I).groupdict()
speciesId = match['speciesId']
compartmentId = match['compartmentId']
if isinstance(model, Model):
species = model.get_component_by_id(speciesId, 'species')
compartment = model.get_component_by_id(compartmentId, 'compartments')
yData = species_counts[species.index, compartment.index, :]
elif isinstance(model, Submodel):
yData = species_counts[species_compartment_id]
else:
raise Exception('Invalid model type %s' % model.__class__.__name__)
#scale
if compartmentId == 'c':
V = volume
else:
V = extracellular_volume
if units == 'pM':
scale = 1 / Avogadro / V * 1e12
elif units == 'nM':
scale = 1 / Avogadro / V * 1e9
elif units == 'uM':
scale = 1 / Avogadro / V * 1e6
elif units == 'mM':
scale = 1 / Avogadro / V * 1e3
elif units == 'M':
scale = 1 / Avogadro / V * 1e0
elif units == 'molecules':
scale = 1
else:
raise Exception('Invalid units "%s"' % units)
yData *= scale
yDatas[species_compartment_id] = yData
#plot results
yMin = 1e12
yMax = -1e12
for label, yData in yDatas.items():
#update range
yMin = min(yMin, np.min(yData))
yMax = max(yMax, np.max(yData))
#add to plot
pyplot.plot(time, yData, label=label)
#set axis limits
pyplot.xlim((0, time[-1]))
pyplot.ylim((yMin, yMax))
#add axis labels and legend
if title:
pyplot.title(title)
pyplot.xlabel('Time (h)')
if units == 'molecules':
pyplot.ylabel('Copy number')
else:
pyplot.ylabel('Concentration (%s)' % units)
y_formatter = ticker.ScalarFormatter(useOffset=False)
pyplot.gca().get_yaxis().set_major_formatter(y_formatter)
if len(selected_species_compartments) > 1:
pyplot.legend()
#save
if fileName:
fig.savefig(fileName)
pyplot.close(fig)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"re.match",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.ticker.ScalarFormatter",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
]
| [((362, 373), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (370, 373), True, 'import numpy as np\n'), ((411, 422), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (419, 422), True, 'import numpy as np\n'), ((447, 458), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (455, 458), True, 'import numpy as np\n'), ((651, 666), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (664, 666), False, 'from matplotlib import pyplot\n'), ((2617, 2643), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(0, time[-1])'], {}), '((0, time[-1]))\n', (2628, 2643), False, 'from matplotlib import pyplot\n'), ((2648, 2673), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(yMin, yMax)'], {}), '((yMin, yMax))\n', (2659, 2673), False, 'from matplotlib import pyplot\n'), ((2754, 2779), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time (h)"""'], {}), "('Time (h)')\n", (2767, 2779), False, 'from matplotlib import pyplot\n'), ((2928, 2967), 'matplotlib.ticker.ScalarFormatter', 'ticker.ScalarFormatter', ([], {'useOffset': '(False)'}), '(useOffset=False)\n', (2950, 2967), False, 'from matplotlib import ticker\n'), ((2553, 2590), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time', 'yData'], {'label': 'label'}), '(time, yData, label=label)\n', (2564, 2590), False, 'from matplotlib import pyplot\n'), ((2729, 2748), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (2741, 2748), False, 'from matplotlib import pyplot\n'), ((2818, 2846), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Copy number"""'], {}), "('Copy number')\n", (2831, 2846), False, 'from matplotlib import pyplot\n'), ((2865, 2908), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (["('Concentration (%s)' % units)"], {}), "('Concentration (%s)' % units)\n", (2878, 2908), False, 'from matplotlib import pyplot\n'), ((3086, 3101), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (3099, 3101), False, 'from matplotlib import pyplot\n'), ((3168, 3185), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (3180, 3185), False, 'from matplotlib import pyplot\n'), ((2468, 2481), 'numpy.min', 'np.min', (['yData'], {}), '(yData)\n', (2474, 2481), True, 'import numpy as np\n'), ((2508, 2521), 'numpy.max', 'np.max', (['yData'], {}), '(yData)\n', (2514, 2521), True, 'import numpy as np\n'), ((848, 963), 're.match', 're.match', (['"""^(?P<speciesId>[a-z0-9\\\\-_]+)\\\\[(?P<compartmentId>[a-z0-9\\\\-_]+)\\\\]$"""', 'species_compartment_id', 're.I'], {}), "('^(?P<speciesId>[a-z0-9\\\\-_]+)\\\\[(?P<compartmentId>[a-z0-9\\\\-_]+)\\\\]$'\n , species_compartment_id, re.I)\n", (856, 963), False, 'import re\n'), ((2972, 2984), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (2982, 2984), False, 'from matplotlib import pyplot\n')] |
# -*- coding: utf-8 -*-
"""
Handles the tournament logic
"""
import datetime
from chess.utils.utils import get_new_id
from chess.models.actors import Player
from chess.models.round import Round
TOURNAMENT_ID_WIDTH = 8
NB_ROUND = 4
NB_PLAYERS = 8
NB_MATCH = 4
class Tournament:
""" The class Tournament is the central piece of the models. """
last_tournament_id = "0" * TOURNAMENT_ID_WIDTH
def __init__(self, name, location, timer_type, description):
Tournament.last_tournament_id = get_new_id(Tournament.last_tournament_id, TOURNAMENT_ID_WIDTH)
self.tournament_id = Tournament.last_tournament_id
self.name = name
self.location = location
self.start_date = None
self.end_date = None
self.timer_type = timer_type
self.description = description
self.number_of_rounds = NB_ROUND
self.rounds = []
self.list_of_players = []
self.players_assigned = False
self.finished = False
def define_players(self, actors):
""" Defines the list of identifier of the players who join the tournament.
:param actors:
:return: None
"""
for num_player in range(NB_PLAYERS):
self.list_of_players.append(Player(actors[num_player],
self.tournament_id,
num_player))
def init_round(self, num_round):
""" Launches the round number "num_round".
:param num_round: number of the round played
:return: None
"""
tour = Round(num_round, self.tournament_id, self.list_of_players)
tour.start_date = datetime.date.today()
tour.rank_players()
tour.define_matches()
self.rounds.append(tour)
def register_round_results(self, num_round, winner):
""" Registers the results of the round.
:param num_round: the round number.
:param winner: the list of the winners.
:return: None.
"""
self.rounds[num_round].register_results(winner)
self.rounds[num_round].assign_points()
self.rounds[num_round].finished = True
self.rounds[num_round].memorize_opponents()
self.rounds[num_round].rank_players()
self.rounds[num_round].end_date = datetime.date.today()
def tournament_to_dict(self):
""" Converts the tournament into a dictionary
:return: dictionary of the tournament instance.
"""
string_attributes = ['tournament_id',
'name',
'location',
'timer_type',
'description',
'number_of_rounds',
'players_assigned']
serialized_tournament = {}
for attribute in string_attributes:
serialized_tournament[attribute] = getattr(self, attribute)
serialized_tournament['rounds'] = []
for r0und in self.rounds:
serialized_tournament['rounds'].append(r0und.round_to_dict())
serialized_tournament['list_of_players'] = []
for player in self.list_of_players:
serialized_tournament['list_of_players'].append(player.player_to_dict())
serialized_tournament['start_date'] = str(self.start_date)
serialized_tournament['end_date'] = str(self.end_date)
return serialized_tournament
def end_tournament(self):
""" Handles the end of the tournament.
Adds the tournament_id to the players list of tournaments.
Defines the attribute finished and the end date of the tournament.
"""
for player in self.list_of_players:
player.actor.list_of_tournaments_played.append(self.tournament_id)
self.finished = True
self.end_date = datetime.date.today()
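# Hedged usage sketch (editor's addition, not part of the original module),
# showing the call order this class expects; `actors` is assumed to be a list
# of at least NB_PLAYERS Actor instances and `winners` to come from the caller:
#   tournament = Tournament("Club Open", "Lyon", "bullet", "example tournament")
#   tournament.define_players(actors)
#   for num_round in range(tournament.number_of_rounds):
#       tournament.init_round(num_round)
#       tournament.register_round_results(num_round, winners)
#   tournament.end_tournament()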
| [
"chess.utils.utils.get_new_id",
"datetime.date.today",
"chess.models.actors.Player",
"chess.models.round.Round"
]
| [((513, 575), 'chess.utils.utils.get_new_id', 'get_new_id', (['Tournament.last_tournament_id', 'TOURNAMENT_ID_WIDTH'], {}), '(Tournament.last_tournament_id, TOURNAMENT_ID_WIDTH)\n', (523, 575), False, 'from chess.utils.utils import get_new_id\n'), ((1608, 1666), 'chess.models.round.Round', 'Round', (['num_round', 'self.tournament_id', 'self.list_of_players'], {}), '(num_round, self.tournament_id, self.list_of_players)\n', (1613, 1666), False, 'from chess.models.round import Round\n'), ((1693, 1714), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1712, 1714), False, 'import datetime\n'), ((2331, 2352), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2350, 2352), False, 'import datetime\n'), ((3885, 3906), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3904, 3906), False, 'import datetime\n'), ((1262, 1320), 'chess.models.actors.Player', 'Player', (['actors[num_player]', 'self.tournament_id', 'num_player'], {}), '(actors[num_player], self.tournament_id, num_player)\n', (1268, 1320), False, 'from chess.models.actors import Player\n')] |
#!/usr/bin/env python
"""
#
# set-config - a small Python program to set up the configuration environment for data-collect.py
# data-collect.py contains the Python program to gather metrics from vROps
# Author <NAME> <<EMAIL>>
#
"""
# Importing the required modules
import json
import base64
import os,sys
# Getting the absolute path from where the script is being run
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def get_the_inputs():
adapterkind = raw_input("Please enter Adapter Kind: ")
resourceKind = raw_input("Please enter Resource Kind: ")
servername = raw_input("Enter enter Server IP/FQDN: ")
serveruid = raw_input("Please enter user id: ")
serverpasswd = raw_input("Please enter vRops password: ")
encryptedvar = base64.b64encode(serverpasswd)
maxsamples = raw_input("Please enter the maximum number of samples to collect: ")
keys_to_monitor = raw_input("Please enter the number of keys to monitor: ")
keys = []
for i in range(int(keys_to_monitor)):
keys.append(raw_input("Enter the key: "))
data = {}
if int(maxsamples) < 1:
maxsamples = 1
data["adapterKind"] = adapterkind
data["resourceKind"] = resourceKind
data["sampleno"] = int(maxsamples)
serverdetails = {}
serverdetails["name"] = servername
serverdetails["userid"] = serveruid
serverdetails["password"] = encryptedvar
data["server"] = serverdetails
data["keys"] = keys
return data
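# Editor's note (not in the original): base64 only obfuscates the password, it
# is not encryption.  data-collect.py (not shown here) is assumed to recover it
# with the reverse call, e.g.:
#   serverpasswd = base64.b64decode(data["server"]["password"])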
# Getting the path where config.json file should be kept
path = get_script_path()
fullpath = path+"/"+"config.json"
# Getting the data for the config.json file
final_data = get_the_inputs()
# Saving the data to config.json file
with open(fullpath, 'w') as outfile:
json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False) | [
"os.path.realpath",
"base64.b64encode",
"json.dump"
]
| [((785, 815), 'base64.b64encode', 'base64.b64encode', (['serverpasswd'], {}), '(serverpasswd)\n', (801, 815), False, 'import base64\n'), ((1773, 1876), 'json.dump', 'json.dump', (['final_data', 'outfile'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ':')", 'ensure_ascii': '(False)'}), "(final_data, outfile, sort_keys=True, indent=2, separators=(',',\n ':'), ensure_ascii=False)\n", (1782, 1876), False, 'import json\n'), ((419, 448), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (435, 448), False, 'import os, sys\n')] |
# CoDVote plugin for BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2015 ph03n1x
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Changelog:
# v1.0.1 - Fixed vote remaining in progress if requirements for vote unmet.
# v1.0.2 - Added "!vote maps" to show what maps can be called into vote.
# - Fixed issue where person who called vote needed to vote as well. Changed to automatic yes vote.
__version__ = '1.0.2'
__author__ = 'ph03n1x'
import b3, threading
import b3.plugin
import b3.events
class CodvotePlugin(b3.plugin.Plugin):
adminPlugin = None
_vote = None # Stores which vote is currently in progress
_value = None # Stores the value of the vote
_votetime = 30 # Time before a vote will be canceled for not passing
_aVotes = {} # All votes allowed. Imported from "votes" section in config
_aMaps = {} # All vote allowed maps. Imported from "votemaps" section in config
_amt_yes = [] # Amount of players who voted yes. Checked against amount of players in game
_amt_no = []
_allplayers = [] # Amount of players in game
_mapRequested = None # Stores which map is being voted for
_kickRequested = None # Stores which player will be kicked if vote passed
_default_messages = {
'tovote': '^7Use ^2!yes ^7or ^2!no ^7 to vote',
'map': "Map vote in progress: Change map to ^3$s^7?",
'nextmap': "Next map vote in progress. Change next map to ^3$s^7?",
'kick': "Kick vote in progress: Kick ^2$s^7?",
'maprotate': "Rotate map vote in progress. Go to next map?",
'maprestart': "Maprestart vote in progress. Restart current map?",
'friendlyfire': "Friendlyfire vote in progress. Change friendlyfire mode to ^2$s^7?",
'killcam': "Killcam vote in progress. Turn killcam ^2$s^7?",
'scorelimit': "Scorelimit vote in progress. Change score limit to ^2$s^7?",
'timelimit': "Timelimit vote in progress. Change time limit to ^2$s^7?",
'roundlength': "Round length vote in progress. Change round length to ^2$s^7?",
'roundlimit': "Round limit vote in progress. Change round limit to ^2$s^7?",
}
def onStartup(self):
self.adminPlugin = self.console.getPlugin('admin')
if not self.adminPlugin:
self.error('Could not find admin plugin')
return
# Register commands
if 'commands' in self.config.sections():
for cmd in self.config.options('commands'):
level = self.config.get('commands', cmd)
sp = cmd.split('-')
alias = None
if len(sp) == 2:
cmd, alias = sp
func = self.getCmd(cmd)
if func:
self.adminPlugin.registerCommand(self, cmd, level, func, alias)
# Re-deploy commands for consideration of this plugin
self.adminPlugin.registerCommand(self, 'nextmap', 1, self.cmd_nextmap, 'nm')
self.adminPlugin.registerCommand(self, 'maprotate', 20, self.cmd_maprotate, None)
self.adminPlugin.registerCommand(self, 'allvotes', 1, self.cmd_allvotes, None)
# Register events
self.registerEvent('EVT_GAME_EXIT', self.onGameEnd)
def onLoadConfig(self):
# Load settings section
try:
self._votetime = self.config.getint('settings', 'votetime')
except:
self.debug('Unable to get [votetime] from settings. Using default: %s' % self._votetime)
# Load votemaps section
if self.config.has_section('votemaps'):
for (mapname, consolename) in self.config.items('votemaps'):
if mapname:
self._aMaps[mapname] = consolename
self.debug('Successfully entered maps for voting: %s' % self._aMaps)
# Load votes section
if self.config.has_section('votes'):
adLvl = {'guest': 0,
'user': 1,
'reg': 2,
'mod': 20,
'admin': 40,
'fulladmin': 60,
'senioradmin': 80,
'superadmin': 100}
for (entry, value) in self.config.items('votes'):
try:
value = int(value)
self._aVotes[entry.lower()] = value
except ValueError:
self._aVotes[entry.lower()] = adLvl[value]
self.debug('Allowed votes are: %s' % self._aVotes)
def getCmd(self, cmd):
cmd = 'cmd_%s' % cmd
if hasattr(self, cmd):
func = getattr(self, cmd)
return func
return None
######################### VOTE TIMING ##############################
def voteTimer(self):
t1 = threading.Timer((self._votetime - 5), self.voteMessage)
t1.start()
def voteMessage(self):
if self._vote:
self.console.say('^110 seconds until vote end!')
t2 = threading.Timer(10, self.denyVote)
t2.start()
######################### MAP HANDLING ##############################
def _search(self, maplist, partial):
a = []
for mapname, consolename in maplist.iteritems():
if partial in mapname:
a.append(mapname)
elif partial in consolename:
a.append(mapname)
return a
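    # Illustrative examples of the partial matching above (map names assumed):
    #   self._search({'ambush': 'mp_convoy', 'backlot': 'mp_backlot'}, 'back')
    #   -> ['backlot']                  # matched on the friendly name
    #   self._search({'ambush': 'mp_convoy'}, 'convoy') -> ['ambush']
    #   # matched on the console name, but the friendly name is returned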
def mapvote(self, client, wantedMap):
# Find if map is in allowed list
match = self._search(self._aMaps, wantedMap)
if len(match) == 1:
self._mapRequested = match[0]
self._value = match[0]
return True
elif len(match) > 1:
match = (', ').join(match)
client.message('^1ABORTED!^7Multiple matches: %s' % match)
return False
elif len(match) == 0:
client.message('^1ABORTED!^7No maps matching your request')
return False
############### NEXTMAP FUNCTIONING ################
def onGameEnd(self, event):
"""
Handle EVT_GAME_ROUND_END
"""
if self._mapRequested:
self.confirmMap()
self._mapRequested = None
############### CONFIRM VOTES ######################
def confirmVote(self):
self.console.say('^3Vote passed!^7')
if self._vote == 'map':
self.confirmMap()
elif self._vote == 'nextmap':
self.debug('nextmap vote passed. Params already stored')
elif self._vote == 'kick':
self.confirmKick()
elif self._vote == 'maprotate':
if self._mapRequested:
self.confirmMap()
else:
self.console.rotateMap()
elif self._vote == 'maprestart':
self.confirmMaprestart()
elif self._vote == 'friendlyfire':
self.confirmFriendlyFire()
elif self._vote == 'killcam':
self.confirmKillCam()
elif self._vote == 'scorelimit':
self.confirmScoreLimit()
elif self._vote == 'timelimit':
self.confirmTimeLimit()
elif self._vote == 'roundlength':
self.confirmRoundLength()
elif self._vote == 'roundlimit':
self.confirmRoundLimit()
else:
self.error('Unable to commit. Vote: %s, Value: %s' % (self._vote, self._value))
self._vote = None
self._value = None
self._amt_no = []
self._amt_yes = []
self._allplayers = []
def denyVote(self):
if self._vote:
self.console.say('^3Vote failed!')
self._vote = None
self._value = None
self._amt_no = []
self._amt_yes = []
self._allplayers = []
def confirmKick(self):
# Note - to kick someone we need: client.kick(reason, keyword, admin, silent=True/False, data)
s = self._kickRequested
self.debug('Kick vote passed. Kicking %s' % s.name)
s.kick('Voted against', '', None, True, '')
self._kickRequested = None
def confirmMap(self):
# This will cycle to next map when needed.
self.console.write('map %s' % self._aMaps[self._mapRequested])
self._mapRequested = None
def confirmMaprestart(self):
# This will restart the current map
self.console.write('fast_restart')
def confirmFriendlyFire(self):
# This will toggle friendly fire on and off
setting = self._value
if not isinstance(setting, int):
if self._value == 'on':
setting = 1
elif self._value == 'off':
setting = 0
else:
self.debug('Unknown wanted setting for Friendlyfire. Toggling to next mode')
now = self.console.getCvar('scr_team_fftype').getInt()
if now >= 1:
setting = 0
elif now == 0:
setting = 1
self.console.setCvar('scr_team_fftype', int(setting))
def confirmKillCam(self):
# rcon for killcam: scr_game_allowkillcam - 0 or 1
setting = self._value
if self._value == 'on':
setting = 1
elif self._value == 'off':
setting = 0
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
now = self.console.getCvar('scr_game_allowkillcam').getInt()
self.debug('Setting being voted for is not valid. Toggling to next mode. Killcam currently: %s' % now)
if now == 0:
setting = 1
else:
setting = 0
self.console.setCvar('scr_game_allowkillcam', int(setting))
def confirmScoreLimit(self):
# CVAR to write is scr_<gametype>_scorelimit <number>
setting = self._value
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new scorelimit. Voted value is not integer')
return
cparams = 'scr_' + gt + '_scorelimit'
self.console.setCvar(cparams, setting)
def confirmTimeLimit(self):
setting = self._value
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new timelimit. Voted value is not integer')
return
cparams = 'scr_' + gt + '_timelimit'
self.console.setCvar(cparams, setting)
def confirmRoundLength(self):
setting = self._value
amodes = ['ctf', 'sd', 're', 'bas', 'dom']
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('ERROR: Could not set new round length. Voted value is not integer')
return
if gt in amodes:
cparams = 'scr_' + gt + '_roundlength'
self.console.setCvar(cparams, setting)
def confirmRoundLimit(self):
setting = self._value
amodes = ['ctf', 'sd', 're', 'bas', 'dom']
gt = self.getGameType()
if not isinstance(setting, int):
try:
setting = int(setting)
except ValueError:
self.debug('Could not set new round limit. Voted value is not integer')
return
if gt in amodes:
cparams = 'scr_' + gt + '_roundlimit'
self.console.setCvar(cparams, setting)
else:
self.debug('Could not set round limit as gametype do not have rounds')
def getGameType(self):
gametype = self.console.getCvar('g_gametype').getString()
if gametype:
return gametype
else:
self.debug('Error getting gametype. Response is %s' % gametype)
return False
def sendBroadcast(self):
# This wil broadcast vote message to server.
a = self._value
if a == 'maprestart' or a == 'maprotate':
self.console.say(self.getMessage(self._vote))
elif a != 'maprestart' and a != 'maprotate':
param = {'s': a}
self.console.say(self.getMessage(self._vote, param))
self.console.say(self.getMessage('tovote'))
def aquireCmdLock2(self, cmd, client, delay, all=True):
if client.maxLevel >= 20:
return True
elif cmd.time + 5 <= self.console.time():
return True
else:
return False
def checkIfAllowed(self, client, voteType):
if client.maxLevel >= self._aVotes[voteType]:
return True
else:
return False
#################################################################################
# COMMANDS #
#################################################################################
def cmd_vote(self, data, client, cmd=None):
"""\
!vote <setting> <value> - vote to change setting or cvar on server.
"""
# Check if vote already in progress
if self._vote:
client.message('^1ERROR^7: Vote already in progress')
return
# Check if we have enough data for vote
data = data.split()
        if len(data) == 1 and data[0] in ('maprotate', 'maprestart', 'maps'):
self._vote = data[0]
self._value = data[0]
elif len(data) == 2:
type = data[0]
value = data[1]
self._vote = type
self._value = value
else:
client.message('^1ERROR^7: Invalid usage. Type ^2!help vote ^7for info')
return
# Check if player is asking what maps can be voted on
if self._vote == 'maps':
v1 = self.checkIfAllowed(client, 'map')
v2 = self.checkIfAllowed(client, 'nextmap')
if v1 or v2:
cmd.sayLoudOrPM(client, 'Vote enabled maps: ^2%s' % (('^7, ^2').join(self._aMaps.keys())))
self._vote = None
self._value = None
return
else:
client.message('^2You do not have permission to call map votes')
self._vote = None
self._value = None
return
# Check if enough players in game to vote and store present players. Only players present at vote call can vote
playersInGame = 0
self._allplayers = []
for c in self.console.clients.getList():
if c.team != b3.TEAM_SPEC:
playersInGame += 1
self._allplayers.insert(0, c)
if playersInGame <= 1 and client.maxLevel < 100:
client.message('^1ABORT^7: Not enough players in game to vote.')
self._vote = None
return
# Check if type of vote is allowed
if self._vote not in self._aVotes:
client.message('Vote type not allowed. Use ^2!allvotes ^7for available votes.')
self._vote = None
return
# Check if player has permission to call vote type
v = self.checkIfAllowed(client, self._vote)
if not v:
client.message('You do not have permission to call this vote')
self._vote = None
return
# Get further info for proper processing
if self._vote == 'map' or self._vote == 'nextmap':
q = self.mapvote(client, self._value)
if not q:
self.debug('Vote aborted: Cannot vote for maps. mapvote turned out false')
self._vote = None
return
if self._vote == 'kick':
self._kickRequested = self.adminPlugin.findClientPrompt(self._value, client)
if self._kickRequested:
if self._kickRequested.maxLevel >= 20:
client.message('^1ABORTED^7: Cannot vote to kick admin!')
self._vote = None
self._value = None
self._kickRequested = None
return
self._value = self._kickRequested.name
else:
self.debug('could not get the person to kick')
self._vote = None
self._value = None
self._kickRequested = None
return
# Seems like vote is ok. Broadcast to server
self.sendBroadcast()
# Start timer
self.voteTimer()
# Set person who called vote as yes vote
self._amt_yes.insert(0, client)
if len(self._amt_yes) > (len(self._allplayers) / 2):
self.confirmVote()
def cmd_allvotes(self, data, client, cmd=None):
"""\
Show all the votes you are allowed to call
"""
allowed = []
for k in self._aVotes.keys():
if client.maxLevel >= self._aVotes[k]:
allowed.insert(0, k)
if len(allowed) > 0:
p = sorted(allowed)
x = (', ').join(p)
client.message('Allowed votes are: %s' % x)
elif len(allowed) == 0:
client.message('You are not allowed to call any votes')
def cmd_yes(self, data, client, cmd=None):
"""\
Vote yes to the vote in progress
"""
# Check if there is a vote in progress
if not self._vote:
client.message('No vote in progress')
return
# Check if player is allowed to vote
if client not in self._allplayers:
client.message('Sorry, you cannot enter current vote')
return
# Check if the player already voted. If not, register vote
if client in self._amt_yes or client in self._amt_no:
client.message('Are you drunk? You already voted!')
return
elif client not in self._amt_yes or client not in self._amt_no:
self._amt_yes.insert(0, client)
# Let player know that vote is registered
client.message('^3Your vote has been entered')
# Check if majority of players voted already
vYes = len(self._amt_yes)
vPass = len(self._allplayers) / 2
if vYes > vPass:
self.confirmVote()
def cmd_no(self, data, client=None, cmd=None):
"""\
Vote NO to the current vote
"""
# Check if there is a vote in progress
if not self._vote:
client.message('No vote in progress')
return
# Check if player is allowed to vote
if client not in self._allplayers:
client.message('Sorry, you cannot enter current vote')
return
# Check if the player already voted
if client in self._amt_yes or client in self._amt_no:
client.message('Are you drunk? You already voted!')
return
elif client not in self._amt_yes or client not in self._amt_no:
self._amt_no.insert(0, client)
# Let player know that vote is registered
client.message('^3Your vote has been entered')
# Check if majority of players voted
vNo = len(self._amt_no)
vPass = len(self._allplayers) / 2
if vNo > vPass:
self.denyVote()
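    # Editor's note on the vote math used in cmd_yes/cmd_no above: under the
    # Python 2 integer division, len(self._allplayers) / 2 rounds down, so with
    # e.g. 5 eligible players vPass == 2 and a strict majority of 3 matching
    # votes is needed before the vote is confirmed or denied.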
def cmd_nextmap(self, data, client=None, cmd=None):
"""\
- list the next map in rotation
"""
if not self.aquireCmdLock2(cmd, client, 60, True):
client.message('^7Do not spam commands')
return
if self._mapRequested:
cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % self._mapRequested.title())
return
mapname = self.console.getNextMap()
if mapname:
cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % mapname)
else:
client.message('^1Error:^7 could not get map list')
def cmd_maprotate(self, data, client, cmd=None):
"""\
Cycle to next map in rotation
"""
if self._mapRequested:
self.confirmMap()
else:
self.console.rotateMap()
def cmd_veto(self, data, client, cmd=None):
"""\
Cancel a vote in progress
"""
if self._vote:
client.message('^3Vote canceled')
self.denyVote()
elif not self._vote:
client.message('^3No vote in progress')
| [
"threading.Timer"
]
| [((5453, 5506), 'threading.Timer', 'threading.Timer', (['(self._votetime - 5)', 'self.voteMessage'], {}), '(self._votetime - 5, self.voteMessage)\n', (5468, 5506), False, 'import b3, threading\n'), ((5657, 5691), 'threading.Timer', 'threading.Timer', (['(10)', 'self.denyVote'], {}), '(10, self.denyVote)\n', (5672, 5691), False, 'import b3, threading\n')] |
"""
Script updates `README.md` with respect to the files in the ./<language>/easy and ./<language>/medium folders.
"""
import os
curr_dir = os.path.dirname(__file__)
with open(os.path.join(curr_dir, "README.md"), 'w') as readme:
readme.write("# LeetCode\nDeliberate practice in coding.\n")
langs = [l for l in os.listdir(curr_dir) if os.path.isdir(os.path.join(curr_dir, l)) and l[0] != '.']
for lang in langs:
readme.write("## {}\n".format(lang))
readme.write("### Easy\n")
easy = sorted(os.listdir(f"{curr_dir}/{lang}/easy"))
easy = [x.split("_")[0] for x in easy]
easy_solved = ""
for el in easy:
easy_solved += "{}, ".format(el)
readme.write(easy_solved[:-2] + "\n")
readme.write("### Medium\n")
medium = sorted(os.listdir(f"{curr_dir}/{lang}/medium"))
medium = [x.split("_")[0] for x in medium]
medium_solved = ""
for el in medium:
medium_solved += "{}, ".format(el)
readme.write(medium_solved[:-2] + '\n')
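# Editor's note: the split("_")[0] above relies on solution files being named
# "<problem-number>_<slug>.<ext>", e.g. (hypothetical file name)
#   "0001_two_sum.py".split("_")[0]  ->  "0001"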
| [
"os.path.dirname",
"os.listdir",
"os.path.join"
]
| [((111, 136), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (126, 136), False, 'import os\n'), ((147, 182), 'os.path.join', 'os.path.join', (['curr_dir', '"""README.md"""'], {}), "(curr_dir, 'README.md')\n", (159, 182), False, 'import os\n'), ((289, 309), 'os.listdir', 'os.listdir', (['curr_dir'], {}), '(curr_dir)\n', (299, 309), False, 'import os\n'), ((496, 533), 'os.listdir', 'os.listdir', (['f"""{curr_dir}/{lang}/easy"""'], {}), "(f'{curr_dir}/{lang}/easy')\n", (506, 533), False, 'import os\n'), ((783, 822), 'os.listdir', 'os.listdir', (['f"""{curr_dir}/{lang}/medium"""'], {}), "(f'{curr_dir}/{lang}/medium')\n", (793, 822), False, 'import os\n'), ((327, 352), 'os.path.join', 'os.path.join', (['curr_dir', 'l'], {}), '(curr_dir, l)\n', (339, 352), False, 'import os\n')] |
# -*- coding: utf-8 -*-
#retriever
import csv
from pkg_resources import parse_version
from retriever.lib.models import Table
from retriever.lib.templates import Script
try:
from retriever.lib.defaults import VERSION
try:
from retriever.lib.tools import open_fr, open_fw, open_csvw
except ImportError:
from retriever.lib.scripts import open_fr, open_fw
except ImportError:
from retriever import open_fr, open_fw, VERSION
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.title = "Commercial Fisheries Monthly Trade Data by Product, Country/Association"
self.name = "biotimesql"
self.retriever_minimum_version = "2.2.0"
self.urls = {
"sql_file": "https://zenodo.org/record/2602708/files/BioTIMESQL02_04_2018.sql?download=1",
}
self.version = "1.0.1"
self.ref = "https://zenodo.org/record/1095628#.WskN7dPwYyn"
self.citation = "<NAME>, <NAME>, <NAME>, et al. BioTIME: A database of biodiversity time series for the Anthropocene. Global Ecology & Biogeography. 2018; 00:1 - 26. https://doi.org/10.1111/geb.12729."
self.description = "The BioTIME database has species identities and abundances in ecological assemblages through time."
self.keywords = ["Time series", "Anthropocene", "Global"]
self.licenses = [{"name": "CC BY 4.0"}]
self.encoding = "latin1"
if parse_version(VERSION) <= parse_version("2.0.0"):
self.shortname = self.name
self.name = self.title
self.tags = self.keywords
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
engine = self.engine
original_sql_file = "BioTIMESQL02_04_2018.sql"
engine.download_file(self.urls["sql_file"], original_sql_file)
sql_data = open_fr(self.engine.format_filename(original_sql_file))
set_open = False
csv_writer = None
csv_file = None
table_name = None
NULL = None
for line in sql_data:
table_indicator = "-- Table structure for table "
if line.startswith(table_indicator):
st = line[len(table_indicator):].replace("`", "")
table_name = st.strip()
current_file_process = table_name
current_file_open = current_file_process
if set_open and not current_file_process == current_file_open:
csv_file.close()
set_open = False
else:
out_file = "{name}.csv".format(name=table_name)
csv_file = open_fw(engine.format_filename(out_file))
csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
set_open = True
if line.startswith("INSERT INTO `{table_name}`".format(table_name=table_name)):
row_val = line[line.index("VALUES (") + 8:-3]
table_rows = row_val.replace("\r\n","").split("),(")
for i_row in table_rows:
v = eval('[' + str(i_row) + ']')
csv_writer.writerows([v])
if csv_file:
csv_file.close()
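        # Editor's note: each "INSERT INTO `<table>` VALUES (...);" line above is
        # split on "),(" into individual rows and eval'd into a Python list; the
        # NULL = None binding defined earlier lets any SQL NULL tokens inside
        # those rows evaluate to None.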
# Create abundance table
table = Table("ID_ABUNDANCE", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_ABUNDANCE", ("int",)),
("ABUNDANCE_TYPE", ("char", "100")),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("abundance.csv"))
# Create allrawdata table
table = Table("allrawdata", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_ALL_RAW_DATA", ("int",)),
("ABUNDANCE", ("double",)),
("BIOMASS", ("double",)),
("ID_SPECIES", ("int",)),
("SAMPLE_DESC", ("char", 200)),
("PLOT", ("char", 150)),
("LATITUDE", ("double",)),
("LONGITUDE", ("double",)),
("DEPTH", ("double",)),
("DAY", ("int",)),
("MONTH", ("int",)),
("YEAR", ("int",)),
("STUDY_ID", ("int",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("allrawdata.csv"))
# Create biomass table
table = Table("biomass", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [("ID_BIOMASS", ("int",)), ("BIOMASS_TYPE", ("char", "100"))]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("biomass.csv"))
# Create citation1 table
table = Table("citation1", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CITATION1", ("int",)),
("STUDY_ID", ("int",)),
("CITATION_LINE", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("citation1.csv"))
# Create contacts table
table = Table("contacts", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CONTACTS", ("int",)),
("STUDY_ID", ("int",)),
("CONTACT_1", ("char", 500)),
("CONTACT_2", ("char", 500)),
("CONT_1_MAIL", ("char", 60)),
("CONT_2_MAIL", ("char", 60)),
("LICENSE", ("char", 200)),
("WEB_LINK", ("char", 200)),
("DATA_SOURCE", ("char", 250)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("contacts.csv"))
# Create countries table
table = Table("countries", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [("COUNT_ID", ("int",)), ("COUNTRY_NAME", ("char", 200))]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("countries.csv"))
# Create curation table
table = Table("curation", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CURATION", ("int",)),
("STUDY_ID", ("int",)),
("LINK_ID", ("int",)),
("COMMENTS", ("char",)),
("DATE_STUDY_ADDED", ("char", 50)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("curation.csv"))
# Create datasets table
table = Table("datasets", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_DATASETS", ("int",)),
("STUDY_ID", ("int",)),
("TAXA", ("char", 50)),
("ORGANISMS", ("char", 200)),
("TITLE", ("char",800)),
("AB_BIO", ("char", 2)),
("HAS_PLOT", ("char", 10)),
("DATA_POINTS", ("char",)),
("START_YEAR", ("char",)),
("END_YEAR", ("char",)),
("CENT_LAT", ("double",)),
("CENT_LONG", ("double",)),
("NUMBER_OF_SPECIES", ("char",)),
("NUMBER_OF_SAMPLES", ("char",)),
("NUMBER_LAT_LONG", ("char",)),
("TOTAL", ("char",)),
("GRAIN_SIZE_TEXT", ("char",)),
("GRAIN_SQ_KM", ("double",)),
("AREA_SQ_KM", ("double",)),
("AB_TYPE", ("char", )),
("BIO_TYPE", ("char",)),
("SAMPLE_TYPE", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("datasets.csv"))
# Create downloads table
table = Table("downloads", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("D_ID", ("int",)),
("STUDY", ("char", 25)),
("NAME", ("char", 150)),
("EMAIL", ("char", 150)),
("COUNTRY", ("char", 200)),
("ROLE", ("char", 150)),
("PURPOSE", ("char", 500)),
("LOCATION", ("char", 250)),
("DATE_STAMP", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("downloads.csv"))
# Create methods table
table = Table("methods", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_METHODS", ("int",)),
("STUDY_ID", ("int",)),
("METHODS", ("char",)),
("SUMMARY_METHODS", ("char", 500)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("methods.csv"))
# Create sample table
table = Table("sample", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SAMPLE", ("int",)),
("ID_TREAT", ("int",)),
("SAMPLE_DESC_NAME", ("char", 200)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("sample.csv"))
# Create site table
table = Table("site", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SITE", ("int",)),
("STUDY_ID", ("int",)),
("REALM", ("char", 11)),
("CLIMATE", ("char", 20)),
("GENERAL_TREAT", ("char", 200)),
("TREATMENT", ("char", 200)),
("TREAT_COMMENTS", ("char", 250)),
("TREAT_DATE", ("char", 100)),
("CEN_LATITUDE", ("double",)),
("CEN_LONGITUDE", ("double",)),
("HABITAT", ("char", 100)),
("PROTECTED_AREA", ("char", 50)),
("AREA", ("double",)),
("BIOME_MAP", ("char", 500))
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("site.csv"))
# Create species table
table = Table("species", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SPECIES", ("int",)),
("GENUS", ("char", 100)),
("SPECIES", ("char", 100)),
("GENUS_SPECIES", ("char", 100))
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("species.csv"))
SCRIPT = main()
| [
"retriever.lib.templates.Script.__init__",
"csv.writer",
"retriever.lib.models.Table",
"pkg_resources.parse_version",
"retriever.lib.templates.Script.download"
]
| [((520, 551), 'retriever.lib.templates.Script.__init__', 'Script.__init__', (['self'], {}), '(self, **kwargs)\n', (535, 551), False, 'from retriever.lib.templates import Script\n'), ((1681, 1717), 'retriever.lib.templates.Script.download', 'Script.download', (['self', 'engine', 'debug'], {}), '(self, engine, debug)\n', (1696, 1717), False, 'from retriever.lib.templates import Script\n'), ((3317, 3387), 'retriever.lib.models.Table', 'Table', (['"""ID_ABUNDANCE"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('ID_ABUNDANCE', delimiter=',', header_rows=0, contains_pk=False)\n", (3322, 3387), False, 'from retriever.lib.models import Table\n'), ((3701, 3769), 'retriever.lib.models.Table', 'Table', (['"""allrawdata"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('allrawdata', delimiter=',', header_rows=0, contains_pk=False)\n", (3706, 3769), False, 'from retriever.lib.models import Table\n'), ((4479, 4544), 'retriever.lib.models.Table', 'Table', (['"""biomass"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('biomass', delimiter=',', header_rows=0, contains_pk=False)\n", (4484, 4544), False, 'from retriever.lib.models import Table\n'), ((4816, 4883), 'retriever.lib.models.Table', 'Table', (['"""citation1"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('citation1', delimiter=',', header_rows=0, contains_pk=False)\n", (4821, 4883), False, 'from retriever.lib.models import Table\n'), ((5224, 5290), 'retriever.lib.models.Table', 'Table', (['"""contacts"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('contacts', delimiter=',', header_rows=0, contains_pk=False)\n", (5229, 5290), False, 'from retriever.lib.models import Table\n'), ((5883, 5950), 'retriever.lib.models.Table', 'Table', (['"""countries"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('countries', delimiter=',', header_rows=0, contains_pk=False)\n", (5888, 5950), False, 'from retriever.lib.models import Table\n'), ((6219, 6285), 'retriever.lib.models.Table', 'Table', (['"""curation"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('curation', delimiter=',', header_rows=0, contains_pk=False)\n", (6224, 6285), False, 'from retriever.lib.models import Table\n'), ((6702, 6768), 'retriever.lib.models.Table', 'Table', (['"""datasets"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('datasets', delimiter=',', header_rows=0, contains_pk=False)\n", (6707, 6768), False, 'from retriever.lib.models import Table\n'), ((7864, 7931), 'retriever.lib.models.Table', 'Table', (['"""downloads"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('downloads', delimiter=',', header_rows=0, contains_pk=False)\n", (7869, 7931), False, 'from retriever.lib.models import Table\n'), ((8494, 8559), 'retriever.lib.models.Table', 'Table', (['"""methods"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('methods', delimiter=',', header_rows=0, contains_pk=False)\n", (8499, 8559), False, 'from retriever.lib.models import Table\n'), ((8936, 9000), 'retriever.lib.models.Table', 'Table', (['"""sample"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('sample', delimiter=',', header_rows=0, contains_pk=False)\n", (8941, 9000), False, 'from retriever.lib.models import Table\n'), ((9338, 9400), 
'retriever.lib.models.Table', 'Table', (['"""site"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('site', delimiter=',', header_rows=0, contains_pk=False)\n", (9343, 9400), False, 'from retriever.lib.models import Table\n'), ((10191, 10256), 'retriever.lib.models.Table', 'Table', (['"""species"""'], {'delimiter': '""","""', 'header_rows': '(0)', 'contains_pk': '(False)'}), "('species', delimiter=',', header_rows=0, contains_pk=False)\n", (10196, 10256), False, 'from retriever.lib.models import Table\n'), ((1460, 1482), 'pkg_resources.parse_version', 'parse_version', (['VERSION'], {}), '(VERSION)\n', (1473, 1482), False, 'from pkg_resources import parse_version\n'), ((1486, 1508), 'pkg_resources.parse_version', 'parse_version', (['"""2.0.0"""'], {}), "('2.0.0')\n", (1499, 1508), False, 'from pkg_resources import parse_version\n'), ((2773, 2816), 'csv.writer', 'csv.writer', (['csv_file'], {'quoting': 'csv.QUOTE_ALL'}), '(csv_file, quoting=csv.QUOTE_ALL)\n', (2783, 2816), False, 'import csv\n')] |
from django.db import models
# Create your models here.
class Schema(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
class Code(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
active_instances = models.PositiveIntegerField(default=0)
schema = models.ForeignKey(Schema, related_name="codes")
code_type = models.IntegerField(default=0)
def __unicode__(self):
if self.description:
return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
else:
return "%s/%s (%d)" % (self.schema_id, self.name, self.id)
class DataSet(models.Model):
name = models.CharField(max_length=100)
created = models.DateTimeField()
class Session(models.Model):
set = models.ForeignKey(DataSet)
started = models.DateTimeField()
ended = models.DateTimeField()
def __unicode__(self):
return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))
class Participant(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
def __unicode__(self):
return self.name
class Message(models.Model):
session = models.ForeignKey(Session)
idx = models.IntegerField()
time = models.DateTimeField()
type = models.IntegerField()
participant = models.ForeignKey(Participant, related_name='messages')
message = models.TextField()
codes = models.ManyToManyField(Code, through='CodeInstance')
@classmethod
def get_between(cls, start, end):
"""
Get messages that are inclusively between the two messages, or two dates.
        Takes into account the exact ordering of messages within the start/end sessions,
        so, for example, messages with the same timestamp as the end message but a later index are excluded.
"""
if isinstance(start, Message):
after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
after_first = models.Q(time__gte=start.time) & after_first
else:
after_first = models.Q(time__gte=start)
if isinstance(end, Message):
before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
before_last = models.Q(time__lte=end.time) & before_last
else:
before_last = models.Q(time__lte=end)
return cls.objects.filter(after_first, before_last)
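    # Usage sketch (editor's addition): both forms below are supported,
    #   Message.get_between(first_message, last_message)
    #   Message.get_between(start_datetime, end_datetime)
    # and only the Message form also constrains on the per-session idx ordering.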
@property
def text(self):
return self.message
@property
def user_name(self):
return self.participant.name
@property
def created_at(self):
return self.time
class User(models.Model):
name = models.CharField(max_length=100)
full_name = models.CharField(max_length=250)
email = models.CharField(max_length=250)
def __unicode__(self):
return self.name
class AbstractCodeInstance(models.Model):
class Meta:
abstract = True
code = models.ForeignKey(Code)
message = models.ForeignKey(Message)
added = models.DateTimeField()
class CodeInstance(AbstractCodeInstance):
user = models.ForeignKey(User)
task_id = models.PositiveIntegerField()
intensity = models.FloatField()
flag = models.IntegerField()
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.PositiveIntegerField",
"django.db.models.DateTimeField",
"django.db.models.Q",
"django.db.models.CharField"
]
| [((101, 133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (117, 133), False, 'from django.db import models\n'), ((152, 170), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (168, 170), False, 'from django.db import models\n'), ((215, 247), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (231, 247), False, 'from django.db import models\n'), ((266, 284), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (282, 284), False, 'from django.db import models\n'), ((308, 346), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (335, 346), False, 'from django.db import models\n'), ((360, 407), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Schema'], {'related_name': '"""codes"""'}), "(Schema, related_name='codes')\n", (377, 407), False, 'from django.db import models\n'), ((424, 454), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (443, 454), False, 'from django.db import models\n'), ((745, 777), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (761, 777), False, 'from django.db import models\n'), ((792, 814), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (812, 814), False, 'from django.db import models\n'), ((861, 887), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DataSet'], {}), '(DataSet)\n', (878, 887), False, 'from django.db import models\n'), ((902, 924), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (922, 924), False, 'from django.db import models\n'), ((937, 959), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (957, 959), False, 'from django.db import models\n'), ((1124, 1156), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1140, 1156), False, 'from django.db import models\n'), ((1175, 1193), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1191, 1193), False, 'from django.db import models\n'), ((1296, 1322), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Session'], {}), '(Session)\n', (1313, 1322), False, 'from django.db import models\n'), ((1333, 1354), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1352, 1354), False, 'from django.db import models\n'), ((1366, 1388), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1386, 1388), False, 'from django.db import models\n'), ((1400, 1421), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1419, 1421), False, 'from django.db import models\n'), ((1440, 1495), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Participant'], {'related_name': '"""messages"""'}), "(Participant, related_name='messages')\n", (1457, 1495), False, 'from django.db import models\n'), ((1510, 1528), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1526, 1528), False, 'from django.db import models\n'), ((1542, 1594), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Code'], {'through': '"""CodeInstance"""'}), "(Code, through='CodeInstance')\n", (1564, 1594), False, 'from django.db import models\n'), ((2753, 2785), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2769, 2785), False, 'from django.db 
import models\n'), ((2802, 2834), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (2818, 2834), False, 'from django.db import models\n'), ((2847, 2879), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (2863, 2879), False, 'from django.db import models\n'), ((3036, 3059), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Code'], {}), '(Code)\n', (3053, 3059), False, 'from django.db import models\n'), ((3074, 3100), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Message'], {}), '(Message)\n', (3091, 3100), False, 'from django.db import models\n'), ((3113, 3135), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (3133, 3135), False, 'from django.db import models\n'), ((3196, 3219), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (3213, 3219), False, 'from django.db import models\n'), ((3234, 3263), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3261, 3263), False, 'from django.db import models\n'), ((3280, 3299), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3297, 3299), False, 'from django.db import models\n'), ((3311, 3332), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3330, 3332), False, 'from django.db import models\n'), ((2160, 2185), 'django.db.models.Q', 'models.Q', ([], {'time__gte': 'start'}), '(time__gte=start)\n', (2168, 2185), False, 'from django.db import models\n'), ((2419, 2442), 'django.db.models.Q', 'models.Q', ([], {'time__lte': 'end'}), '(time__lte=end)\n', (2427, 2442), False, 'from django.db import models\n'), ((2020, 2048), 'django.db.models.Q', 'models.Q', ([], {'idx__gte': 'start.idx'}), '(idx__gte=start.idx)\n', (2028, 2048), False, 'from django.db import models\n'), ((2075, 2105), 'django.db.models.Q', 'models.Q', ([], {'time__gte': 'start.time'}), '(time__gte=start.time)\n', (2083, 2105), False, 'from django.db import models\n'), ((2283, 2309), 'django.db.models.Q', 'models.Q', ([], {'idx__lte': 'end.idx'}), '(idx__lte=end.idx)\n', (2291, 2309), False, 'from django.db import models\n'), ((2336, 2364), 'django.db.models.Q', 'models.Q', ([], {'time__lte': 'end.time'}), '(time__lte=end.time)\n', (2344, 2364), False, 'from django.db import models\n'), ((1986, 2017), 'django.db.models.Q', 'models.Q', ([], {'session': 'start.session'}), '(session=start.session)\n', (1994, 2017), False, 'from django.db import models\n'), ((2251, 2280), 'django.db.models.Q', 'models.Q', ([], {'session': 'end.session'}), '(session=end.session)\n', (2259, 2280), False, 'from django.db import models\n')] |
# Generated by Django 3.2.3 on 2021-05-27 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0002_auto_20210526_1747'),
]
operations = [
migrations.AddField(
model_name='order',
name='payment_method',
field=models.CharField(choices=[('cash', 'cash'), ('wallet', 'wallet')], default='cash', max_length=10),
),
]
| [
"django.db.models.CharField"
]
| [((346, 448), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('cash', 'cash'), ('wallet', 'wallet')]", 'default': '"""cash"""', 'max_length': '(10)'}), "(choices=[('cash', 'cash'), ('wallet', 'wallet')], default=\n 'cash', max_length=10)\n", (362, 448), False, 'from django.db import migrations, models\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
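  # Editor's note: take_many_sparse_from_tensors_map concatenates the stored
  # SparseTensors along a new leading minibatch dimension, so the rank-2 inputs
  # of shapes [5, 6] and [3, 4] come back as a single rank-3 SparseTensor whose
  # dense_shape is [2, 5, 6] (batch size, then the per-dimension maxima).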
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.ops.sparse_ops.serialize_many_sparse",
"numpy.testing.assert_equal",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"numpy.array",
"tensorflow.python.ops.variables.Variable",
"numpy.arange",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.asarray",
"tensorflow.python.platform.benchmark.benchmark_config",
"numpy.random.seed",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.sparse_ops.deserialize_many_sparse",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.array_ops.stack",
"numpy.random.randint",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.test.main"
]
| [((10045, 10056), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10054, 10056), False, 'from tensorflow.python.platform import test\n'), ((2231, 2283), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2266, 2283), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((2614, 2666), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2649, 2666), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((2856, 2908), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor_lib.SparseTensorValue', (['ind', 'val', 'shape'], {}), '(ind, val, shape)\n', (2891, 2908), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((7898, 7917), 'numpy.random.seed', 'np.random.seed', (['(127)'], {}), '(127)\n', (7912, 7917), True, 'import numpy as np\n'), ((7983, 8047), 'numpy.random.randint', 'np.random.randint', (['batch_size'], {'size': 'num_elements', 'dtype': 'np.int64'}), '(batch_size, size=num_elements, dtype=np.int64)\n', (8000, 8047), True, 'import numpy as np\n'), ((8077, 8116), 'numpy.arange', 'np.arange', (['num_elements'], {'dtype': 'np.int64'}), '(num_elements, dtype=np.int64)\n', (8086, 8116), True, 'import numpy as np\n'), ((8289, 8343), 'numpy.asarray', 'np.asarray', (['[batch_size, num_elements]'], {'dtype': 'np.int64'}), '([batch_size, num_elements], dtype=np.int64)\n', (8299, 8343), True, 'import numpy as np\n'), ((1789, 1824), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {}), '(dtypes.int64)\n', (1810, 1824), False, 'from tensorflow.python.ops import array_ops\n'), ((1834, 1862), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtype'], {}), '(dtype)\n', (1855, 1862), False, 'from tensorflow.python.ops import array_ops\n'), ((1864, 1899), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.int64'], {}), '(dtypes.int64)\n', (1885, 1899), False, 'from tensorflow.python.ops import array_ops\n'), ((3335, 3370), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[handle0, handle1]'], {}), '([handle0, handle1])\n', (3350, 3370), False, 'from tensorflow.python.ops import array_ops\n'), ((4501, 4574), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['[handle0_value, handle1_value]'], {'dtype': 'dtypes.int64'}), '([handle0_value, handle1_value], dtype=dtypes.int64)\n', (4522, 4574), False, 'from tensorflow.python.framework import ops\n'), ((5425, 5475), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0]]'], {'dtype': 'np.int64'}), '([[0, 0], [0, 1], [2, 0]], dtype=np.int64)\n', (5433, 5475), True, 'import numpy as np\n'), ((5497, 5525), 'numpy.array', 'np.array', (["[b'a', b'b', b'c']"], {}), "([b'a', b'b', b'c'])\n", (5505, 5525), True, 'import numpy as np\n'), ((5546, 5578), 'numpy.array', 'np.array', (['[4, 5]'], {'dtype': 'np.int64'}), '([4, 5], dtype=np.int64)\n', (5554, 5578), True, 'import numpy as np\n'), ((6825, 6898), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['[handle0_value, handle1_value]'], {'dtype': 'dtypes.int64'}), '([handle0_value, handle1_value], dtype=dtypes.int64)\n', (6846, 6898), False, 'from tensorflow.python.framework import ops\n'), 
((1961, 2019), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]'], {}), '([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])\n', (1969, 2019), True, 'import numpy as np\n'), ((2067, 2100), 'numpy.array', 'np.array', (['[0, 10, 13, 14, 32, 33]'], {}), '([0, 10, 13, 14, 32, 33])\n', (2075, 2100), True, 'import numpy as np\n'), ((2186, 2202), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (2194, 2202), True, 'import numpy as np\n'), ((2344, 2402), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]]'], {}), '([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2], [2, 3]])\n', (2352, 2402), True, 'import numpy as np\n'), ((2450, 2483), 'numpy.array', 'np.array', (['[0, 10, 13, 14, 32, 33]'], {}), '([0, 10, 13, 14, 32, 33])\n', (2458, 2483), True, 'import numpy as np\n'), ((2569, 2585), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (2577, 2585), True, 'import numpy as np\n'), ((2716, 2737), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2724, 2737), True, 'import numpy as np\n'), ((2765, 2778), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2773, 2778), True, 'import numpy as np\n'), ((2808, 2827), 'numpy.array', 'np.array', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (2816, 2827), True, 'import numpy as np\n'), ((3050, 3062), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3059, 3062), True, 'import numpy as np\n'), ((3110, 3122), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (3119, 3122), True, 'import numpy as np\n'), ((4204, 4216), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (4213, 4216), True, 'import numpy as np\n'), ((4265, 4277), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (4274, 4277), True, 'import numpy as np\n'), ((6539, 6551), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (6548, 6551), True, 'import numpy as np\n'), ((7386, 7398), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (7395, 7398), True, 'import numpy as np\n'), ((8426, 8446), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (8436, 8446), False, 'from tensorflow.python.framework import ops\n'), ((8466, 8493), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['indices'], {}), '(indices)\n', (8484, 8493), False, 'from tensorflow.python.ops import variables\n'), ((8511, 8537), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['values'], {}), '(values)\n', (8529, 8537), False, 'from tensorflow.python.ops import variables\n'), ((8554, 8579), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['shape'], {}), '(shape)\n', (8572, 8579), False, 'from tensorflow.python.ops import variables\n'), ((8593, 8647), 'tensorflow.python.framework.sparse_tensor.SparseTensor', 'sparse_tensor_lib.SparseTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (8623, 8647), True, 'from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\n'), ((8905, 8941), 'tensorflow.python.ops.sparse_ops.serialize_many_sparse', 'sparse_ops.serialize_many_sparse', (['st'], {}), '(st)\n', (8937, 8941), False, 'from tensorflow.python.ops import sparse_ops\n'), ((8968, 9037), 'tensorflow.python.ops.sparse_ops.deserialize_many_sparse', 'sparse_ops.deserialize_many_sparse', (['st_serialized'], {'dtype': 'values.dtype'}), '(st_serialized, dtype=values.dtype)\n', (9002, 9037), False, 'from tensorflow.python.ops import sparse_ops\n'), ((9293, 9380), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['st_roundtrip_values.values', 'st_deserialized_values.values'], {}), '(st_roundtrip_values.values, st_deserialized_values.\n values)\n', (9316, 9380), True, 'import numpy as np\n'), ((9416, 9505), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['st_roundtrip_values.indices', 'st_deserialized_values.indices'], {}), '(st_roundtrip_values.indices, st_deserialized_values\n .indices)\n', (9439, 9505), True, 'import numpy as np\n'), ((9541, 9637), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['st_roundtrip_values.dense_shape', 'st_deserialized_values.dense_shape'], {}), '(st_roundtrip_values.dense_shape,\n st_deserialized_values.dense_shape)\n', (9564, 9637), True, 'import numpy as np\n'), ((2967, 2978), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (2976, 2978), False, 'from tensorflow.python.framework import ops\n'), ((8376, 8404), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (8402, 8404), False, 'from tensorflow.python.platform import benchmark\n'), ((9115, 9155), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (9153, 9155), False, 'from tensorflow.python.ops import variables\n')] |
import json
from typing import Dict, Optional
import requests
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document,
parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document
HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
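# NodeInfo documents can advertise several schema versions; fetch_nodeinfo_document
# below selects the highest advertised version that does not exceed this value.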
def fetch_mastodon_document(host):
doc, status_code, error = fetch_document(host=host, path='/api/v1/instance')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_mastodon_document(doc, host)
def fetch_matrix_document(host: str) -> Optional[Dict]:
doc, status_code, error = fetch_document(host=host, path='/_matrix/federation/v1/version')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_matrix_document(doc, host)
def fetch_misskey_document(host: str, mastodon_document: Dict=None) -> Optional[Dict]:
try:
response = requests.post(f'https://{host}/api/meta') # ¯\_(ツ)_/¯
except Exception:
return
try:
doc = response.json()
except json.JSONDecodeError:
return
if response.status_code == 200:
return parse_misskey_document(doc, host, mastodon_document=mastodon_document)
def fetch_nodeinfo_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/nodeinfo')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
url, highest_version = '', 0.0
if doc.get('0'):
# Buggy NodeInfo from certain old Hubzilla versions
url = doc.get('0', {}).get('href')
elif isinstance(doc.get('links'), dict):
# Another buggy NodeInfo from certain old Hubzilla versions
url = doc.get('links').get('href')
else:
for link in doc.get('links'):
version = float(link.get('rel').split('/')[-1])
if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION:
url, highest_version = link.get('href'), version
if not url:
return
doc, status_code, error = fetch_document(url=url)
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo_document(doc, host)
def fetch_nodeinfo2_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/x-nodeinfo2')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo2_document(doc, host)
def fetch_statisticsjson_document(host):
doc, status_code, error = fetch_document(host=host, path='/statistics.json')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_statisticsjson_document(doc, host)
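# Illustrative usage (not part of the original module): each fetcher returns a parsed
# dict on success and None when the document is missing or malformed, e.g.
# fetch_nodeinfo_document("example.org").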
| [
"json.loads",
"requests.post",
"federation.hostmeta.parsers.parse_misskey_document",
"federation.hostmeta.parsers.parse_matrix_document",
"federation.hostmeta.parsers.parse_statisticsjson_document",
"federation.hostmeta.parsers.parse_mastodon_document",
"federation.utils.network.fetch_document",
"federation.hostmeta.parsers.parse_nodeinfo_document",
"federation.hostmeta.parsers.parse_nodeinfo2_document"
]
| [((429, 479), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'host': 'host', 'path': '"""/api/v1/instance"""'}), "(host=host, path='/api/v1/instance')\n", (443, 479), False, 'from federation.utils.network import fetch_document\n'), ((609, 643), 'federation.hostmeta.parsers.parse_mastodon_document', 'parse_mastodon_document', (['doc', 'host'], {}), '(doc, host)\n', (632, 643), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((732, 796), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'host': 'host', 'path': '"""/_matrix/federation/v1/version"""'}), "(host=host, path='/_matrix/federation/v1/version')\n", (746, 796), False, 'from federation.utils.network import fetch_document\n'), ((926, 958), 'federation.hostmeta.parsers.parse_matrix_document', 'parse_matrix_document', (['doc', 'host'], {}), '(doc, host)\n', (947, 958), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((1444, 1499), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'host': 'host', 'path': '"""/.well-known/nodeinfo"""'}), "(host=host, path='/.well-known/nodeinfo')\n", (1458, 1499), False, 'from federation.utils.network import fetch_document\n'), ((2251, 2274), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'url': 'url'}), '(url=url)\n', (2265, 2274), False, 'from federation.utils.network import fetch_document\n'), ((2404, 2438), 'federation.hostmeta.parsers.parse_nodeinfo_document', 'parse_nodeinfo_document', (['doc', 'host'], {}), '(doc, host)\n', (2427, 2438), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((2507, 2565), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'host': 'host', 'path': '"""/.well-known/x-nodeinfo2"""'}), "(host=host, path='/.well-known/x-nodeinfo2')\n", (2521, 2565), False, 'from federation.utils.network import fetch_document\n'), ((2695, 2730), 'federation.hostmeta.parsers.parse_nodeinfo2_document', 'parse_nodeinfo2_document', (['doc', 'host'], {}), '(doc, host)\n', (2719, 2730), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((2804, 2854), 'federation.utils.network.fetch_document', 'fetch_document', ([], {'host': 'host', 'path': '"""/statistics.json"""'}), "(host=host, path='/statistics.json')\n", (2818, 2854), False, 'from federation.utils.network import fetch_document\n'), ((2984, 3024), 'federation.hostmeta.parsers.parse_statisticsjson_document', 'parse_statisticsjson_document', (['doc', 'host'], {}), '(doc, host)\n', (3013, 3024), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((534, 549), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (544, 549), False, 'import json\n'), ((851, 866), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (861, 866), False, 'import json\n'), ((1076, 1117), 'requests.post', 
'requests.post', (['f"""https://{host}/api/meta"""'], {}), "(f'https://{host}/api/meta')\n", (1089, 1117), False, 'import requests\n'), ((1306, 1376), 'federation.hostmeta.parsers.parse_misskey_document', 'parse_misskey_document', (['doc', 'host'], {'mastodon_document': 'mastodon_document'}), '(doc, host, mastodon_document=mastodon_document)\n', (1328, 1376), False, 'from federation.hostmeta.parsers import parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document, parse_matrix_document, parse_misskey_document\n'), ((1554, 1569), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (1564, 1569), False, 'import json\n'), ((2329, 2344), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (2339, 2344), False, 'import json\n'), ((2620, 2635), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (2630, 2635), False, 'import json\n'), ((2909, 2924), 'json.loads', 'json.loads', (['doc'], {}), '(doc)\n', (2919, 2924), False, 'import json\n')] |
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import freezegun
import mock
import oslo_messaging as om
from watcher.common import rpc
from watcher import notifications
from watcher.objects import service as w_service
from watcher.tests.db import base
from watcher.tests.objects import utils
@freezegun.freeze_time('2016-10-18T09:52:05.219414')
class TestActionPlanNotification(base.DbTestCase):
def setUp(self):
super(TestActionPlanNotification, self).setUp()
p_get_notifier = mock.patch.object(rpc, 'get_notifier')
m_get_notifier = p_get_notifier.start()
self.addCleanup(p_get_notifier.stop)
self.m_notifier = mock.Mock(spec=om.Notifier)
def fake_get_notifier(publisher_id):
self.m_notifier.publisher_id = publisher_id
return self.m_notifier
m_get_notifier.side_effect = fake_get_notifier
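        # rpc.get_notifier is patched so the test captures the publisher_id and the
        # emitted payload on self.m_notifier instead of sending real notifications.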
def test_service_failed(self):
service = utils.get_test_service(mock.Mock(),
created_at=datetime.datetime.utcnow())
state = w_service.ServiceStatus.FAILED
notifications.service.send_service_update(mock.MagicMock(),
service,
state,
host='node0')
notification = self.m_notifier.warning.call_args[1]
payload = notification['payload']
self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id)
self.assertDictEqual({
'watcher_object.data': {
'last_seen_up': '2016-09-22T08:32:06Z',
'name': 'watcher-service',
'sevice_host': 'controller',
'status_update': {
'watcher_object.data': {
'old_state': 'ACTIVE',
'state': 'FAILED'
},
'watcher_object.name': 'ServiceStatusUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
}
},
'watcher_object.name': 'ServiceUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
},
payload
)
| [
"datetime.datetime.utcnow",
"mock.Mock",
"mock.patch.object",
"freezegun.freeze_time",
"mock.MagicMock"
]
| [((872, 923), 'freezegun.freeze_time', 'freezegun.freeze_time', (['"""2016-10-18T09:52:05.219414"""'], {}), "('2016-10-18T09:52:05.219414')\n", (893, 923), False, 'import freezegun\n'), ((1078, 1116), 'mock.patch.object', 'mock.patch.object', (['rpc', '"""get_notifier"""'], {}), "(rpc, 'get_notifier')\n", (1095, 1116), False, 'import mock\n'), ((1236, 1263), 'mock.Mock', 'mock.Mock', ([], {'spec': 'om.Notifier'}), '(spec=om.Notifier)\n', (1245, 1263), False, 'import mock\n'), ((1534, 1545), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1543, 1545), False, 'import mock\n'), ((1724, 1740), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1738, 1740), False, 'import mock\n'), ((1599, 1625), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1623, 1625), False, 'import datetime\n')] |
import torch
from torch import nn
from torch.nn.parameter import Parameter
from einops import rearrange, reduce, repeat
class dca_offsets_layer(nn.Module):
"""Constructs a Offset Generation module.
"""
def __init__(self, channel, n_offsets):
super(dca_offsets_layer, self).__init__()
self.channel = channel
self.n_offsets = n_offsets
def covariance_features(self, x):
"""
Takes in a feature map and returns the unnormalized covariance matrix
"""
m_batchsize, C, height, width = x.size()
        # Standardize each spatial position across channels (subtract the channel
        # mean, then divide by the channel std) before forming the Gram matrix.
        x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True) + 1e-5)
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
return energy
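    # For an input of shape (B, C, H, W), covariance_features returns a (B, C, C)
    # Gram matrix: the bmm of the (B, C, H*W) view with its transpose.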
def forward(self, x):
m_batchsize, C, height, width = x.size()
cov_matrix = self.covariance_features(x).reshape(m_batchsize, C, 1, C)
_, locations = torch.topk(cov_matrix, self.n_offsets, dim=1)
delta = torch.stack(self.n_offsets*[torch.arange(0, self.channel)], dim=0)
delta = torch.stack(m_batchsize * [delta], dim=0)
offsets = locations.squeeze() - delta.cuda()
return offsets
| [
"torch.bmm",
"torch.topk",
"torch.stack",
"torch.arange"
]
| [((773, 804), 'torch.bmm', 'torch.bmm', (['proj_query', 'proj_key'], {}), '(proj_query, proj_key)\n', (782, 804), False, 'import torch\n'), ((1006, 1051), 'torch.topk', 'torch.topk', (['cov_matrix', 'self.n_offsets'], {'dim': '(1)'}), '(cov_matrix, self.n_offsets, dim=1)\n', (1016, 1051), False, 'import torch\n'), ((1151, 1192), 'torch.stack', 'torch.stack', (['(m_batchsize * [delta])'], {'dim': '(0)'}), '(m_batchsize * [delta], dim=0)\n', (1162, 1192), False, 'import torch\n'), ((1096, 1125), 'torch.arange', 'torch.arange', (['(0)', 'self.channel'], {}), '(0, self.channel)\n', (1108, 1125), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path=args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
def collect_reads(sublist):
#create a dict for holding the frequency of each read length and GC content
GC_dict = {}
for length in range(size_range[0],size_range[1]+1):
GC_dict[length]={}
for num_GC in range(0,length+1):
GC_dict[length][num_GC]=0
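    # GC_dict maps fragment length -> {GC base count: number of fragments}, e.g.
    # GC_dict[167][80] counts 167 bp fragments containing 80 G or C bases.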
#import the bam file
#this needs to be done within the loop otherwise it gives a truncated file warning
bam_file = pysam.AlignmentFile(bam_file_path, "rb")
print('sublist intervals:',len(sublist))
#this might also need to be in the loop
#import the ref_seq
ref_seq=pysam.FastaFile(ref_seq_path)
for i in range(len(sublist)):
chrom = sublist.iloc[i][0]
start = sublist.iloc[i][1]
end = sublist.iloc[i][2]
if i%5000==0:
print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time))
sys.stdout.flush()
        #fetch any read that overlaps the interval (no need to extend the interval because the fetch function does this automatically)
fetched = bam_file.fetch(chrom,start,end)
for read in fetched:
#use both fw (positive template length) and rv (negative template length) reads
if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]):
#qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False:
if read.is_reverse==False:
read_start = read.reference_start
read_end = read.reference_start+read.template_length
elif read.is_reverse==True:
read_end = read.reference_start + read.reference_length
read_start = read_end + read.template_length
fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
#tally up the GC content
fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N')
# #################
# ##logic check####
# #################
# if read.is_reverse==False:
# if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length:
# print('fw match',read.reference_length)
# else:
# print(fragment_seq[0:read.reference_length],read.reference_length,'fw')
# print(read.query_sequence,len(read.query_sequence),'fw')
# print(len(fragment_seq),read.template_length)
# print('\n')
# elif read.is_reverse==True:
# if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length:
# print('rv match',read.reference_length)
# else:
# print(fragment_seq[-read.reference_length:],read.reference_length,'rv')
# print(read.query_sequence,len(read.query_sequence),'rv')
# print(len(fragment_seq),read.template_length)
# print('\n')
# #################
#split and convert to numpy array
fragment_seq = np.array(list(fragment_seq))
#replace with values
fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right
fragment_seq = fragment_seq.astype(int)
num_GC = int(fragment_seq.sum())
GC_dict[abs(read.template_length)][num_GC]+=1
print('done')
return(GC_dict)
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
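# Each worker returns its own {length: {GC count: n}} dictionary; the per-worker
# counts are summed into a single table below before writing the GC_counts output.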
# In[ ]:
all_GC_df = pd.DataFrame()
for i,GC_dict in enumerate(GC_dict_list):
GC_df = pd.DataFrame()
for length in GC_dict.keys():
current = pd.Series(GC_dict[length]).reset_index()
current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'})
current['length']=length
current = current[['length','num_GC','number_of_fragments']]
GC_df = GC_df.append(current, ignore_index=True)
GC_df = GC_df.set_index(['length','num_GC'])
all_GC_df[i] = GC_df['number_of_fragments']
del(GC_df,GC_dict)
all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file,sep='\t',index=False)
# In[ ]:
print('done')
# In[ ]:
# In[ ]:
# In[ ]:
| [
"pandas.Series",
"os.path.exists",
"argparse.ArgumentParser",
"pandas.read_csv",
"pysam.AlignmentFile",
"numpy.array_split",
"numpy.random.randint",
"multiprocessing.Pool",
"os.mkdir",
"pandas.DataFrame",
"sys.stdout.flush",
"time.time",
"pysam.FastaFile"
]
| [((762, 787), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (785, 787), False, 'import argparse\n'), ((2901, 2949), 'pandas.read_csv', 'pd.read_csv', (['mapable_path'], {'sep': '"""\t"""', 'header': 'None'}), "(mapable_path, sep='\\t', header=None)\n", (2912, 2949), True, 'import pandas as pd\n'), ((3194, 3212), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3210, 3212), False, 'import sys\n'), ((7702, 7713), 'time.time', 'time.time', ([], {}), '()\n', (7711, 7713), False, 'import time\n'), ((7718, 7737), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'CPU'}), '(processes=CPU)\n', (7722, 7737), False, 'from multiprocessing import Pool\n'), ((7772, 7810), 'numpy.array_split', 'np.array_split', (['mapable_intervals', 'CPU'], {}), '(mapable_intervals, CPU)\n', (7786, 7810), True, 'import numpy as np\n'), ((7928, 7942), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7940, 7942), True, 'import pandas as pd\n'), ((2652, 2696), 'os.path.exists', 'os.path.exists', (["(out_dir + '/' + mapable_name)"], {}), "(out_dir + '/' + mapable_name)\n", (2666, 2696), False, 'import os\n'), ((2699, 2737), 'os.mkdir', 'os.mkdir', (["(out_dir + '/' + mapable_name)"], {}), "(out_dir + '/' + mapable_name)\n", (2707, 2737), False, 'import os\n'), ((2742, 2802), 'os.path.exists', 'os.path.exists', (["(out_dir + '/' + mapable_name + '/GC_counts/')"], {}), "(out_dir + '/' + mapable_name + '/GC_counts/')\n", (2756, 2802), False, 'import os\n'), ((2803, 2857), 'os.mkdir', 'os.mkdir', (["(out_dir + '/' + mapable_name + '/GC_counts/')"], {}), "(out_dir + '/' + mapable_name + '/GC_counts/')\n", (2811, 2857), False, 'import os\n'), ((3649, 3689), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_path', '"""rb"""'], {}), "(bam_file_path, 'rb')\n", (3668, 3689), False, 'import pysam\n'), ((3820, 3849), 'pysam.FastaFile', 'pysam.FastaFile', (['ref_seq_path'], {}), '(ref_seq_path)\n', (3835, 3849), False, 'import pysam\n'), ((7997, 8011), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8009, 8011), True, 'import pandas as pd\n'), ((8520, 8543), 'pandas.DataFrame', 'pd.DataFrame', (['all_GC_df'], {}), '(all_GC_df)\n', (8532, 8543), True, 'import pandas as pd\n'), ((4122, 4140), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4138, 4140), False, 'import sys\n'), ((8064, 8090), 'pandas.Series', 'pd.Series', (['GC_dict[length]'], {}), '(GC_dict[length])\n', (8073, 8090), True, 'import pandas as pd\n'), ((7321, 7341), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (7338, 7341), True, 'import numpy as np\n'), ((4085, 4096), 'time.time', 'time.time', ([], {}), '()\n', (4094, 4096), False, 'import time\n')] |
import discord
from discord.ext import commands
arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"
class Server(commands.Cog):
def __init__(self, client):
self.client = client
self.kana_id = 857835279259664403
@commands.command()
@commands.is_owner()
async def sabout(self, ctx):
kana = self.client.get_user(self.kana_id)
about_file = discord.File("./images/about_server.png")
await ctx.send(file = about_file)
emb = discord.Embed(title=f"{kdance} ABOUT SERVER {kdance}",description = f"{arrow} **DRAGON LOLI'S HOME** is the official Server of the bot **Kanna Chan**. It's a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! So, stay tuned!", color=0xfc74c6)
emb.add_field(
name=f"{kwee} __ROLES__",
value=f"{arrow} <@&876800883441156138> The highest role supposed to be only for Kanna Chan.\n{arrow} <@&876817811396263946> Admins of the Server and have the highest power and authority after owner.\n{arrow} <@&876818242058997791> Moderators of the server meant to moderate the chat and maintain a positive environment in community.\n{arrow} <@&876801038420701196> Developer(s) of <NAME> have this role.\n{arrow} <@&876804164661944340> All other users who join this server get this role by default. They have image and embed perms by deault.\n{arrow} **PS: APART FROM THESE SELF-ROLES ARE ALSO AVAIALBLE FOR MEMBERS.**",
inline=False
)
emb.add_field(
name=f"{ksmug} __CHANNELS__",
value=f"{arrow} <#877030933847490691> Read the rules here.\n{arrow} <#877031867440832574> Channel for grabbing self-roles.\n{arrow} <#876798564704084011> The general chat for the server.\n{arrow} <#876798809819189249> Bot Commands should be executed here.\n{arrow} <#876798696078065694> You can give suggestions for improving Kanna Chan here.\n{arrow} <#876798720254029864> You can report BUGS here if you find any in Kanna Chan.\n{arrow} <#876798750876651530> For any other support or query use this channel.\n{arrow} **P.S: YOU CAN PING ANY STAFF MEMBER OR DEVELOPER WHILE REPORTING BUG OR IN CASE OF ANY QUERY.**",
inline=False
)
emb.set_footer(
text="<NAME>",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.command()
@commands.is_owner()
async def rule(self, ctx):
kana = self.client.get_user(self.kana_id)
rule_file = discord.File("./images/rules.png")
await ctx.send(file=rule_file)
emb = discord.Embed(title=f"{kbored} RULES {kbored}", color=0xfc74c6)
emb.add_field(
name=f"{heart} **Be respectful**",
value=f"You must respect all users, regardless of your liking towards them. Treat others the way you want to be treated.",
inline=False
)
emb.add_field(
name=f"{heart} **No Inappropriate Language**",
value=f"{arrow} The use of profanity should be kept to a minimum. However, any derogatory language towards any user is prohibited.",
inline=False
)
emb.add_field(
name=f"{heart} **No spamming**",
value=f"{arrow} Don't send a lot of small messages right after each other. Do not disrupt chat by spamming.",
inline=False
)
emb.add_field(
name=f"{heart} **No pornographic/adult/other NSFW material**",
value=f"{arrow} This is a community server and not meant to share this kind of material.",
inline=False
)
emb.add_field(
name=f"{heart} **No advertisements**",
value=f"{arrow} We do not tolerate any kind of advertisements, whether it be for other communities or streams. You can post your content in the media channel if it is relevant and provides actual value (Video/Art)",
inline=False
)
emb.add_field(
name=f"{heart} **No offensive names and profile pictures**",
value=f"{arrow} You will be asked to change your name or picture if the staff deems them inappropriate.",
inline=False
)
emb.add_field(
name=f"{heart} **Server Raiding**",
value=f"{arrow} Raiding or mentions of raiding are not allowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Direct & Indirect Threats**",
value=f"{arrow} Threats to other users of DDoS, Death, DoX, abuse, and other malicious threats are absolutely prohibited and disallowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Follow the Discord Community Guidelines**",
value=f"{arrow} You can find them here: https://discordapp.com/guidelines",
inline=False
)
emb.add_field(
name=f"{heart} **VOICE CHANNELS**",
value=f"{arrow} Do not join voice chat channels without permission of the people already in there.",
inline=False
)
emb.add_field(
name=f"{heart} **DECISIONS AND ISSUES**",
value = f"{arrow} ***The Admins and Mods will Mute/Kick/Ban per discretion. If you feel mistreated DM an Admin and we will resolve the issue.***",
inline=False
)
emb.add_field(
name=f"{heart} **CHANGES**",
value = f"{arrow} ***Your presence in this server implies accepting these rules, including all further changes. These changes might be done at any time without notice, it is your responsibility to check for them.***",
inline=False
)
emb.set_footer(
text="<NAME>",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.Cog.listener()
async def on_member_join(self, member):
if member.guild.id == 876798564704084008:
if member.bot:
return
else:
member_role = member.guild.get_role(876804164661944340)
await member.add_roles(member_role)
desc = f"{member.name} Thanks for joining Kanna's Server. The server is currently under construction, Thanks for being an **early supporter**!! If you need any kind of help or support just ping any staff member or DM `aSHish#1198`. Have a nice stay in the server :)"
await member.send(desc)
else:
return
def setup(client):
client.add_cog(Server(client))
print(">> Server Utility loaded") | [
"discord.ext.commands.Cog.listener",
"discord.ext.commands.is_owner",
"discord.Embed",
"discord.ext.commands.command",
"discord.File"
]
| [((449, 467), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (465, 467), False, 'from discord.ext import commands\n'), ((473, 492), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (490, 492), False, 'from discord.ext import commands\n'), ((2653, 2671), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2669, 2671), False, 'from discord.ext import commands\n'), ((2677, 2696), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (2694, 2696), False, 'from discord.ext import commands\n'), ((6127, 6150), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (6148, 6150), False, 'from discord.ext import commands\n'), ((597, 638), 'discord.File', 'discord.File', (['"""./images/about_server.png"""'], {}), "('./images/about_server.png')\n", (609, 638), False, 'import discord\n'), ((695, 1067), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""{kdance} ABOUT SERVER {kdance}"""', 'description': 'f"""{arrow} **DRAGON LOLI\'S HOME** is the official Server of the bot **Kanna Chan**. It\'s a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! So, stay tuned!"""', 'color': '(16544966)'}), '(title=f\'{kdance} ABOUT SERVER {kdance}\', description=\n f"""{arrow} **DRAGON LOLI\'S HOME** is the official Server of the bot **Kanna Chan**. It\'s a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! So, stay tuned!"""\n , color=16544966)\n', (708, 1067), False, 'import discord\n'), ((2798, 2832), 'discord.File', 'discord.File', (['"""./images/rules.png"""'], {}), "('./images/rules.png')\n", (2810, 2832), False, 'import discord\n'), ((2886, 2949), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""{kbored} RULES {kbored}"""', 'color': '(16544966)'}), "(title=f'{kbored} RULES {kbored}', color=16544966)\n", (2899, 2949), False, 'import discord\n')] |
import numpy as np
import network
def main():
x = np.array([2, 3])
nw = network.NeuralNetwork()
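    # `network` is a local module (not shown here); NeuralNetwork is assumed to
    # expose a feedforward(x) method that accepts this 2-element input vector.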
print(nw.feedforward(x))
if __name__ == "__main__":
main()
| [
"numpy.array",
"network.NeuralNetwork"
]
| [((56, 72), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (64, 72), True, 'import numpy as np\n'), ((82, 105), 'network.NeuralNetwork', 'network.NeuralNetwork', ([], {}), '()\n', (103, 105), False, 'import network\n')] |
""" Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
"""All possible Tisserand kinds"""
APSIS = "apsis"
ENERGY = "energy"
PERIOD = "period"
class TisserandPlotter:
"""Generates Tisserand figures"""
def __init__(self, kind=TisserandKind.APSIS, axes=None):
"""Object initializer
Parameters
----------
kind : TisserandKind
Nature for the Tisserand
axes : ~matplotlib.pyplot.axes
Axes for the figure
"""
        # Assign Tisserand kind
self.kind = kind
# Check if axis available
if not axes:
_, self.ax = plt.subplots(1, 1)
else:
self.ax = axes
# Force axes scale regarding Tisserand kind
self.ax.set_xscale("log")
if self.kind == TisserandKind.APSIS:
self.ax.set_yscale("log")
def _solve_tisserand(
self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
):
"""Solves all possible Tisserand lines with a meshgrid workflow
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
        vinf_span : tuple
            Minimum and maximum v-infinity velocities for the flyby
num_contours : int
Number of contour lines for flyby speed
alpha_lim : tuple
Minimum and maximum flyby angles.
N : int
Number of points for flyby angle.
Notes
-----
The algorithm for generating Tisserand plots is the one depicted in
"Preliminary Trajectory Design of a Mission to Enceladus" by David
<NAME>, section 3.6
"""
# Generate mean orbital elements Earth
body_rv = get_mean_elements(body).to_vectors()
R_body, V_body = norm(body_rv.r), norm(body_rv.v)
# Generate non-dimensional velocity and alpha span
vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
vinf_array /= V_body
# Construct the mesh for any configuration
V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
# Solving for non-dimensional a_sc and ecc_sc
A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
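        # A_SC and ECC_SC are the spacecraft's non-dimensional semi-major axis and
        # eccentricity implied by the flyby geometry (v-infinity is normalized by the
        # body's orbital speed), following the reference cited in the Notes above.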
# Compute main Tisserand variables
RR_P = A_SC * R_body * (1 - ECC_SC)
RR_A = A_SC * R_body * (1 + ECC_SC)
TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
EE = -body.parent.k / (2 * A_SC * R_body)
# Build color lines to internal canvas
return RR_P, RR_A, EE, TT
def _build_lines(self, RR_P, RR_A, EE, TT, color):
"""Collect lines and append them to internal data
Parameters
----------
        RR_P, RR_A, EE, TT : ~astropy.units.Quantity
            Periapsis radius, apoapsis radius, specific orbital energy and period arrays
        color : str
            Color for the plotted lines
Returns
-------
lines: list
Plotting lines for the Tisserand
"""
# Plot desired kind lines
if self.kind == TisserandKind.APSIS:
# Generate apsis lines
lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
elif self.kind == TisserandKind.ENERGY:
# Generate energy lines
lines = self.ax.plot(
RR_P.to(u.AU), EE.to(u.km ** 2 / u.s ** 2), color=color
)
elif self.kind == TisserandKind.PERIOD:
# Generate period lines
lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
return lines
def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
"""Plots body Tisserand line within flyby angle
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf : ~astropy.units.Quantity
Vinf velocity line
alpha_lim : tuple
Minimum and maximum flyby angles
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
vinf_span = (vinf, vinf)
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(
body, vinf_span, num_contours=2, alpha_lim=alpha_lim
)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
def plot(self, body, vinf_span, num_contours=10, color=None):
"""Plots body Tisserand for given amount of solutions within Vinf span
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_span : tuple
Minimum and maximum Vinf velocities
num_contours : int
Number of points to iterate over previously defined velocities
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
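    # Illustrative usage (assumes poliastro bodies and astropy units are available):
    #     from poliastro.bodies import Earth
    #     from astropy import units as u
    #     tp = TisserandPlotter(kind=TisserandKind.APSIS)
    #     ax = tp.plot(Earth, (5, 10) * u.km / u.s, num_contours=15)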
| [
"poliastro.util.norm",
"numpy.sqrt",
"poliastro.twobody.mean_elements.get_mean_elements",
"numpy.linspace",
"numpy.cos",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
]
| [((2191, 2245), 'numpy.linspace', 'np.linspace', (['vinf_span[0]', 'vinf_span[-1]', 'num_contours'], {}), '(vinf_span[0], vinf_span[-1], num_contours)\n', (2202, 2245), True, 'import numpy as np\n'), ((2268, 2311), 'numpy.linspace', 'np.linspace', (['alpha_lim[0]', 'alpha_lim[-1]', 'N'], {}), '(alpha_lim[0], alpha_lim[-1], N)\n', (2279, 2311), True, 'import numpy as np\n'), ((2416, 2452), 'numpy.meshgrid', 'np.meshgrid', (['vinf_array', 'alpha_array'], {}), '(vinf_array, alpha_array)\n', (2427, 2452), True, 'import numpy as np\n'), ((2595, 2657), 'numpy.sqrt', 'np.sqrt', (['(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2)'], {}), '(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / 2) ** 2)\n', (2602, 2657), True, 'import numpy as np\n'), ((904, 922), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (916, 922), True, 'from matplotlib import pyplot as plt\n'), ((2077, 2092), 'poliastro.util.norm', 'norm', (['body_rv.r'], {}), '(body_rv.r)\n', (2081, 2092), False, 'from poliastro.util import norm\n'), ((2094, 2109), 'poliastro.util.norm', 'norm', (['body_rv.v'], {}), '(body_rv.v)\n', (2098, 2109), False, 'from poliastro.util import norm\n'), ((2817, 2862), 'numpy.sqrt', 'np.sqrt', (['((A_SC * R_body) ** 3 / body.parent.k)'], {}), '((A_SC * R_body) ** 3 / body.parent.k)\n', (2824, 2862), True, 'import numpy as np\n'), ((2015, 2038), 'poliastro.twobody.mean_elements.get_mean_elements', 'get_mean_elements', (['body'], {}), '(body)\n', (2032, 2038), False, 'from poliastro.twobody.mean_elements import get_mean_elements\n'), ((2563, 2576), 'numpy.cos', 'np.cos', (['ALPHA'], {}), '(ALPHA)\n', (2569, 2576), True, 'import numpy as np\n')] |
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
class Rack:
rack_name = None
tree = None
kv = None
ru = None
width = None
systems = []
ethernet_patch_panel_24 = []
ethernet_patch_panel_48 = []
def __init__(self, rack_name):
self.systems = []
self.rack_name = rack_name
self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
h = KeyValueHandler()
for s in self.system_list:
request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
tree = h.read(request)
system_ru = self._get_system_ru(tree)
system_image = self._get_system_image(tree)
system_slot = self._get_system_slot(tree)
self.systems.append({
"system_name":s.system.hostname,
"system_id":s.system.id,
"system_ru":system_ru,
"system_image":system_image,
'system_slot':system_slot,
'operating_system':str(s.system.operating_system),
'server_model': str(s.system.server_model),
'oob_ip': str(s.system.oob_ip),
})
self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
try:
self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
except:
self.ru = 42
try:
self.width = self.kv.keyvalue_set.get(key='rack_width').value
except:
self.width = 30
def _get_ethernet_patch_panels(self, tree, type, port_count):
ret = []
for i in tree.keyvalue_set.all():
match_string = "%i_port_%s_patch_panel" % (port_count, type)
if str(i.key) == match_string:
ret.append(i.value)
return ret
def _get_system_ru(self, tree):
for i in tree.iterkeys():
try:
if 'system_ru' in i.split(':'):
return tree[i]
except:
pass
return 4
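    # The keystore keys appear to be colon-delimited (e.g. '<prefix>:system_ru'),
    # so these helpers split each key on ':' and fall back to a default when no
    # matching segment is found.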
def _get_system_image(self, tree):
for i in tree.iterkeys():
try:
if 'system_image' in i.split(':'):
return tree[i]
except:
pass
return None
def _get_system_slot(self, tree):
for i in tree.iterkeys():
try:
if 'system_slot' in i.split(':'):
return tree[i]
except:
pass
return 1
| [
"api_v2.keyvalue_handler.KeyValueHandler",
"systems.models.KeyValue.objects.select_related",
"django.test.client.RequestFactory",
"truth.models.Truth.objects.select_related"
]
| [((265, 281), 'django.test.client.RequestFactory', 'RequestFactory', ([], {}), '()\n', (279, 281), False, 'from django.test.client import RequestFactory\n'), ((994, 1011), 'api_v2.keyvalue_handler.KeyValueHandler', 'KeyValueHandler', ([], {}), '()\n', (1009, 1011), False, 'from api_v2.keyvalue_handler import KeyValueHandler\n'), ((575, 622), 'truth.models.Truth.objects.select_related', 'Truth.objects.select_related', (['"""truth_key_value"""'], {}), "('truth_key_value')\n", (603, 622), False, 'from truth.models import KeyValue as TruthKeyValue, Truth\n'), ((675, 716), 'systems.models.KeyValue.objects.select_related', 'KeyValue.objects.select_related', (['"""system"""'], {}), "('system')\n", (706, 716), True, 'from systems.models import KeyValue as KeyValue\n')] |
from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re
try:
import en
except ImportError:
print("DOWNLOD NODECUBE")
print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")
VERSION = "1.1"
THEME_PROB = 0
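# Percent chance (checked as randint(1, 100) < THEME_PROB) that a themed word is
# substituted for a generic one; 0 disables theming entirely.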
class bnfDictionary:
def __init__(self, file):
self.grammar = yaml.load(open(file,'r'))
self.poemtype = "<poem>"
def generate(self, key, num):
gram = self.grammar[key]
if len(gram)==1:
i = 0
else:
i = random.randint(0, len(gram) - 1)
string = ""
if "<" not in gram[i]:
string = gram[i]
else:
for word in gram[i].split():
if "<" not in word:
string = string + word + " "
else:
if "verb" in word and word != '<adverb>':
if "pverb" in word or "mushy" in self.poemtype:
v = self.generate("<pverb>", 1).strip()
elif "nverb" in word:
v = self.generate("<nverb>", 1).strip()
# else:
# v = self.generate("<verb>", 1).strip()
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-verb>", 1).strip()
if "verb-inf" in word:
string = string + \
en.verb.present_participle(v) + " "
elif "verb-pr" in word:
string = string + \
en.verb.present(
v, person=3, negate=False) + " "
elif "verb-past" in word:
string = string + en.verb.past(v) + " "
else:
string = string + v + " "
elif "noun" in word:
if "pnoun" in word or "mushy" in self.poemtype:
v = self.generate("<pnoun>", 1).strip()
elif "nnoun" in word:
v = self.generate("<nnoun>", 1).strip()
else:
v = self.generate("<noun>", 1).strip()
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-noun>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "person" in word:
v = self.generate("<person>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "adj" in word:
if "mushy" in self.poemtype:
v = self.generate("<padj>",1)
else:
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-adj>", 1).strip()
else:
v = self.generate(word, 1).strip()
string = string + v + " "
elif "fruit" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
elif "person" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
else:
if "-pl" in word:
v = en.noun.plural(self.generate(word.replace("-pl",""),1))
else:
v = self.generate(word, 1)
string = string + v + " "
return string
def generatePretty(self, key, seed_str):
if seed_str == None:
seed_str = str(uuid.uuid4()).split("-")[0]
random.seed(uuid.uuid5(uuid.NAMESPACE_DNS,seed_str).int)
#tool = language_check.LanguageTool('en-US')
self.poemtype = key
if key == "<mushypoem>":
key = "<poem>"
poem = self.generate(key, 1)
poem = poem.replace(" ,", ",")
puncuation = [".", ".", ".", ".", "!", "?"]
dontbreaks = ["of", "behind", "the", "when", "what", "why", "who", ",",
"your", "by", "like", "to", "you", "your", "a", "are", "become", "newline"]
capitalize = False
breaks = 0
poem2 = []
foundFirstBreak = False
for word in poem.replace("\n", "newline").split():
poem2.append(word.lower())
if random.randint(1, 100) < 2 and "newline" not in word and foundFirstBreak:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem2.append("newline")
if "newline" in word:
foundFirstBreak = True
poem3 = []
beforeFirstBreak = True
for word in poem2:
if "newline" in word:
breaks += 1
beforeFirstBreak = False
else:
breaks = 0
if beforeFirstBreak or word == "i" or "i'" in word:
word = word.capitalize()
poem3.append(word)
capitalize = False
else:
if breaks > 1:
capitalize = True
if capitalize == True and "newline" not in word:
word = word.capitalize()
capitalize = False
for punc in list(set(puncuation)):
if punc in word:
capitalize = True
poem3.append(word)
if random.randint(1, 100) < 0 and "newline" not in word:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem3.append(random.choice(puncuation))
capitalize = True
# noPunc = True
# for punc in list(set(puncuation)):
# if punc in word:
# noPunc = False
# if noPunc:
# poem3.append(random.choice(puncuation))
newPoem = " ".join(poem3)
newPoem = newPoem.replace(" a a", " an a")
newPoem = newPoem.replace("newline .", ". newline")
newPoem = newPoem.replace("newline ?", "? newline")
newPoem = newPoem.replace("newline !", "! newline")
newPoem = newPoem.replace("newline ,", ", newline")
newPoem = newPoem.replace("newline", "\n")
newPoem = newPoem.replace(" \n \n", "\n\n")
newPoem = newPoem.replace("\n \n ", "\n\n")
newPoem = newPoem.replace(" '", "'")
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
newPoem = newPoem.replace(" ,", ",")
newPoem = newPoem.replace("?.", "?")
newPoem = newPoem.replace(".?", ".")
newPoem = newPoem.replace(",.", ",")
newPoem = newPoem.replace("!.", "!")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
title = newPoem.split("\n")[0]
newTitle = title.replace(".", "")
newPoem = newPoem.replace(title, "<h1>" + newTitle + "</h1>")
newPoem2 = ""
firstLine = False
secondLine = False
for line in newPoem.split("\n"):
if len(line) > 0:
if firstLine and not secondLine:
newPoem2 = newPoem2 + "<p>\n"
secondLine = True
if firstLine == False:
firstLine = True
newPoem2 = newPoem2 + line + " \n"
if firstLine and secondLine:
newPoem2 = newPoem2 + line + " <br />\n"
else:
newPoem2 = newPoem2 + " <br />\n"
newPoem2 = newPoem2 + "</p>"
return newPoem2,seed_str
bnf = bnfDictionary('brain.yaml')
def generate_poem(poemtype, hex_seed=None):
p,seed_str = bnf.generatePretty('<' + poemtype + '>',hex_seed)
return p,seed_str
if __name__ == '__main__':
poemtype = 'poem'
if 'mushy' in sys.argv[1:]:
poemtype = 'mushypoem'
p,seed_str=generate_poem(poemtype)
print(("*"*30 + "\n"*5))
filtered = []
for line in re.sub("<.*?>", " ", p).split("\n"):
if len(line.strip()) > 0:
filtered.append(line.strip())
else:
filtered.append("pause")
print(p)
| [
"uuid.uuid5",
"random.choice",
"en.verb.present_participle",
"uuid.uuid4",
"en.verb.present",
"en.noun.plural",
"re.sub",
"random.randint",
"en.verb.past"
]
| [((9349, 9372), 're.sub', 're.sub', (['"""<.*?>"""', '""" """', 'p'], {}), "('<.*?>', ' ', p)\n", (9355, 9372), False, 'import re\n'), ((4448, 4488), 'uuid.uuid5', 'uuid.uuid5', (['uuid.NAMESPACE_DNS', 'seed_str'], {}), '(uuid.NAMESPACE_DNS, seed_str)\n', (4458, 4488), False, 'import uuid\n'), ((5150, 5172), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (5164, 5172), False, 'import random\n'), ((6373, 6395), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (6387, 6395), False, 'import random\n'), ((1363, 1385), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1377, 1385), False, 'import random\n'), ((4399, 4411), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4409, 4411), False, 'import uuid\n'), ((6694, 6719), 'random.choice', 'random.choice', (['puncuation'], {}), '(puncuation)\n', (6707, 6719), False, 'import random\n'), ((2471, 2493), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2485, 2493), False, 'import random\n'), ((2654, 2671), 'en.noun.plural', 'en.noun.plural', (['v'], {}), '(v)\n', (2668, 2671), False, 'import en\n'), ((1600, 1629), 'en.verb.present_participle', 'en.verb.present_participle', (['v'], {}), '(v)\n', (1626, 1629), False, 'import en\n'), ((2903, 2920), 'en.noun.plural', 'en.noun.plural', (['v'], {}), '(v)\n', (2917, 2920), False, 'import en\n'), ((1764, 1806), 'en.verb.present', 'en.verb.present', (['v'], {'person': '(3)', 'negate': '(False)'}), '(v, person=3, negate=False)\n', (1779, 1806), False, 'import en\n'), ((1946, 1961), 'en.verb.past', 'en.verb.past', (['v'], {}), '(v)\n', (1958, 1961), False, 'import en\n'), ((3183, 3205), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (3197, 3205), False, 'import random\n'), ((3626, 3643), 'en.noun.plural', 'en.noun.plural', (['v'], {}), '(v)\n', (3640, 3643), False, 'import en\n'), ((3895, 3912), 'en.noun.plural', 'en.noun.plural', (['v'], {}), '(v)\n', (3909, 3912), False, 'import en\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.block_storage.v2 import _proxy
from openstack.block_storage.v2 import snapshot
from openstack.block_storage.v2 import stats
from openstack.block_storage.v2 import type
from openstack.block_storage.v2 import volume
from openstack.tests.unit import test_proxy_base
class TestVolumeProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestVolumeProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_snapshot_get(self):
self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot)
def test_snapshots_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.SnapshotDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_snapshots_not_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.Snapshot,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_snapshot_create_attrs(self):
self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot)
def test_snapshot_delete(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, False)
def test_snapshot_delete_ignore(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, True)
def test_type_get(self):
self.verify_get(self.proxy.get_type, type.Type)
def test_types(self):
self.verify_list(self.proxy.types, type.Type, paginated=False)
def test_type_create_attrs(self):
self.verify_create(self.proxy.create_type, type.Type)
def test_type_delete(self):
self.verify_delete(self.proxy.delete_type, type.Type, False)
def test_type_delete_ignore(self):
self.verify_delete(self.proxy.delete_type, type.Type, True)
def test_volume_get(self):
self.verify_get(self.proxy.get_volume, volume.Volume)
def test_volumes_detailed(self):
self.verify_list(self.proxy.volumes, volume.VolumeDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_volumes_not_detailed(self):
self.verify_list(self.proxy.volumes, volume.Volume,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_volume_create_attrs(self):
self.verify_create(self.proxy.create_volume, volume.Volume)
def test_volume_delete(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, False)
def test_volume_delete_ignore(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, True)
def test_volume_extend(self):
self._verify("openstack.block_storage.v2.volume.Volume.extend",
self.proxy.extend_volume,
method_args=["value", "new-size"],
expected_args=["new-size"])
def test_backend_pools(self):
self.verify_list(self.proxy.backend_pools, stats.Pools,
paginated=False)
| [
"openstack.block_storage.v2._proxy.Proxy"
]
| [((967, 993), 'openstack.block_storage.v2._proxy.Proxy', '_proxy.Proxy', (['self.session'], {}), '(self.session)\n', (979, 993), False, 'from openstack.block_storage.v2 import _proxy\n')] |
"""Support for Purrsong LavvieBot S"""
import asyncio
import logging
import voluptuous as vol
from lavviebot import LavvieBotApi
import homeassistant.helpers.config_validation as cv
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup of the component"""
return True
async def async_setup_entry(hass, config_entry):
"""Set up Lavviebot integration from a config entry."""
username = config_entry.data.get(CONF_USERNAME)
password = config_entry.data.get(CONF_PASSWORD)
_LOGGER.info("Initializing the Lavviebot API")
lavviebot = await hass.async_add_executor_job(LavvieBotApi, username, password)
_LOGGER.info("Connected to API")
hass.data[DOMAIN] = lavviebot
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
return True
| [
"logging.getLogger"
]
| [((449, 476), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (466, 476), False, 'import logging\n')] |
from mars import main_loop
import numpy as np
from mars.settings import *
class Problem:
"""
Synopsis
--------
User class for the Kelvin-Helmholtz instability
Args
----
None
Methods
-------
initialise
Set all variables in each cell to initialise the simulation.
internal_bc
Specify the internal boundary for the simulation.
TODO
----
None
"""
def __init__(self):
self.parameter = {
            'Name':'Kelvin-Helmholtz instability.',
'Dimensions':'2D',
'x1 min':-0.5,
'x1 max':0.5,
'x2 min':-0.5,
'x2 max':0.5,
'x3 min':-0.5,
'x3 max':0.5,
'resolution x1':256,
'resolution x2':256,
'resolution x3':0,
'cfl':0.3,
'initial dt':1.0e-5,
'max dt increase':1.5,
'initial t': 0.0,
'max time': 5.0,
'save frequency': 2.5e-2,
'output type': ['numpy'],
'output primitives': True,
'print to file':False,
'profiling': True,
'restart file':None,
'gamma':1.4,
'density unit':1.0,
'length unit':1.0,
'velocity unit':1.0,
'optimisation': 'numba',
'riemann':'hllc',
'reconstruction':'linear',
'limiter':'minmod',
'time stepping':'RK2',
'method':'hydro',
'lower x1 boundary':'reciprocal',
'upper x1 boundary':'reciprocal',
'lower x2 boundary':'reciprocal',
'upper x2 boundary':'reciprocal',
'lower x3 boundary':'reciprocal',
'upper x3 boundary':'reciprocal',
'internal boundary':False
}
def initialise(self, V, g, l):
if self.parameter['Dimensions'] == '2D':
Y, X = np.meshgrid(g.x1, g.x2, indexing='ij')
if self.parameter['Dimensions'] == '3D':
Z, Y, X = np.meshgrid(g.x1, g.x2, g.x3, indexing='ij')
yp = 0.25
dens_1 = 2.0
dens_2 = 1.0
pres = 2.0
vel_1 = 0.5
vel_2 = 0.0
amp = 0.001
vx1_per = (np.random.random(V.shape)*2.0 - 1)*amp
vx2_per = (np.random.random(V.shape)*2.0 - 1)*amp
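        # Shear-layer setup: the band |Y| < yp is denser and streams with +vel_1,
        # the outer region streams with -vel_1, and the small random velocity
        # perturbations seed the Kelvin-Helmholtz roll-up.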
region_1 = np.absolute(Y) < yp
region_2 = np.absolute(Y) > yp
V[rho, region_1] = dens_1
V[prs, region_1] = pres
V[vx1, region_1] = vel_1 + vx1_per[vx1, region_1]
V[vx2, region_1] = vel_2 + vx2_per[vx2, region_1]
V[rho, region_2] = dens_2
V[prs, region_2] = pres
V[vx1, region_2] = -vel_1 + vx1_per[vx1, region_2]
V[vx2, region_2] = vel_2 + vx2_per[vx2, region_2]
def internal_bc(self):
return None
if __name__ == "__main__":
main_loop(Problem())
| [
"numpy.random.random",
"numpy.meshgrid",
"numpy.absolute"
]
| [((1932, 1970), 'numpy.meshgrid', 'np.meshgrid', (['g.x1', 'g.x2'], {'indexing': '"""ij"""'}), "(g.x1, g.x2, indexing='ij')\n", (1943, 1970), True, 'import numpy as np\n'), ((2043, 2087), 'numpy.meshgrid', 'np.meshgrid', (['g.x1', 'g.x2', 'g.x3'], {'indexing': '"""ij"""'}), "(g.x1, g.x2, g.x3, indexing='ij')\n", (2054, 2087), True, 'import numpy as np\n'), ((2365, 2379), 'numpy.absolute', 'np.absolute', (['Y'], {}), '(Y)\n', (2376, 2379), True, 'import numpy as np\n'), ((2404, 2418), 'numpy.absolute', 'np.absolute', (['Y'], {}), '(Y)\n', (2415, 2418), True, 'import numpy as np\n'), ((2248, 2273), 'numpy.random.random', 'np.random.random', (['V.shape'], {}), '(V.shape)\n', (2264, 2273), True, 'import numpy as np\n'), ((2306, 2331), 'numpy.random.random', 'np.random.random', (['V.shape'], {}), '(V.shape)\n', (2322, 2331), True, 'import numpy as np\n')] |
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
def generate():
# Generate random data
base = np.linspace(0, 5, 11)
# base = np.random.randint(0, 10, 5)
outliers = np.random.randint(10, 20, 2)
data = np.concatenate((base, outliers))
np.random.shuffle(data)
return data
def fill_data():
# Build random data
return np.concatenate((np.array([0]), MadDog.find_outliers(generate()))), np.concatenate(
(np.array([0]), MadDog.find_outliers(generate()))) # np.sin(x) + np.cos(x) + np.random.random(100)
# np.linspace(0, 2*np.pi, 100)
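# Smooth both series with a Savitzky-Golay filter whose window spans nearly the
# whole signal; savgol_filter needs the window length to exceed the polynomial
# order (and, in older SciPy releases, to be odd).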
def savitzky(x, y, poly_order):
    return savgol_filter(x, len(x) - 1, poly_order), savgol_filter(y, len(y) - 1, poly_order)
def map(x_filtered, y_filtered, x, y, title="title"):
# Generate some test data
heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
heatmap, xedges, yedges = np.histogram2d(x_filtered, y_filtered, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
def show(x_filtered, y_filtered, x, y, title="Savitzky-Golay smoothing"):
# Plotting
fig = plt.figure()
ax = fig.subplots()
plt.plot(x_filtered, y_filtered, 'red', marker="o")
plt.plot(x, y, 'green', marker="o")
plt.subplots_adjust(bottom=0.25)
plt.xlabel('x')
plt.ylabel('y')
plt.title(title)
plt.legend(["Filter", "Raw"])
plt.show()
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.random.randint",
"matplotlib.pyplot.figure",
"numpy.concatenate",
"numpy.histogram2d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"numpy.random.shuffle"
]
| [((175, 196), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(11)'], {}), '(0, 5, 11)\n', (186, 196), True, 'import numpy as np\n'), ((253, 281), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)', '(2)'], {}), '(10, 20, 2)\n', (270, 281), True, 'import numpy as np\n'), ((293, 325), 'numpy.concatenate', 'np.concatenate', (['(base, outliers)'], {}), '((base, outliers))\n', (307, 325), True, 'import numpy as np\n'), ((330, 353), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (347, 353), True, 'import numpy as np\n'), ((877, 906), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {'bins': '(50)'}), '(x, y, bins=50)\n', (891, 906), True, 'import numpy as np\n'), ((972, 981), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (979, 981), True, 'import matplotlib.pyplot as plt\n'), ((986, 1038), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap.T'], {'extent': 'extent', 'origin': '"""lower"""'}), "(heatmap.T, extent=extent, origin='lower')\n", (996, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1051, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1132), 'numpy.histogram2d', 'np.histogram2d', (['x_filtered', 'y_filtered'], {'bins': '(50)'}), '(x_filtered, y_filtered, bins=50)\n', (1099, 1132), True, 'import numpy as np\n'), ((1198, 1207), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1205, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1264), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap.T'], {'extent': 'extent', 'origin': '"""lower"""'}), "(heatmap.T, extent=extent, origin='lower')\n", (1222, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1269, 1279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1277, 1279), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1380), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1378, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1460), 'matplotlib.pyplot.plot', 'plt.plot', (['x_filtered', 'y_filtered', '"""red"""'], {'marker': '"""o"""'}), "(x_filtered, y_filtered, 'red', marker='o')\n", (1417, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1500), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""green"""'], {'marker': '"""o"""'}), "(x, y, 'green', marker='o')\n", (1473, 1500), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1537), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.25)'}), '(bottom=0.25)\n', (1524, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1552, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1577), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1572, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1598), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1591, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1632), 'matplotlib.pyplot.legend', 'plt.legend', (["['Filter', 'Raw']"], {}), "(['Filter', 'Raw'])\n", (1613, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1645, 1647), True, 'import matplotlib.pyplot as plt\n'), ((440, 453), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (448, 453), True, 'import numpy as np\n'), ((516, 529), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (524, 529), True, 'import numpy as np\n')] |
import pytest
from pyminhash import MinHash
from pyminhash.datasets import load_data
def test__sparse_vector():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher._sparse_vectorize(df, 'name')
assert res.columns.tolist() == ['name', 'sparse_vector']
assert res['sparse_vector'].dtype == 'object'
def test__create_hashing_parameters():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
res = myMinHasher._create_hashing_parameters()
assert len(res) == n_hashes
assert res.dtype == 'int64'
assert min(res) >= 0
assert min(res) <= myMinHasher.max_token_value
def test__create_minhash():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
doc = [59, 65, 66, 67, 118, 150, 266]
res = myMinHasher._create_minhash(doc)
assert len(res) == n_hashes
def test__create_minhash_signatures():
df = load_data()
myMinHasher = MinHash(3)
df = myMinHasher._sparse_vectorize(df, 'name')
df = myMinHasher._create_minhash_signatures(df)
for col in ['hash_0', 'hash_1', 'hash_2']:
assert col in df.columns
assert df[col].dtype == 'int64'
def test_fit_predict():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher.fit_predict(df, 'name')
assert res.columns.tolist() == ['row_number_1', 'row_number_2', 'name_1', 'name_2', 'jaccard_sim']
assert res['jaccard_sim'].dtype == 'float'
def test_fit_predict_accuracy():
def jaccard(x, y):
x_tokens = set(x.split())
y_tokens = set(y.split())
return len(x_tokens.intersection(y_tokens)) / len(x_tokens.union(y_tokens))
df = load_data()
myMinHasher = MinHash(1000)
res = myMinHasher.fit_predict(df, 'name')
assert len(res) == 1727
res['jaccard_real'] = res.apply(lambda row: jaccard(row['name_1'], row['name_2']), axis=1)
res['diff'] = res['jaccard_real'] - res['jaccard_sim']
assert abs(res['diff'].mean()) < 0.02
assert res['diff'].std() < 0.1
| [
"pyminhash.MinHash",
"pyminhash.datasets.load_data"
]
| [((124, 135), 'pyminhash.datasets.load_data', 'load_data', ([], {}), '()\n', (133, 135), False, 'from pyminhash.datasets import load_data\n'), ((154, 165), 'pyminhash.MinHash', 'MinHash', (['(10)'], {}), '(10)\n', (161, 165), False, 'from pyminhash import MinHash\n'), ((406, 437), 'pyminhash.MinHash', 'MinHash', ([], {'n_hash_tables': 'n_hashes'}), '(n_hash_tables=n_hashes)\n', (413, 437), False, 'from pyminhash import MinHash\n'), ((695, 726), 'pyminhash.MinHash', 'MinHash', ([], {'n_hash_tables': 'n_hashes'}), '(n_hash_tables=n_hashes)\n', (702, 726), False, 'from pyminhash import MinHash\n'), ((894, 905), 'pyminhash.datasets.load_data', 'load_data', ([], {}), '()\n', (903, 905), False, 'from pyminhash.datasets import load_data\n'), ((924, 934), 'pyminhash.MinHash', 'MinHash', (['(3)'], {}), '(3)\n', (931, 934), False, 'from pyminhash import MinHash\n'), ((1193, 1204), 'pyminhash.datasets.load_data', 'load_data', ([], {}), '()\n', (1202, 1204), False, 'from pyminhash.datasets import load_data\n'), ((1223, 1234), 'pyminhash.MinHash', 'MinHash', (['(10)'], {}), '(10)\n', (1230, 1234), False, 'from pyminhash import MinHash\n'), ((1651, 1662), 'pyminhash.datasets.load_data', 'load_data', ([], {}), '()\n', (1660, 1662), False, 'from pyminhash.datasets import load_data\n'), ((1681, 1694), 'pyminhash.MinHash', 'MinHash', (['(1000)'], {}), '(1000)\n', (1688, 1694), False, 'from pyminhash import MinHash\n')] |
from datetime import datetime, timedelta
from typing import Final
from tools import localize_time
RSS_URL_PREFIX: Final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: Final = '--location='
CHANNEL_ARGUMENT_PREFIX: Final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: Final = '--last-check='
TWO_WEEKS_IN_DAYS: Final = 14
DEFAULT_LAST_CHECK: Final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: Final = ''
CHANNEL_POSTS_LIMIT: Final = 20
| [
"datetime.datetime.now",
"datetime.timedelta"
]
| [((401, 415), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (413, 415), False, 'from datetime import datetime, timedelta\n'), ((418, 451), 'datetime.timedelta', 'timedelta', ([], {'days': 'TWO_WEEKS_IN_DAYS'}), '(days=TWO_WEEKS_IN_DAYS)\n', (427, 451), False, 'from datetime import datetime, timedelta\n')] |
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter
__all__ = (
'PIC16Caravel',
)
class PIC16Caravel(Elaboratable):
def elaborate(self, platform):
from .pic16 import PIC16
from .soc.busses.qspi import QSPIBus
m = Module()
reset = Signal()
busy_n = Signal(reset = 1)
m.submodules.qspiFlash = qspiFlash = QSPIBus(resourceName = ('spi_flash_4x', 0))
m.submodules.pic = pic = ResetInserter(reset)(EnableInserter(busy_n)(PIC16()))
run = platform.request('run', 0)
pBus = platform.request('p_bus', 0)
addr = pBus.addr.o
dataIn = pBus.data.i
dataOut = pBus.data.o
dataDir = pBus.data.oe
read = pBus.read
write = pBus.write
with m.If(qspiFlash.complete | reset):
m.d.sync += busy_n.eq(1)
with m.Elif(pic.iBus.read):
m.d.sync += busy_n.eq(0)
m.d.comb += [
reset.eq(~qspiFlash.ready),
run.o.eq(qspiFlash.ready & busy_n),
qspiFlash.address[0].eq(0),
qspiFlash.address[1:].eq(pic.iBus.address),
pic.iBus.data.eq(qspiFlash.data),
qspiFlash.read.eq(pic.iBus.read),
addr.eq(pic.pBus.address),
read.eq(pic.pBus.read),
pic.pBus.readData.eq(dataIn),
write.eq(pic.pBus.write),
dataOut.eq(pic.pBus.writeData),
dataDir.eq(pic.pBus.write),
]
return m
def get_ports(self):
return []
| [
"amaranth.EnableInserter",
"amaranth.ResetInserter",
"amaranth.Signal",
"amaranth.Module"
]
| [((292, 300), 'amaranth.Module', 'Module', ([], {}), '()\n', (298, 300), False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((311, 319), 'amaranth.Signal', 'Signal', ([], {}), '()\n', (317, 319), False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((331, 346), 'amaranth.Signal', 'Signal', ([], {'reset': '(1)'}), '(reset=1)\n', (337, 346), False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((460, 480), 'amaranth.ResetInserter', 'ResetInserter', (['reset'], {}), '(reset)\n', (473, 480), False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n'), ((481, 503), 'amaranth.EnableInserter', 'EnableInserter', (['busy_n'], {}), '(busy_n)\n', (495, 503), False, 'from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter\n')] |
from mrs.bucket import WriteBucket
from mrs import BinWriter, HexWriter
def test_writebucket():
b = WriteBucket(0, 0)
b.addpair((4, 'test'))
b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
values = ' '.join(value for key, value in b)
assert values == 'test a This is'
b.sort()
values = ' '.join(value for key, value in b)
assert values == 'This is a test'
def test_write_only():
b = WriteBucket(0, 0)
b.addpair((4, 'test'), write_only=True)
b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
values = ' '.join(value for key, value in b)
assert values == ''
readonly_copy = b.readonly_copy()
assert readonly_copy.url is None
def test_writing(tmpdir):
b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
prefix = b.prefix()
assert prefix == 'source_2_split_4_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((1, 2))
filename = prefix + '.mrsb'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
def test_roundtrip(tmpdir):
b = WriteBucket(2, 4, dir=tmpdir.strpath, format=BinWriter)
prefix = b.prefix()
assert prefix == 'source_2_split_4_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((4, 'test'))
b.collect([(3, 'a'), (1, 'This'), (2, 'is')])
values = ' '.join(value for key, value in b)
assert values == 'test a This is'
b.close_writer(do_sync=False)
filename = prefix + '.mrsb'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
values = ' '.join(value for key, value in readonly_copy)
assert values == 'test a This is'
values = ' '.join(value for key, value in readonly_copy.stream())
assert values == 'test a This is'
b.clean()
listdir = tmpdir.listdir()
assert listdir == []
def test_roundtrip_write_only(tmpdir):
b = WriteBucket(7, 1, dir=tmpdir.strpath, format=HexWriter)
prefix = b.prefix()
assert prefix == 'source_7_split_1_'
listdir = tmpdir.listdir()
assert listdir == []
b.addpair((4, 'test'), write_only=True)
b.collect([(3, 'a'), (1, 'This'), (2, 'is')], write_only=True)
values = ' '.join(value for key, value in b)
assert values == ''
b.close_writer(do_sync=False)
filename = prefix + '.mrsx'
path = tmpdir.join(filename).strpath
listdir = tmpdir.listdir()
assert listdir == [path]
readonly_copy = b.readonly_copy()
assert readonly_copy.url == path
values = ' '.join(value for key, value in readonly_copy)
assert values == ''
values = ' '.join(value for key, value in readonly_copy.stream())
assert values == 'test a This is'
b.clean()
listdir = tmpdir.listdir()
assert listdir == []
# vim: et sw=4 sts=4
| [
"mrs.bucket.WriteBucket"
]
| [((105, 122), 'mrs.bucket.WriteBucket', 'WriteBucket', (['(0)', '(0)'], {}), '(0, 0)\n', (116, 122), False, 'from mrs.bucket import WriteBucket\n'), ((421, 438), 'mrs.bucket.WriteBucket', 'WriteBucket', (['(0)', '(0)'], {}), '(0, 0)\n', (432, 438), False, 'from mrs.bucket import WriteBucket\n'), ((735, 790), 'mrs.bucket.WriteBucket', 'WriteBucket', (['(2)', '(4)'], {'dir': 'tmpdir.strpath', 'format': 'BinWriter'}), '(2, 4, dir=tmpdir.strpath, format=BinWriter)\n', (746, 790), False, 'from mrs.bucket import WriteBucket\n'), ((1183, 1238), 'mrs.bucket.WriteBucket', 'WriteBucket', (['(2)', '(4)'], {'dir': 'tmpdir.strpath', 'format': 'BinWriter'}), '(2, 4, dir=tmpdir.strpath, format=BinWriter)\n', (1194, 1238), False, 'from mrs.bucket import WriteBucket\n'), ((2099, 2154), 'mrs.bucket.WriteBucket', 'WriteBucket', (['(7)', '(1)'], {'dir': 'tmpdir.strpath', 'format': 'HexWriter'}), '(7, 1, dir=tmpdir.strpath, format=HexWriter)\n', (2110, 2154), False, 'from mrs.bucket import WriteBucket\n')] |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import re
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
class UnknownOrigin(dns.exception.DNSException):
"""Unknown origin"""
class CNAMEAndOtherData(dns.exception.DNSException):
"""A node has a CNAME and other data"""
def _check_cname_and_other_data(txn, name, rdataset):
rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
node = txn.get_node(name)
if node is None:
# empty nodes are neutral.
return
node_kind = node.classify()
if node_kind == dns.node.NodeKind.CNAME and \
rdataset_kind == dns.node.NodeKind.REGULAR:
raise CNAMEAndOtherData('rdataset type is not compatible with a '
'CNAME node')
elif node_kind == dns.node.NodeKind.REGULAR and \
rdataset_kind == dns.node.NodeKind.CNAME:
raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
'regular data node')
# Otherwise at least one of the node and the rdataset is neutral, so
# adding the rdataset is ok
class Reader:
"""Read a DNS zone file into a transaction."""
def __init__(self, tok, rdclass, txn, allow_include=False,
allow_directives=True, force_name=None,
force_ttl=None, force_rdclass=None, force_rdtype=None,
default_ttl=None):
self.tok = tok
(self.zone_origin, self.relativize, _) = \
txn.manager.origin_information()
self.current_origin = self.zone_origin
self.last_ttl = 0
self.last_ttl_known = False
if force_ttl is not None:
default_ttl = force_ttl
if default_ttl is None:
self.default_ttl = 0
self.default_ttl_known = False
else:
self.default_ttl = default_ttl
self.default_ttl_known = True
self.last_name = self.current_origin
self.zone_rdclass = rdclass
self.txn = txn
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.allow_directives = allow_directives
self.force_name = force_name
self.force_ttl = force_ttl
self.force_rdclass = force_rdclass
self.force_rdtype = force_rdtype
self.txn.check_put_rdataset(_check_cname_and_other_data)
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _get_identifier(self):
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
return token
def _rr_line(self):
"""Process one line from a DNS zone file."""
token = None
# Name
if self.force_name is not None:
name = self.force_name
else:
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = self.tok.as_name(token, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
# TTL
if self.force_ttl is not None:
ttl = self.force_ttl
self.last_ttl = ttl
self.last_ttl_known = True
else:
token = self._get_identifier()
ttl = None
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = None
except dns.ttl.BadTTL:
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
self.tok.unget(token)
# Class
if self.force_rdclass is not None:
rdclass = self.force_rdclass
else:
token = self._get_identifier()
try:
rdclass = dns.rdataclass.from_text(token.value)
except dns.exception.SyntaxError:
raise
except Exception:
rdclass = self.zone_rdclass
self.tok.unget(token)
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
if self.force_rdtype is not None:
rdtype = self.force_rdtype
else:
token = self._get_identifier()
try:
rdtype = dns.rdatatype.from_text(token.value)
except Exception:
raise dns.exception.SyntaxError(
"unknown rdatatype '%s'" % token.value)
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError(
"caught exception {}: {}".format(str(ty), str(va)))
if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
# The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
# TTL from the SOA minttl if no $TTL statement is present before the
# SOA is parsed.
self.default_ttl = rd.minimum
self.default_ttl_known = True
if ttl is None:
# if we didn't have a TTL on the SOA, set it!
ttl = rd.minimum
# TTL check. We had to wait until now to do this as the SOA RR's
# own TTL can be inferred from its minimum.
if ttl is None:
raise dns.exception.SyntaxError("Missing default TTL value")
self.txn.add(name, ttl, rd)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
mod, sign, offset, width = g3.groups()
if sign == '':
sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError()
return mod, sign, offset, width, base
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
zone file."""
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
if not (self.last_ttl_known or self.default_ttl_known):
raise dns.exception.SyntaxError("Missing default TTL value")
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone_rdclass
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# rhs (required)
rhs = token.value
# The code currently only supports base 'd', so the last value
# in the tuple _parse_modify returns is ignored
lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == '+':
lindex = i + int(loffset)
elif lsign == '-':
lindex = i - int(loffset)
if rsign == '-':
rindex = i - int(roffset)
elif rsign == '+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace('$%s' % (lmod), lzfindex)
rdata = rhs.replace('$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin,
self.tok.idna_codec)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
self.txn.add(name, ttl, rd)
def read(self):
"""Read a DNS zone file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True)
if token.is_eof():
if self.current_file is not None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == '$' and self.allow_directives:
c = token.value.upper()
if c == '$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.default_ttl = dns.ttl.from_text(token.value)
self.default_ttl_known = True
self.tok.get_eol()
elif c == '$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone_origin is None:
self.zone_origin = self.current_origin
self.txn._set_origin(self.current_origin)
elif c == '$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin =\
dns.name.from_text(token.value,
self.current_origin,
self.tok.idna_codec)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
"bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known))
self.current_file = open(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif c == '$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError(
"Unknown zone file directive '" + c + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError as detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
ex = dns.exception.SyntaxError(
"%s:%d: %s" % (filename, line_number, detail))
tb = sys.exc_info()[2]
raise ex.with_traceback(tb) from None
class RRsetsReaderTransaction(dns.transaction.Transaction):
def __init__(self, manager, replacement, read_only):
assert not read_only
super().__init__(manager, replacement, read_only)
self.rdatasets = {}
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _get_node(self, name):
rdatasets = []
for (rdataset_name, _, _), rdataset in self.rdatasets.items():
if name == rdataset_name:
rdatasets.append(rdataset)
if len(rdatasets) == 0:
return None
node = dns.node.Node()
node.rdatasets = rdatasets
return node
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
# First remove any changes involving the name
remove = []
for key in self.rdatasets:
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
try:
del self.rdatasets[(name, rdtype, covers)]
except KeyError:
pass
def _name_exists(self, name):
for (n, _, _) in self.rdatasets:
if n == name:
return True
return False
def _changed(self):
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit and self._changed():
rrsets = []
for (name, _, _), rdataset in self.rdatasets.items():
rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype,
rdataset.covers)
rrset.update(rdataset)
rrsets.append(rrset)
self.manager.set_rrsets(rrsets)
def _set_origin(self, origin):
pass
class RRSetsReaderManager(dns.transaction.TransactionManager):
def __init__(self, origin=dns.name.root, relativize=False,
rdclass=dns.rdataclass.IN):
self.origin = origin
self.relativize = relativize
self.rdclass = rdclass
self.rrsets = []
def writer(self, replacement=False):
assert replacement is True
return RRsetsReaderTransaction(self, True, False)
def get_class(self):
return self.rdclass
def origin_information(self):
if self.relativize:
effective = dns.name.empty
else:
effective = self.origin
return (self.origin, self.relativize, effective)
def set_rrsets(self, rrsets):
self.rrsets = rrsets
def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
default_rdclass=dns.rdataclass.IN,
rdtype=None, default_ttl=None, idna_codec=None,
origin=dns.name.root, relativize=False):
"""Read one or more rrsets from the specified text, possibly subject
to restrictions.
*text*, a file object or a string, is the input to process.
*name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
the rrset. If not ``None``, then the owner name is "forced", and the
input must not specify an owner name. If ``None``, then any owner names
are allowed and must be present in the input.
*ttl*, an ``int``, string, or None. If not ``None``, the the TTL is
forced to be the specified value and the input must not specify a TTL.
If ``None``, then a TTL may be specified in the input. If it is not
specified, then the *default_ttl* will be used.
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
not ``None``, then the class is forced to the specified value, and the
input must not specify a class. If ``None``, then the input may specify
a class that matches *default_rdclass*. Note that it is not possible to
return rrsets with differing classes; specifying ``None`` for the class
simply allows the user to optionally type a class as that may be convenient
when cutting and pasting.
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
of the returned rrsets.
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
``None``, then the type is forced to the specified value, and the
input must not specify a type. If ``None``, then a type must be present
for each RR.
*default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
the TTL is not forced and is not specified, then this value will be used.
if ``None``, then if the TTL is not forced an error will occur if the TTL
is not specified.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used. Note that codecs only apply to the owner name; dnspython does
not do IDNA for names in rdata, as there is no IDNA zonefile format.
*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
relative names in the input, and also the origin to relativize to if
*relativize* is ``True``.
*relativize*, a bool. If ``True``, names are relativized to the *origin*;
if ``False`` then any relative names in the input are made absolute by
appending the *origin*.
"""
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root, idna_codec)
if isinstance(name, str):
name = dns.name.from_text(name, origin, idna_codec)
if isinstance(ttl, str):
ttl = dns.ttl.from_text(ttl)
if isinstance(default_ttl, str):
default_ttl = dns.ttl.from_text(default_ttl)
if rdclass is not None:
rdclass = dns.rdataclass.RdataClass.make(rdclass)
default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
if rdtype is not None:
rdtype = dns.rdatatype.RdataType.make(rdtype)
manager = RRSetsReaderManager(origin, relativize, default_rdclass)
with manager.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
reader = Reader(tok, default_rdclass, txn, allow_directives=False,
force_name=name, force_ttl=ttl, force_rdclass=rdclass,
force_rdtype=rdtype, default_ttl=default_ttl)
reader.read()
return manager.rrsets
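# Illustrative sketch (not part of the original module): a minimal use of
# read_rrsets() on literal zone-style text; the owner name and addresses are
# made up for the example.
if __name__ == '__main__':
    _example_rrsets = read_rrsets('www 3600 IN A 10.0.0.1\nwww 3600 IN AAAA ::1\n',
                                 origin='example.', relativize=False)
    for _rrset in _example_rrsets:
        print(_rrset)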
| [
"sys.exc_info",
"re.compile"
]
| [((7896, 7948), 're.compile', 're.compile', (['"""^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+),(.)}).*$"""'], {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+),(.)}).*$')\n", (7906, 7948), False, 'import re\n'), ((7969, 8010), 're.compile', 're.compile', (['"""^.*\\\\$({(\\\\+|-?)(\\\\d+)}).*$"""'], {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+)}).*$')\n", (7979, 8010), False, 'import re\n'), ((8032, 8080), 're.compile', 're.compile', (['"""^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+)}).*$"""'], {}), "('^.*\\\\$({(\\\\+|-?)(\\\\d+),(\\\\d+)}).*$')\n", (8042, 8080), False, 'import re\n'), ((6879, 6893), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6891, 6893), False, 'import sys\n'), ((17791, 17805), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (17803, 17805), False, 'import sys\n'), ((13377, 13391), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13389, 13391), False, 'import sys\n')] |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet.debug
from eventlet.hubs import use_hub
from swift.common import utils
class Daemon(object):
"""
Daemon base class
A daemon has a run method that accepts a ``once`` kwarg and will dispatch
to :meth:`run_once` or :meth:`run_forever`.
A subclass of Daemon must implement :meth:`run_once` and
:meth:`run_forever`.
A subclass of Daemon may override :meth:`get_worker_args` to dispatch
arguments to individual child process workers and :meth:`is_healthy` to
perform context specific periodic wellness checks which can reset worker
arguments.
Implementations of Daemon do not know *how* to daemonize, or execute
multiple daemonized workers, they simply provide the behavior of the daemon
and context specific knowledge about how workers should be started.
"""
def __init__(self, conf):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
def run(self, once=False, **kwargs):
if once:
self.run_once(**kwargs)
else:
self.run_forever(**kwargs)
def post_multiprocess_run(self):
"""
Override this to do something after running using multiple worker
processes. This method is called in the parent process.
This is probably only useful for run-once mode since there is no
"after running" in run-forever mode.
"""
pass
def get_worker_args(self, once=False, **kwargs):
"""
For each worker yield a (possibly empty) dict of kwargs to pass along
to the daemon's :meth:`run` method after fork. The length of elements
returned from this method will determine the number of processes
created.
If the returned iterable is empty, the Strategy will fallback to
run-inline strategy.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: plumbed through via command line argparser
:returns: an iterable of dicts, each element represents the kwargs to
be passed to a single worker's :meth:`run` method after fork.
"""
return []
def is_healthy(self):
"""
This method is called very frequently on the instance of the daemon
held by the parent process. If it returns False, all child workers are
terminated, and new workers will be created.
:returns: a boolean, True only if all workers should continue to run
"""
return True
class DaemonStrategy(object):
"""
This is the execution strategy for using subclasses of Daemon. The default
behavior is to invoke the daemon's :meth:`Daemon.run` method from within
the parent process. When the :meth:`Daemon.run` method returns the parent
process will exit.
However, if the Daemon returns a non-empty iterable from
:meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will
be invoked in child processes, with the arguments provided from the parent
process's instance of the daemon. If a child process exits it will be
restarted with the same options, unless it was executed in once mode.
:param daemon: an instance of a :class:`Daemon` (has a `run` method)
:param logger: a logger instance
"""
def __init__(self, daemon, logger):
self.daemon = daemon
self.logger = logger
self.running = False
# only used by multi-worker strategy
self.options_by_pid = {}
self.unspawned_worker_options = []
def setup(self, **kwargs):
utils.validate_configuration()
utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
utils.clean_up_daemon_hygiene()
utils.capture_stdio(self.logger, **kwargs)
def kill_children(*args):
self.running = False
self.logger.info('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
os._exit(0)
signal.signal(signal.SIGTERM, kill_children)
self.running = True
def _run_inline(self, once=False, **kwargs):
"""Run the daemon"""
self.daemon.run(once=once, **kwargs)
def run(self, once=False, **kwargs):
"""Daemonize and execute our strategy"""
self.setup(**kwargs)
try:
self._run(once=once, **kwargs)
except KeyboardInterrupt:
self.logger.notice('User quit')
finally:
self.cleanup()
self.running = False
def _fork(self, once, **kwargs):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.daemon.run(once, **kwargs)
self.logger.debug('Forked worker %s finished', os.getpid())
# do not return from this stack, nor execute any finally blocks
os._exit(0)
else:
self.register_worker_start(pid, kwargs)
return pid
def iter_unspawned_workers(self):
while True:
try:
per_worker_options = self.unspawned_worker_options.pop()
except IndexError:
return
yield per_worker_options
def spawned_pids(self):
return list(self.options_by_pid.keys())
def register_worker_start(self, pid, per_worker_options):
self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
self.options_by_pid[pid] = per_worker_options
def register_worker_exit(self, pid):
self.unspawned_worker_options.append(self.options_by_pid.pop(pid))
def ask_daemon_to_prepare_workers(self, once, **kwargs):
self.unspawned_worker_options = list(
self.daemon.get_worker_args(once=once, **kwargs))
def abort_workers_if_daemon_would_like(self):
if not self.daemon.is_healthy():
self.logger.debug(
'Daemon needs to change options, aborting workers')
self.cleanup()
return True
return False
def check_on_all_running_workers(self):
for p in self.spawned_pids():
try:
pid, status = os.waitpid(p, os.WNOHANG)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
self.logger.notice('Worker %s died', p)
else:
if pid == 0:
# child still running
continue
self.logger.debug('Worker %s exited', p)
self.register_worker_exit(p)
def _run(self, once, **kwargs):
self.ask_daemon_to_prepare_workers(once, **kwargs)
if not self.unspawned_worker_options:
return self._run_inline(once, **kwargs)
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
while self.running:
if self.abort_workers_if_daemon_would_like():
self.ask_daemon_to_prepare_workers(once, **kwargs)
self.check_on_all_running_workers()
if not once:
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
else:
if not self.spawned_pids():
self.logger.notice('Finished %s', os.getpid())
break
time.sleep(0.1)
self.daemon.post_multiprocess_run()
return 0
def cleanup(self):
for p in self.spawned_pids():
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
raise
self.register_worker_exit(p)
self.logger.debug('Cleaned up worker %s', p)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
use_hub(utils.get_hub())
# once on command line (i.e. daemonize=false) will over-ride config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
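# --- Usage sketch (not part of the original module) -------------------------
# A minimal example of how run_daemon() is typically invoked from a CLI entry
# point. `MyReplicator` and '/etc/swift/my.conf' are hypothetical placeholders,
# and the Daemon base class is assumed to be defined earlier in this module.
def _example_run_daemon():
    class MyReplicator(Daemon):
        def run_once(self, *args, **kwargs):
            self.logger.info('single pass done')

        def run_forever(self, *args, **kwargs):
            self.logger.info('looping forever')

    # section_name is derived automatically: MyReplicator -> my-replicator
    run_daemon(MyReplicator, '/etc/swift/my.conf', once=True, verbose=True)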
| [
"signal.signal",
"swift.common.utils.validate_configuration",
"sys.exit",
"os.kill",
"swift.common.utils.disable_fallocate",
"swift.common.utils.capture_stdio",
"time.tzset",
"os.waitpid",
"swift.common.utils.get_logger",
"time.sleep",
"swift.common.utils.get_hub",
"os.killpg",
"os._exit",
"swift.common.utils.clean_up_daemon_hygiene",
"os.getpid",
"os.fork",
"re.sub",
"swift.common.utils.modify_priority"
]
| [((10760, 10795), 'swift.common.utils.modify_priority', 'utils.modify_priority', (['conf', 'logger'], {}), '(conf, logger)\n', (10781, 10795), False, 'from swift.common import utils\n'), ((11474, 11486), 'time.tzset', 'time.tzset', ([], {}), '()\n', (11484, 11486), False, 'import time\n'), ((1586, 1628), 'swift.common.utils.get_logger', 'utils.get_logger', (['conf'], {'log_route': '"""daemon"""'}), "(conf, log_route='daemon')\n", (1602, 1628), False, 'from swift.common import utils\n'), ((4639, 4669), 'swift.common.utils.validate_configuration', 'utils.validate_configuration', ([], {}), '()\n', (4667, 4669), False, 'from swift.common import utils\n'), ((4747, 4778), 'swift.common.utils.clean_up_daemon_hygiene', 'utils.clean_up_daemon_hygiene', ([], {}), '()\n', (4776, 4778), False, 'from swift.common import utils\n'), ((4787, 4829), 'swift.common.utils.capture_stdio', 'utils.capture_stdio', (['self.logger'], {}), '(self.logger, **kwargs)\n', (4806, 4829), False, 'from swift.common import utils\n'), ((5079, 5123), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'kill_children'], {}), '(signal.SIGTERM, kill_children)\n', (5092, 5123), False, 'import signal\n'), ((5655, 5664), 'os.fork', 'os.fork', ([], {}), '()\n', (5662, 5664), False, 'import os\n'), ((10224, 10239), 'swift.common.utils.get_hub', 'utils.get_hub', ([], {}), '()\n', (10237, 10239), False, 'from swift.common import utils\n'), ((10909, 10934), 'swift.common.utils.disable_fallocate', 'utils.disable_fallocate', ([], {}), '()\n', (10932, 10934), False, 'from swift.common import utils\n'), ((11521, 11532), 'os.getpid', 'os.getpid', ([], {}), '()\n', (11530, 11532), False, 'import os\n'), ((11706, 11717), 'os.getpid', 'os.getpid', ([], {}), '()\n', (11715, 11717), False, 'import os\n'), ((4959, 5004), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal.SIG_IGN'], {}), '(signal.SIGTERM, signal.SIG_IGN)\n', (4972, 5004), False, 'import signal\n'), ((5017, 5045), 'os.killpg', 'os.killpg', (['(0)', 'signal.SIGTERM'], {}), '(0, signal.SIGTERM)\n', (5026, 5045), False, 'import os\n'), ((5058, 5069), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (5066, 5069), False, 'import os\n'), ((5698, 5742), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'signal.SIG_DFL'], {}), '(signal.SIGHUP, signal.SIG_DFL)\n', (5711, 5742), False, 'import signal\n'), ((5755, 5800), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal.SIG_DFL'], {}), '(signal.SIGTERM, signal.SIG_DFL)\n', (5768, 5800), False, 'import signal\n'), ((6007, 6018), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (6015, 6018), False, 'import os\n'), ((8633, 8648), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (8643, 8648), False, 'import time\n'), ((10199, 10210), 'sys.exit', 'sys.exit', (['e'], {}), '(e)\n', (10207, 10210), False, 'import sys\n'), ((5906, 5917), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5915, 5917), False, 'import os\n'), ((7298, 7323), 'os.waitpid', 'os.waitpid', (['p', 'os.WNOHANG'], {}), '(p, os.WNOHANG)\n', (7308, 7323), False, 'import os\n'), ((8805, 8831), 'os.kill', 'os.kill', (['p', 'signal.SIGTERM'], {}), '(p, signal.SIGTERM)\n', (8812, 8831), False, 'import os\n'), ((9849, 9897), 're.sub', 'sub', (['"""([a-z])([A-Z])"""', '"""\\\\1-\\\\2"""', 'klass.__name__'], {}), "('([a-z])([A-Z])', '\\\\1-\\\\2', klass.__name__)\n", (9852, 9897), False, 'from re import sub\n'), ((8582, 8593), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8591, 8593), False, 'import os\n')] |
import resource_files
resources = resource_files.ResourceFiles()
# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))
# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))
# sample use case of getting logs
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
| [
"resource_files.ResourceFiles"
]
| [((35, 65), 'resource_files.ResourceFiles', 'resource_files.ResourceFiles', ([], {}), '()\n', (63, 65), False, 'import resource_files\n')] |
"""
Project: flask-rest
Author: <NAME>
Description: Handle auth endpoints such as auth/signup, auth/login
"""
from api.v1 import make_json_ok_response, SageController, SageMethod
from api.v1.fundamentals import helper
from .auth_controller import AuthController
def sage_auth_signup_function(self, resource, **kwargs):
_UserModel = resource.get_account_model()
args = helper.parse_args_for_model(_UserModel)
user = _UserModel(**args) # user has been created
user.put() # save to get a key for the user
result, params = AuthController.create_unique_for_user(user.key)
if not result: # not successful
user.key.delete()
raise params # this holds the error message
else:
return params # this holds accesskey and refresh token
def sage_auth_authenticate_function(self, resource, **kwargs):
result, params = AuthController.authenticate_client()
if not result: # not successful
raise params # this holds the error message
else:
return params # this holds the refresh token and the access token
auth_controller = {
'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),
'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)
}
 | [
"api.v1.fundamentals.helper.parse_args_for_model",
"api.v1.SageController"
]
| [((378, 417), 'api.v1.fundamentals.helper.parse_args_for_model', 'helper.parse_args_for_model', (['_UserModel'], {}), '(_UserModel)\n', (405, 417), False, 'from api.v1.fundamentals import helper\n'), ((1116, 1194), 'api.v1.SageController', 'SageController', (['sage_auth_signup_function', 'SageMethod.POST'], {'authenticate': '(False)'}), '(sage_auth_signup_function, SageMethod.POST, authenticate=False)\n', (1130, 1194), False, 'from api.v1 import make_json_ok_response, SageController, SageMethod\n'), ((1216, 1304), 'api.v1.SageController', 'SageController', (['sage_auth_authenticate_function', 'SageMethod.POST'], {'authenticate': '(False)'}), '(sage_auth_authenticate_function, SageMethod.POST,\n authenticate=False)\n', (1230, 1304), False, 'from api.v1 import make_json_ok_response, SageController, SageMethod\n')] |
import random as rn
import numpy as np
# Open-system dynamics of a qubit: compare numerical results with the analytical calculations.
# NOTE: these are also TUTORIALS of the library, so see the Tutorials for what they are doing and for the
# analytical calculations.
# Currently includes 2 cases: (i) decay only, and (ii) unitary evolution obtained by calling the Liouville method
# without giving any collapse operators. For now, only the excited state populations are examined.
# TODO: this is an unfinished test. The two tests below are identical, and they do not actually test open system
# dynamics.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
def singleQubitDecayCalculate(qub, state, i):
populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))
populations['excitedNumerical'].append(state[0, 0])
def test_qubitUnitaryEvolutionFromLiouville(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
def test_qubitDecay(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
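# --- Illustrative helper (not part of the original tests) -------------------
# A minimal sketch of how the stored numerical and analytical excited-state
# populations could be compared once an evolution has filled the `populations`
# dict. The tolerance value is an arbitrary assumption, not one used by the
# library or the tests above.
def _populations_close(populations, tolerance=1e-2):
    numerical = np.array(populations['excitedNumerical'], dtype=complex)
    analytical = np.array(populations['excitedAnalytical'], dtype=complex)
    # maximum absolute deviation between the two curves
    return bool(np.max(np.abs(numerical - analytical)) < tolerance)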
| [
"numpy.exp",
"random.random"
]
| [((587, 598), 'random.random', 'rn.random', ([], {}), '()\n', (596, 598), True, 'import random as rn\n'), ((634, 690), 'numpy.exp', 'np.exp', (['(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)'], {}), '(-(1e-05 * (decayRateSM + 1) * 2 + 1.0j) * 50 * t)\n', (640, 690), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
#  PURPOSE: API for running RM-synthesis on an ASCII Stokes I, Q & U spectrum. #
# #
# MODIFIED: 16-Nov-2018 by <NAME> #
# MODIFIED: 23-October-2019 by <NAME> #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
nSamples=10.0, weightType="variance", fitRMSF=False,
noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None):
"""Run RM synthesis on 1D data.
Args:
data (list): Contains frequency and polarization data as either:
[freq_Hz, I, Q, U, dI, dQ, dU]
freq_Hz (array_like): Frequency of each channel in Hz.
I (array_like): Stokes I intensity in each channel.
Q (array_like): Stokes Q intensity in each channel.
U (array_like): Stokes U intensity in each channel.
dI (array_like): Error in Stokes I intensity in each channel.
dQ (array_like): Error in Stokes Q intensity in each channel.
dU (array_like): Error in Stokes U intensity in each channel.
or
[freq_Hz, q, u, dq, du]
freq_Hz (array_like): Frequency of each channel in Hz.
q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
u (array_like): Fractional Stokes U intensity (U/I) in each channel.
dq (array_like): Error in fractional Stokes Q intensity in each channel.
du (array_like): Error in fractional Stokes U intensity in each channel.
Kwargs:
polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
nSamples (float): Number of samples across the RMSF.
weightType (str): Can be "variance" or "uniform"
"variance" -- Weight by uncertainty in Q and U.
"uniform" -- Weight uniformly (i.e. with 1s)
fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): Is Stokes I data missing?
phiNoise_radm2 (float): ????
nBits (int): Precision of floating point numbers.
showPlots (bool): Show plots?
debug (bool): Turn on debugging messages & plots?
verbose (bool): Verbosity.
log (function): Which logging function to use.
units (str): Units of data.
Returns:
mDict (dict): Summary of RM synthesis results.
aDict (dict): Data output by RM synthesis.
"""
    # Sanity checks
    if args is not None:
        if not os.path.exists(args.dataFile[0]):
            print("File does not exist: '%s'." % args.dataFile[0])
            sys.exit()
        prefixOut, ext = os.path.splitext(args.dataFile[0])
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
if verbose: log("... success.")
except Exception:
if verbose: log("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
if verbose: log("... success.")
noStokesI = True
except Exception:
if verbose: log("...failed.")
if debug:
log(traceback.format_exc())
sys.exit()
if verbose: log("Successfully read in the Stokes spectra.")
# If no Stokes I present, create a dummy spectrum = unity
if noStokesI:
if verbose: log("Warn: no Stokes I data in use.")
IArr = np.ones_like(QArr)
dIArr = np.zeros_like(QArr)
# Convert to GHz for convenience
freqArr_GHz = freqArr_Hz / 1e9
dQUArr = (dQArr + dUArr)/2.0
# Fit the Stokes I spectrum and create the fractional spectra
IModArr, qArr, uArr, dqArr, duArr, fitDict = \
create_frac_spectra(freqArr = freqArr_GHz,
IArr = IArr,
QArr = QArr,
UArr = UArr,
dIArr = dIArr,
dQArr = dQArr,
dUArr = dUArr,
polyOrd = polyOrd,
verbose = True,
debug = debug)
# Plot the data and the Stokes I model fit
if verbose: log("Plotting the input data and spectral index fit.")
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
specFig = plt.figure(figsize=(12.0, 8))
plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,
IArr = IArr,
qArr = qArr,
uArr = uArr,
dIArr = dIArr,
dqArr = dqArr,
duArr = duArr,
freqHirArr_Hz = freqHirArr_Hz,
IModArr = IModHirArr,
fig = specFig,
units = units)
# Use the custom navigation toolbar (does not work on Mac OS X)
# try:
# specFig.canvas.toolbar.pack_forget()
# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# if not plt.isinteractive():
# specFig.show()
# DEBUG (plot the Q, U and average RMS spectrum)
if debug:
rmsFig = plt.figure(figsize=(12.0, 8))
ax = rmsFig.add_subplot(111)
ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
label='rms <QU>')
ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
label='rms Q')
ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
label='rms U')
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('RMS '+units)
ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
# rmsFig.show()
#-------------------------------------------------------------------------#
# Calculate some wavelength parameters
lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
np.nanmin(lambdaSqArr_m2) )
dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))
# Set the Faraday depth range
fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
if dPhi_radm2 is None:
dPhi_radm2 = fwhmRMSF_radm2 / nSamples
if phiMax_radm2 is None:
phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM
# Faraday depth sampling. Zero always centred on middle channel
nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
phiArr_radm2 = phiArr_radm2.astype(dtFloat)
if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
phiArr_radm2[-1],
float(dPhi_radm2),
nChanRM))
# Calculate the weighting as 1/sigma^2 or all 1s (uniform)
if weightType=="variance":
weightArr = 1.0 / np.power(dQUArr, 2.0)
else:
weightType = "uniform"
weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
if verbose: log("Weight type is '%s'." % weightType)
startTime = time.time()
# Perform RM-synthesis on the spectrum
dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,
dataU = uArr,
lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
nBits = nBits,
verbose = verbose,
log = log)
# Calculate the Rotation Measure Spread Function
RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
mskArr = ~np.isfinite(qArr),
lam0Sq_m2 = lam0Sq_m2,
double = True,
fitRMSF = fitRMSF,
fitRMSFreal = False,
nBits = nBits,
verbose = verbose,
log = log)
fwhmRMSF = float(fwhmRMSFArr)
# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
#-------------------------------------------------------------------------#
endTime = time.time()
cputime = (endTime - startTime)
if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)
# Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
# Multiply the dirty FDF by Ifreq0 to recover the PI
freq0_Hz = C / m.sqrt(lam0Sq_m2)
Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux
    # Calculate the theoretical noise in the FDF (NB: this old formula is only valid for variance weights!)
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )
# Measure the parameters of the dirty FDF
# Use the theoretical noise to calculate uncertainties
mDict = measure_FDF_parms(FDF = dirtyFDF,
phiArr = phiArr_radm2,
fwhmRMSF = fwhmRMSF,
dFDF = dFDFth,
lamSqArr_m2 = lambdaSqArr_m2,
lam0Sq = lam0Sq_m2)
mDict["Ifreq0"] = toscalar(Ifreq0)
mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
mDict["IfitStat"] = fitDict["fitStatus"]
mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
mDict["freq0_Hz"] = toscalar(freq0_Hz)
mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
mDict["dQU"] = toscalar(nanmedian(dQUArr))
mDict["dFDFth"] = toscalar(dFDFth)
mDict["units"] = units
if fitDict["fitStatus"] >= 128:
log("WARNING: Stokes I model contains negative values!")
elif fitDict["fitStatus"] >= 64:
log("Caution: Stokes I model has low signal-to-noise.")
#Add information on nature of channels:
good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]
mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels]))
mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels]))
mDict["N_channels"]=good_channels.size
mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz)))
# Measure the complexity of the q and u spectra
mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
fracPol = mDict["fracPol"],
psi0_deg = mDict["polAngle0Fit_deg"],
RM_radm2 = mDict["phiPeakPIfit_rm2"])
mDict.update(mD)
# Debugging plots for spectral complexity measure
if debug:
tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
qArr=pD["yArrQ"],
dqArr=pD["dyArrQ"],
sigmaAddqArr=pD["sigmaAddArrQ"],
chiSqRedqArr=pD["chiSqRedArrQ"],
probqArr=pD["probArrQ"],
uArr=pD["yArrU"],
duArr=pD["dyArrU"],
sigmaAdduArr=pD["sigmaAddArrU"],
chiSqReduArr=pD["chiSqRedArrU"],
probuArr=pD["probArrU"],
mDict=mDict)
if saveOutput:
if verbose: print("Saving debug plots:")
outFilePlot = prefixOut + ".debug-plots.pdf"
if verbose: print("> " + outFilePlot)
tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
else:
tmpFig.show()
#add array dictionary
aDict = dict()
aDict["phiArr_radm2"] = phiArr_radm2
aDict["phi2Arr_radm2"] = phi2Arr_radm2
aDict["RMSFArr"] = RMSFArr
aDict["freqArr_Hz"] = freqArr_Hz
aDict["weightArr"]=weightArr
aDict["dirtyFDF"]=dirtyFDF
if verbose:
# Print the results to the screen
log()
log('-'*80)
log('RESULTS:\n')
log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
mDict["dPolAngleFit_deg"]))
log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
mDict["dPolAngle0Fit_deg"]))
log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
mDict["dPhiPeakPIfit_rm2"]))
log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
mDict["dAmpPeakPIfit"],units))
log('QU Noise = %.4g %s' % (mDict["dQU"],units))
log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units))
log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units))
log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
mDict["dSigmaAddPlusQ"],
mDict["dSigmaAddMinusQ"]))
log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
mDict["dSigmaAddPlusU"],
mDict["dSigmaAddMinusU"]))
log()
log('-'*80)
# Plot the RM Spread Function and dirty FDF
if showPlots or saveOutput:
fdfFig = plt.figure(figsize=(12.0, 8))
plot_rmsf_fdf_fig(phiArr = phiArr_radm2,
FDF = dirtyFDF,
phi2Arr = phi2Arr_radm2,
RMSFArr = RMSFArr,
fwhmRMSF = fwhmRMSF,
vLine = mDict["phiPeakPIfit_rm2"],
fig = fdfFig,
units = units)
# Use the custom navigation toolbar
# try:
# fdfFig.canvas.toolbar.pack_forget()
# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# fdfFig.show()
# Pause if plotting enabled
if showPlots:
plt.show()
elif saveOutput or debug:
if verbose: print("Saving RMSF and dirty FDF plot:")
outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
if verbose: print("> " + outFilePlot)
fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
# #if verbose: print "Press <RETURN> to exit ...",
# input()
return mDict, aDict
def readFile(dataFile, nBits, verbose=True, debug=False):
"""
Read the I, Q & U data from the ASCII file.
Inputs:
datafile (str): relative or absolute path to file.
nBits (int): number of bits to store the data as.
verbose (bool): Print verbose messages to terminal?
debug (bool): Print full traceback in case of failure?
Returns:
data (list of arrays): List containing the columns found in the file.
If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
else [freq_Hz, q, u, dq, du].
"""
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# Output prefix is derived from the input file name
# Read the data-file. Format=space-delimited, comments="#".
if verbose: print("Reading the data file '%s':" % dataFile)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr,
dIArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
except Exception:
if verbose: print("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]
noStokesI = True
except Exception:
if verbose: print("...failed.")
if debug:
print(traceback.format_exc())
sys.exit()
if verbose: print("Successfully read in the Stokes spectra.")
return data
def saveOutput(outdict, arrdict, prefixOut, verbose):
# Save the dirty FDF, RMSF and weight array to ASCII files
if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.")
outFile = prefixOut + "_FDFdirty.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag)))
outFile = prefixOut + "_RMSF.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag)))
outFile = prefixOut + "_weight.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"])))
# Save the measurements to a "key=value" text file
outFile = prefixOut + "_RMsynth.dat"
if verbose:
print("Saving the measurements on the FDF in 'key=val' and JSON formats.")
print("> %s" % outFile)
FH = open(outFile, "w")
for k, v in outdict.items():
FH.write("%s=%s\n" % (k, v))
FH.close()
outFile = prefixOut + "_RMsynth.json"
if verbose:
print("> %s" % outFile)
json.dump(dict(outdict), open(outFile, "w"))
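# Usage sketch (not part of the original script): the same pipeline that main()
# wires to argparse, driven programmatically. "spectrum.dat" and the Namespace
# fields below are hypothetical placeholders for a real input file and options.
def _example_pipeline():
    from argparse import Namespace
    args = Namespace(dataFile=["spectrum.dat"])
    data = readFile("spectrum.dat", nBits=32, verbose=True)
    mDict, aDict = run_rmsynth(data=data, polyOrd=2, weightType="variance",
                                verbose=True, args=args)
    saveOutput(mDict, aDict, prefixOut="spectrum", verbose=True)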
#-----------------------------------------------------------------------------#
def main():
import argparse
"""
Start the function to perform RM-synthesis if called from the command line.
"""
# Help string to be shown using the -h option
descStr = """
Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII
file. The Stokes I spectrum is first fit with a polynomial and the
resulting model used to create fractional q = Q/I and u = U/I spectra.
    The ASCII file should contain the following columns, in a space-separated format:
[freq_Hz, I, Q, U, I_err, Q_err, U_err]
OR
[freq_Hz, Q, U, Q_err, U_err]
To get outputs, one or more of the following flags must be set: -S, -p, -v.
"""
epilog_text="""
Outputs with -S flag:
_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
_RMSF.dat: Computed RMSF [Phi, Q, U]
_RMsynth.dat: list of derived parameters for RM spectrum
(approximately equivalent to -v flag output)
_RMsynth.json: dictionary of derived parameters for RM spectrum
_weight.dat: Calculated channel weights [freq_Hz, weight]
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1,
help="ASCII file containing Stokes spectra & errors.")
parser.add_argument("-t", dest="fitRMSF", action="store_true",
help="fit a Gaussian to the RMSF [False]")
parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto].")
parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None,
help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)")
parser.add_argument("-s", dest="nSamples", type=float, default=10,
help="number of samples across the RMSF lobe [10].")
parser.add_argument("-w", dest="weightType", default="variance",
help="weighting [inverse variance] or 'uniform' (all 1s).")
parser.add_argument("-o", dest="polyOrd", type=int, default=2,
help="polynomial order to fit to I spectrum [2].")
parser.add_argument("-i", dest="noStokesI", action="store_true",
help="ignore the Stokes I spectrum [False].")
parser.add_argument("-b", dest="bit64", action="store_true",
help="use 64-bit floating point precision [False (uses 32-bit)]")
parser.add_argument("-p", dest="showPlots", action="store_true",
help="show the plots [False].")
parser.add_argument("-v", dest="verbose", action="store_true",
help="verbose output [False].")
parser.add_argument("-S", dest="saveOutput", action="store_true",
help="save the arrays and plots [False].")
parser.add_argument("-D", dest="debug", action="store_true",
help="turn on debugging messages & plots [False].")
parser.add_argument("-U", dest="units", type=str, default="Jy/beam",
help="Intensity units of the data. [Jy/beam]")
args = parser.parse_args()
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
dataDir, dummy = os.path.split(args.dataFile[0])
# Set the floating point precision
nBits = 32
if args.bit64:
nBits = 64
verbose=args.verbose
data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)
# Run RM-synthesis on the spectra
mDict, aDict = run_rmsynth(data = data,
polyOrd = args.polyOrd,
phiMax_radm2 = args.phiMax_radm2,
dPhi_radm2 = args.dPhi_radm2,
nSamples = args.nSamples,
weightType = args.weightType,
fitRMSF = args.fitRMSF,
noStokesI = args.noStokesI,
nBits = nBits,
showPlots = args.showPlots,
debug = args.debug,
verbose = verbose,
units = args.units,
prefixOut = prefixOut,
args = args,
)
if args.saveOutput:
saveOutput(mDict, aDict, prefixOut, verbose)
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
main()
| [
"math.sqrt",
"RMutils.util_misc.toscalar",
"numpy.isfinite",
"sys.exit",
"numpy.nanmin",
"os.path.exists",
"RMutils.util_misc.create_frac_spectra",
"argparse.ArgumentParser",
"numpy.diff",
"os.path.split",
"numpy.max",
"numpy.linspace",
"numpy.nanmax",
"numpy.min",
"RMutils.util_plotTk.plot_rmsf_fdf_fig",
"RMutils.util_RM.measure_FDF_parms",
"RMutils.util_plotTk.plot_complexity_fig",
"RMutils.util_plotTk.plot_Ipqu_spectra_fig",
"RMutils.util_RM.measure_qu_complexity",
"numpy.ones",
"RMutils.util_RM.do_rmsynth_planes",
"os.path.splitext",
"numpy.isnan",
"time.time",
"matplotlib.pyplot.show",
"numpy.ones_like",
"traceback.format_exc",
"numpy.power",
"RMutils.util_misc.poly5",
"numpy.sum",
"matplotlib.pyplot.figure",
"RMutils.util_misc.nanmedian",
"numpy.loadtxt",
"numpy.zeros_like",
"numpy.nan_to_num"
]
| [((6474, 6508), 'os.path.splitext', 'os.path.splitext', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (6490, 6508), False, 'import os\n'), ((7816, 7980), 'RMutils.util_misc.create_frac_spectra', 'create_frac_spectra', ([], {'freqArr': 'freqArr_GHz', 'IArr': 'IArr', 'QArr': 'QArr', 'UArr': 'UArr', 'dIArr': 'dIArr', 'dQArr': 'dQArr', 'dUArr': 'dUArr', 'polyOrd': 'polyOrd', 'verbose': '(True)', 'debug': 'debug'}), '(freqArr=freqArr_GHz, IArr=IArr, QArr=QArr, UArr=UArr,\n dIArr=dIArr, dQArr=dQArr, dUArr=dUArr, polyOrd=polyOrd, verbose=True,\n debug=debug)\n', (7835, 7980), False, 'from RMutils.util_misc import create_frac_spectra\n'), ((8457, 8506), 'numpy.linspace', 'np.linspace', (['freqArr_Hz[0]', 'freqArr_Hz[-1]', '(10000)'], {}), '(freqArr_Hz[0], freqArr_Hz[-1], 10000)\n', (8468, 8506), True, 'import numpy as np\n'), ((8577, 8606), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (8587, 8606), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8812), 'RMutils.util_plotTk.plot_Ipqu_spectra_fig', 'plot_Ipqu_spectra_fig', ([], {'freqArr_Hz': 'freqArr_Hz', 'IArr': 'IArr', 'qArr': 'qArr', 'uArr': 'uArr', 'dIArr': 'dIArr', 'dqArr': 'dqArr', 'duArr': 'duArr', 'freqHirArr_Hz': 'freqHirArr_Hz', 'IModArr': 'IModHirArr', 'fig': 'specFig', 'units': 'units'}), '(freqArr_Hz=freqArr_Hz, IArr=IArr, qArr=qArr, uArr=\n uArr, dIArr=dIArr, dqArr=dqArr, duArr=duArr, freqHirArr_Hz=\n freqHirArr_Hz, IModArr=IModHirArr, fig=specFig, units=units)\n', (8632, 8812), False, 'from RMutils.util_plotTk import plot_Ipqu_spectra_fig\n'), ((10474, 10503), 'numpy.power', 'np.power', (['(C / freqArr_Hz)', '(2.0)'], {}), '(C / freqArr_Hz, 2.0)\n', (10482, 10503), True, 'import numpy as np\n'), ((11419, 11470), 'numpy.linspace', 'np.linspace', (['startPhi_radm2', 'stopPhi_radm2', 'nChanRM'], {}), '(startPhi_radm2, stopPhi_radm2, nChanRM)\n', (11430, 11470), True, 'import numpy as np\n'), ((12140, 12151), 'time.time', 'time.time', ([], {}), '()\n', (12149, 12151), False, 'import time\n'), ((12222, 12390), 'RMutils.util_RM.do_rmsynth_planes', 'do_rmsynth_planes', ([], {'dataQ': 'qArr', 'dataU': 'uArr', 'lambdaSqArr_m2': 'lambdaSqArr_m2', 'phiArr_radm2': 'phiArr_radm2', 'weightArr': 'weightArr', 'nBits': 'nBits', 'verbose': 'verbose', 'log': 'log'}), '(dataQ=qArr, dataU=uArr, lambdaSqArr_m2=lambdaSqArr_m2,\n phiArr_radm2=phiArr_radm2, weightArr=weightArr, nBits=nBits, verbose=\n verbose, log=log)\n', (12239, 12390), False, 'from RMutils.util_RM import do_rmsynth_planes\n'), ((13809, 13820), 'time.time', 'time.time', ([], {}), '()\n', (13818, 13820), False, 'import time\n'), ((14607, 14741), 'RMutils.util_RM.measure_FDF_parms', 'measure_FDF_parms', ([], {'FDF': 'dirtyFDF', 'phiArr': 'phiArr_radm2', 'fwhmRMSF': 'fwhmRMSF', 'dFDF': 'dFDFth', 'lamSqArr_m2': 'lambdaSqArr_m2', 'lam0Sq': 'lam0Sq_m2'}), '(FDF=dirtyFDF, phiArr=phiArr_radm2, fwhmRMSF=fwhmRMSF,\n dFDF=dFDFth, lamSqArr_m2=lambdaSqArr_m2, lam0Sq=lam0Sq_m2)\n', (14624, 14741), False, 'from RMutils.util_RM import measure_FDF_parms\n'), ((14950, 14966), 'RMutils.util_misc.toscalar', 'toscalar', (['Ifreq0'], {}), '(Ifreq0)\n', (14958, 14966), False, 'from RMutils.util_misc import toscalar\n'), ((15153, 15172), 'RMutils.util_misc.toscalar', 'toscalar', (['lam0Sq_m2'], {}), '(lam0Sq_m2)\n', (15161, 15172), False, 'from RMutils.util_misc import toscalar\n'), ((15197, 15215), 'RMutils.util_misc.toscalar', 'toscalar', (['freq0_Hz'], {}), '(freq0_Hz)\n', (15205, 15215), False, 'from RMutils.util_misc import toscalar\n'), 
((15240, 15258), 'RMutils.util_misc.toscalar', 'toscalar', (['fwhmRMSF'], {}), '(fwhmRMSF)\n', (15248, 15258), False, 'from RMutils.util_misc import toscalar\n'), ((15328, 15344), 'RMutils.util_misc.toscalar', 'toscalar', (['dFDFth'], {}), '(dFDFth)\n', (15336, 15344), False, 'from RMutils.util_misc import toscalar\n'), ((16067, 16267), 'RMutils.util_RM.measure_qu_complexity', 'measure_qu_complexity', ([], {'freqArr_Hz': 'freqArr_Hz', 'qArr': 'qArr', 'uArr': 'uArr', 'dqArr': 'dqArr', 'duArr': 'duArr', 'fracPol': "mDict['fracPol']", 'psi0_deg': "mDict['polAngle0Fit_deg']", 'RM_radm2': "mDict['phiPeakPIfit_rm2']"}), "(freqArr_Hz=freqArr_Hz, qArr=qArr, uArr=uArr, dqArr=\n dqArr, duArr=duArr, fracPol=mDict['fracPol'], psi0_deg=mDict[\n 'polAngle0Fit_deg'], RM_radm2=mDict['phiPeakPIfit_rm2'])\n", (16088, 16267), False, 'from RMutils.util_RM import measure_qu_complexity\n'), ((25240, 25355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'descStr', 'epilog': 'epilog_text', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=descStr, epilog=epilog_text,\n formatter_class=argparse.RawTextHelpFormatter)\n', (25263, 25355), False, 'import argparse\n'), ((27601, 27635), 'os.path.splitext', 'os.path.splitext', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27617, 27635), False, 'import os\n'), ((27657, 27688), 'os.path.split', 'os.path.split', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27670, 27688), False, 'import os\n'), ((6337, 6369), 'os.path.exists', 'os.path.exists', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (6351, 6369), False, 'import os\n'), ((6442, 6452), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6450, 6452), False, 'import sys\n'), ((7524, 7542), 'numpy.ones_like', 'np.ones_like', (['QArr'], {}), '(QArr)\n', (7536, 7542), True, 'import numpy as np\n'), ((7559, 7578), 'numpy.zeros_like', 'np.zeros_like', (['QArr'], {}), '(QArr)\n', (7572, 7578), True, 'import numpy as np\n'), ((8524, 8543), 'RMutils.util_misc.poly5', 'poly5', (["fitDict['p']"], {}), "(fitDict['p'])\n", (8529, 8543), False, 'from RMutils.util_misc import poly5\n'), ((9601, 9630), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (9611, 9630), True, 'import matplotlib.pyplot as plt\n'), ((10581, 10606), 'numpy.nanmax', 'np.nanmax', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10590, 10606), True, 'import numpy as np\n'), ((10634, 10659), 'numpy.nanmin', 'np.nanmin', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10643, 10659), True, 'import numpy as np\n'), ((12025, 12065), 'numpy.ones', 'np.ones', (['freqArr_Hz.shape'], {'dtype': 'dtFloat'}), '(freqArr_Hz.shape, dtype=dtFloat)\n', (12032, 12065), True, 'import numpy as np\n'), ((14081, 14098), 'math.sqrt', 'm.sqrt', (['lam0Sq_m2'], {}), '(lam0Sq_m2)\n', (14087, 14098), True, 'import math as m\n'), ((14112, 14131), 'RMutils.util_misc.poly5', 'poly5', (["fitDict['p']"], {}), "(fitDict['p'])\n", (14117, 14131), False, 'from RMutils.util_misc import poly5\n'), ((14354, 14373), 'numpy.isnan', 'np.isnan', (['weightArr'], {}), '(weightArr)\n', (14362, 14373), True, 'import numpy as np\n'), ((15287, 15304), 'RMutils.util_misc.nanmedian', 'nanmedian', (['dQUArr'], {}), '(dQUArr)\n', (15296, 15304), False, 'from RMutils.util_misc import nanmedian\n'), ((15734, 15767), 'numpy.min', 'np.min', (['freqArr_Hz[good_channels]'], {}), '(freqArr_Hz[good_channels])\n', (15740, 15767), True, 'import numpy as np\n'), ((15797, 15830), 'numpy.max', 'np.max', 
(['freqArr_Hz[good_channels]'], {}), '(freqArr_Hz[good_channels])\n', (15803, 15830), True, 'import numpy as np\n'), ((16655, 16979), 'RMutils.util_plotTk.plot_complexity_fig', 'plot_complexity_fig', ([], {'xArr': "pD['xArrQ']", 'qArr': "pD['yArrQ']", 'dqArr': "pD['dyArrQ']", 'sigmaAddqArr': "pD['sigmaAddArrQ']", 'chiSqRedqArr': "pD['chiSqRedArrQ']", 'probqArr': "pD['probArrQ']", 'uArr': "pD['yArrU']", 'duArr': "pD['dyArrU']", 'sigmaAdduArr': "pD['sigmaAddArrU']", 'chiSqReduArr': "pD['chiSqRedArrU']", 'probuArr': "pD['probArrU']", 'mDict': 'mDict'}), "(xArr=pD['xArrQ'], qArr=pD['yArrQ'], dqArr=pD['dyArrQ'],\n sigmaAddqArr=pD['sigmaAddArrQ'], chiSqRedqArr=pD['chiSqRedArrQ'],\n probqArr=pD['probArrQ'], uArr=pD['yArrU'], duArr=pD['dyArrU'],\n sigmaAdduArr=pD['sigmaAddArrU'], chiSqReduArr=pD['chiSqRedArrU'],\n probuArr=pD['probArrU'], mDict=mDict)\n", (16674, 16979), False, 'from RMutils.util_plotTk import plot_complexity_fig\n'), ((19693, 19722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 8)'}), '(figsize=(12.0, 8))\n', (19703, 19722), True, 'import matplotlib.pyplot as plt\n'), ((19731, 19908), 'RMutils.util_plotTk.plot_rmsf_fdf_fig', 'plot_rmsf_fdf_fig', ([], {'phiArr': 'phiArr_radm2', 'FDF': 'dirtyFDF', 'phi2Arr': 'phi2Arr_radm2', 'RMSFArr': 'RMSFArr', 'fwhmRMSF': 'fwhmRMSF', 'vLine': "mDict['phiPeakPIfit_rm2']", 'fig': 'fdfFig', 'units': 'units'}), "(phiArr=phiArr_radm2, FDF=dirtyFDF, phi2Arr=phi2Arr_radm2,\n RMSFArr=RMSFArr, fwhmRMSF=fwhmRMSF, vLine=mDict['phiPeakPIfit_rm2'],\n fig=fdfFig, units=units)\n", (19748, 19908), False, 'from RMutils.util_plotTk import plot_rmsf_fdf_fig\n'), ((20471, 20481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20479, 20481), True, 'import matplotlib.pyplot as plt\n'), ((21937, 21985), 'numpy.loadtxt', 'np.loadtxt', (['dataFile'], {'unpack': '(True)', 'dtype': 'dtFloat'}), '(dataFile, unpack=True, dtype=dtFloat)\n', (21947, 21985), True, 'import numpy as np\n'), ((27464, 27496), 'os.path.exists', 'os.path.exists', (['args.dataFile[0]'], {}), '(args.dataFile[0])\n', (27478, 27496), False, 'import os\n'), ((27569, 27579), 'sys.exit', 'sys.exit', ([], {}), '()\n', (27577, 27579), False, 'import sys\n'), ((10534, 10553), 'numpy.diff', 'np.diff', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10541, 10553), True, 'import numpy as np\n'), ((10701, 10724), 'numpy.diff', 'np.diff', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10708, 10724), True, 'import numpy as np\n'), ((10766, 10789), 'numpy.diff', 'np.diff', (['lambdaSqArr_m2'], {}), '(lambdaSqArr_m2)\n', (10773, 10789), True, 'import numpy as np\n'), ((10854, 10865), 'math.sqrt', 'm.sqrt', (['(3.0)'], {}), '(3.0)\n', (10860, 10865), True, 'import math as m\n'), ((11011, 11022), 'math.sqrt', 'm.sqrt', (['(3.0)'], {}), '(3.0)\n', (11017, 11022), True, 'import math as m\n'), ((11942, 11963), 'numpy.power', 'np.power', (['dQUArr', '(2.0)'], {}), '(dQUArr, 2.0)\n', (11950, 11963), True, 'import numpy as np\n'), ((15925, 15944), 'numpy.diff', 'np.diff', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (15932, 15944), True, 'import numpy as np\n'), ((9993, 10014), 'numpy.nanmax', 'np.nanmax', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10002, 10014), True, 'import numpy as np\n'), ((10015, 10036), 'numpy.nanmin', 'np.nanmin', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10024, 10036), True, 'import numpy as np\n'), ((13086, 13103), 'numpy.isfinite', 'np.isfinite', (['qArr'], {}), '(qArr)\n', (13097, 13103), True, 'import numpy as np\n'), ((14464, 14481), 'numpy.sum', 'np.sum', (['weightArr'], 
{}), '(weightArr)\n', (14470, 14481), True, 'import numpy as np\n'), ((15683, 15700), 'numpy.isfinite', 'np.isfinite', (['qArr'], {}), '(qArr)\n', (15694, 15700), True, 'import numpy as np\n'), ((22356, 22404), 'numpy.loadtxt', 'np.loadtxt', (['dataFile'], {'unpack': '(True)', 'dtype': 'dtFloat'}), '(dataFile, unpack=True, dtype=dtFloat)\n', (22366, 22404), True, 'import numpy as np\n'), ((7295, 7305), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7303, 7305), False, 'import sys\n'), ((10063, 10081), 'numpy.min', 'np.min', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10069, 10081), True, 'import numpy as np\n'), ((10122, 10140), 'numpy.max', 'np.max', (['freqArr_Hz'], {}), '(freqArr_Hz)\n', (10128, 10140), True, 'import numpy as np\n'), ((22687, 22697), 'sys.exit', 'sys.exit', ([], {}), '()\n', (22695, 22697), False, 'import sys\n'), ((14435, 14456), 'numpy.nan_to_num', 'np.nan_to_num', (['dQUArr'], {}), '(dQUArr)\n', (14448, 14456), True, 'import numpy as np\n'), ((7259, 7281), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7279, 7281), False, 'import traceback\n'), ((22651, 22673), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22671, 22673), False, 'import traceback\n')] |
# coding=utf-8
import ee
from . import utils
import json
import csv
from .. import tools
def fromShapefile(filename, crs=None, start=None, end=None):
""" Convert an ESRI file (.shp and .dbf must be present) to a
ee.FeatureCollection
    At the moment this only works for shapefiles with fewer than 1000 records
    and doesn't handle complex shapes.
    :param filename: the name of the file. If the shapefile is not in the
        same directory as the script, specify a full path instead.
:type filename: str
:param start:
:return: the FeatureCollection
:rtype: ee.FeatureCollection
"""
import shapefile
wgs84 = ee.Projection('EPSG:4326')
# read the filename
reader = shapefile.Reader(filename)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
field_types = [field[1] for field in fields]
types = dict(zip(field_names, field_types))
features = []
projection = utils.getProjection(filename) if not crs else crs
# catch a string with format "EPSG:XXX"
if isinstance(projection, str):
if 'EPSG:' in projection:
projection = projection.split(':')[1]
projection = 'EPSG:{}'.format(projection)
# filter records with start and end
start = start if start else 0
if not end:
records = reader.shapeRecords()
end = len(records)
else:
end = end + 1
if (end-start)>1000:
msg = "Can't process more than 1000 records at a time. Found {}"
raise ValueError(msg.format(end-start))
for i in range(start, end):
# atr = dict(zip(field_names, sr.record))
sr = reader.shapeRecord(i)
atr = {}
for fld, rec in zip(field_names, sr.record):
fld_type = types[fld]
if fld_type == 'D':
value = ee.Date(rec.isoformat()).millis().getInfo()
elif fld_type in ['C', 'N', 'F']:
value = rec
else:
continue
atr[fld] = value
geom = sr.shape.__geo_interface__
if projection is not None:
geometry = ee.Geometry(geom, projection) \
.transform(wgs84, 1)
else:
geometry = ee.Geometry(geom)
feat = ee.Feature(geometry, atr)
features.append(feat)
return ee.FeatureCollection(features)
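# Usage sketch (not part of the original module), assuming an authenticated
# Earth Engine session and a hypothetical "fields.shp" (with its .dbf) holding
# fewer than 1000 records.
def _example_from_shapefile():
    ee.Initialize()
    fc = fromShapefile('fields.shp', crs='EPSG:4326', start=0, end=10)
    print(fc.size().getInfo())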
def fromGeoJSON(filename=None, data=None, crs=None):
""" Create a list of Features from a GeoJSON file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if it is not there it will
        raise an error
:type: crs: str
:return: a tuple of features.
"""
if filename:
with open(filename, 'r') as geoj:
content = geoj.read()
geodict = json.loads(content)
else:
geodict = data
features = []
# Get crs from GeoJSON
if not crs:
filecrs = geodict.get('crs')
if filecrs:
name = filecrs.get('properties').get('name')
splitcrs = name.split(':')
cleancrs = [part for part in splitcrs if part]
try:
if cleancrs[-1] == 'CRS84':
crs = 'EPSG:4326'
elif cleancrs[-2] == 'EPSG':
crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])
else:
raise ValueError('{} not recognized'.format(name))
except IndexError:
raise ValueError('{} not recognized'.format(name))
else:
crs = 'EPSG:4326'
for n, feat in enumerate(geodict.get('features')):
properties = feat.get('properties')
geom = feat.get('geometry')
ty = geom.get('type')
coords = geom.get('coordinates')
if ty == 'GeometryCollection':
ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)
else:
if ty == 'Polygon':
coords = utils.removeZ(coords) if utils.hasZ(coords) else coords
ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))
ee_feat = ee.feature.Feature(ee_geom, properties)
features.append(ee_feat)
return tuple(features)
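# Usage sketch (not part of the original module): fromGeoJSON returns a tuple
# of ee.Feature objects which, as the docstring suggests, can be wrapped into a
# FeatureCollection by the caller. "sites.geojson" is a hypothetical path.
def _example_from_geojson():
    feats = fromGeoJSON('sites.geojson')
    fc = ee.FeatureCollection(list(feats))
    print(fc.first().getInfo())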
def fromKML(filename=None, data=None, crs=None, encoding=None):
""" Create a list of Features from a KML file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if it is not there it will
        raise an error
:type: crs: str
:return: a tuple of features.
"""
geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)
features = geojsondict['features']
for feat in features:
# remove styleUrl
prop = feat['properties']
if 'styleUrl' in prop:
prop.pop('styleUrl')
# remove Z value if needed
geom = feat['geometry']
ty = geom['type']
if ty == 'GeometryCollection':
geometries = geom['geometries']
for g in geometries:
c = g['coordinates']
utils.removeZ(c)
else:
coords = geom['coordinates']
utils.removeZ(coords)
return fromGeoJSON(data=geojsondict, crs=crs)
def toDict(collection, split_at=4000):
""" Get the FeatureCollection as a dict object """
size = collection.size()
condition = size.gte(4999)
def greater():
size = collection.size()
seq = tools.ee_list.sequence(0, size, split_at)
limits = ee.List.zip(seq.slice(1), seq)
def over_limits(n):
n = ee.List(n)
ini = ee.Number(n.get(0))
end = ee.Number(n.get(1))
return ee.FeatureCollection(collection.toList(ini, end))
return limits.map(over_limits)
collections = ee.List(
ee.Algorithms.If(condition,
greater(),
ee.List([collection])))
collections_size = collections.size().getInfo()
col = ee.FeatureCollection(collections.get(0))
content = col.getInfo()
feats = content['features']
for i in range(0, collections_size):
c = ee.FeatureCollection(collections.get(i))
content_c = c.getInfo()
feats_c = content_c['features']
feats = feats + feats_c
content['features'] = feats
return content
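# Usage sketch (not part of the original module): toDict mirrors getInfo() but
# pages through the collection in blocks of `split_at` features to stay under
# the Earth Engine element limit. The collection argument is hypothetical.
def _example_to_dict(collection):
    d = toDict(collection, split_at=4000)
    print(len(d['features']))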
def toGeoJSON(collection, name, path=None, split_at=4000):
""" Export a FeatureCollection to a GeoJSON file
:param collection: The collection to export
:type collection: ee.FeatureCollection
:param name: name of the resulting file
:type name: str
:param path: The path where to save the file. If None, will be saved
in the current folder
:type path: str
:param split_at: limit to avoid an EE Exception
:type split_at: int
:return: A GeoJSON (.geojson) file.
:rtype: file
"""
import json
import os
if not path:
path = os.getcwd()
    # name
    if not name.endswith('.geojson'):
        fname = name + '.geojson'
    else:
        fname = name
content = toDict(collection, split_at)
with open(os.path.join(path, fname), 'w') as thefile:
thefile.write(json.dumps(content))
return thefile
def toCSV(collection, filename, split_at=4000):
""" Alternative to download a FeatureCollection as a CSV """
d = toDict(collection, split_at)
fields = list(d['columns'].keys())
fields.append('geometry')
features = d['features']
ext = filename[-4:]
if ext != '.csv':
filename += '.csv'
with open(filename, 'w') as thecsv:
writer = csv.DictWriter(thecsv, fields)
writer.writeheader()
# write rows
for feature in features:
properties = feature['properties']
fid = feature['id']
geom = feature['geometry']['type']
# match fields
properties['system:index'] = fid
properties['geometry'] = geom
# write row
writer.writerow(properties)
return thecsv
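# Usage sketch (not part of the original module): exporting a (hypothetical)
# small collection with the two helpers above; the output paths are placeholders.
def _example_exports(collection):
    toGeoJSON(collection, 'my_export', path='/tmp')
    toCSV(collection, '/tmp/my_export')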
def toLocal(collection, filename, filetype=None, selectors=None, path=None):
""" Download a FeatureCollection to a local file a CSV or geoJSON file.
This uses a different method than `toGeoJSON` and `toCSV`
:param filetype: The filetype of download, either CSV or JSON.
Defaults to CSV.
:param selectors: The selectors that should be used to determine which
attributes will be downloaded.
:param filename: The name of the file to be downloaded
"""
if not filetype:
filetype = 'CSV'
url = collection.getDownloadURL(filetype, selectors, filename)
thefile = utils.downloadFile(url, filename, filetype, path)
return thefile
def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
""" This function can create folders and ImageCollections on the fly.
The rest is the same to Export.image.toAsset. You can pass the same
params as the original function
:param table: the feature collection to upload
:type table: ee.FeatureCollection
:param assetPath: path to upload the image (only PATH, without
filename)
:type assetPath: str
:param name: filename for the image (AssetID will be assetPath + name)
:type name: str
:return: the tasks
:rtype: ee.batch.Task
"""
# Check if the user is specified in the asset path
is_user = (assetPath.split('/')[0] == 'users')
if not is_user:
user = ee.batch.data.getAssetRoots()[0]['id']
assetPath = "{}/{}".format(user, assetPath)
if create:
# Recrusive create path
path2create = assetPath # '/'.join(assetPath.split('/')[:-1])
utils.createAssets([path2create], 'Folder', True)
# Asset ID (Path + name)
assetId = '/'.join([assetPath, name])
# Description
description = utils.matchDescription(name)
# Init task
task = ee.batch.Export.table.toAsset(table, assetId=assetId,
description=description, **kwargs)
task.start()
if verbose:
print('Exporting {} to {}'.format(name, assetPath))
    return task
 | [
"shapefile.Reader",
"ee.feature.Feature",
"csv.DictWriter",
"json.loads",
"ee.FeatureCollection",
"ee.List",
"ee.batch.data.getAssetRoots",
"ee.Geometry",
"ee.Feature",
"os.path.join",
"json.dumps",
"os.getcwd",
"ee.batch.Export.table.toAsset",
"ee.Projection"
]
| [((637, 663), 'ee.Projection', 'ee.Projection', (['"""EPSG:4326"""'], {}), "('EPSG:4326')\n", (650, 663), False, 'import ee\n'), ((701, 727), 'shapefile.Reader', 'shapefile.Reader', (['filename'], {}), '(filename)\n', (717, 727), False, 'import shapefile\n'), ((2313, 2343), 'ee.FeatureCollection', 'ee.FeatureCollection', (['features'], {}), '(features)\n', (2333, 2343), False, 'import ee\n'), ((10694, 10787), 'ee.batch.Export.table.toAsset', 'ee.batch.Export.table.toAsset', (['table'], {'assetId': 'assetId', 'description': 'description'}), '(table, assetId=assetId, description=\n description, **kwargs)\n', (10723, 10787), False, 'import ee\n'), ((2245, 2270), 'ee.Feature', 'ee.Feature', (['geometry', 'atr'], {}), '(geometry, atr)\n', (2255, 2270), False, 'import ee\n'), ((4505, 4544), 'ee.feature.Feature', 'ee.feature.Feature', (['ee_geom', 'properties'], {}), '(ee_geom, properties)\n', (4523, 4544), False, 'import ee\n'), ((7732, 7743), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7741, 7743), False, 'import os\n'), ((8372, 8402), 'csv.DictWriter', 'csv.DictWriter', (['thecsv', 'fields'], {}), '(thecsv, fields)\n', (8386, 8402), False, 'import csv\n'), ((2212, 2229), 'ee.Geometry', 'ee.Geometry', (['geom'], {}), '(geom)\n', (2223, 2229), False, 'import ee\n'), ((3184, 3203), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (3194, 3203), False, 'import json\n'), ((6373, 6383), 'ee.List', 'ee.List', (['n'], {}), '(n)\n', (6380, 6383), False, 'import ee\n'), ((6694, 6715), 'ee.List', 'ee.List', (['[collection]'], {}), '([collection])\n', (6701, 6715), False, 'import ee\n'), ((7881, 7906), 'os.path.join', 'os.path.join', (['path', 'fname'], {}), '(path, fname)\n', (7893, 7906), False, 'import os\n'), ((7947, 7966), 'json.dumps', 'json.dumps', (['content'], {}), '(content)\n', (7957, 7966), False, 'import json\n'), ((10262, 10291), 'ee.batch.data.getAssetRoots', 'ee.batch.data.getAssetRoots', ([], {}), '()\n', (10289, 10291), False, 'import ee\n'), ((2106, 2135), 'ee.Geometry', 'ee.Geometry', (['geom', 'projection'], {}), '(geom, projection)\n', (2117, 2135), False, 'import ee\n'), ((4467, 4485), 'ee.Projection', 'ee.Projection', (['crs'], {}), '(crs)\n', (4480, 4485), False, 'import ee\n')] |
"""
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
"""
It generates an ensemble from the data given a set of clusterers (a
clusterer is an instance of a clustering algorithm with a fixed set of
parameters).
Args:
data:
A numpy array, pandas dataframe, or any other structure supported
by the clusterers as data input.
clusterers:
A dictionary with clusterers specified in this format: { 'k-means
#1': KMeans(n_clusters=2), ... }
attributes:
A list of attributes to save in the final dataframe; for example,
including "n_clusters" will extract this attribute from the
estimator and include it in the final dataframe returned.
affinity_matrix:
If the clustering algorithm is AgglomerativeClustering (from
sklearn) and the linkage method is different than ward (which only
support euclidean distance), the affinity_matrix is given as data
input to the estimator instead of data.
Returns:
A pandas DataFrame with all the partitions generated by the clusterers.
Columns include the clusterer name/id, the partition, the estimator
parameters (obtained with the get_params() method) and any other
attribute specified.
"""
ensemble = []
for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
# get partition
#
# for agglomerative clustering both data and affinity_matrix should be
# given; for ward linkage, data is used, and for the other linkage
# methods the affinity_matrix is used
if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
clus_obj.linkage != "ward"
):
partition = clus_obj.fit_predict(affinity_matrix).astype(float)
else:
partition = clus_obj.fit_predict(data).astype(float)
# remove from partition noisy points (for example, if using DBSCAN)
partition[partition < 0] = np.nan
# get number of clusters
partition_no_nan = partition[~np.isnan(partition)]
n_clusters = np.unique(partition_no_nan).shape[0]
# stop if n_clusters <= 1
if n_clusters <= 1:
reset_estimator(clus_obj)
continue
res = pd.Series(
{
"clusterer_id": clus_name,
"clusterer_params": str(clus_obj.get_params()),
"partition": partition,
}
)
for attr in attributes:
if attr == "n_clusters" and not hasattr(clus_obj, attr):
res[attr] = n_clusters
else:
res[attr] = getattr(clus_obj, attr)
ensemble.append(res)
# for some estimators such as DBSCAN this is needed, because otherwise
# the estimator saves references of huge data structures not needed in
# this context
reset_estimator(clus_obj)
return pd.DataFrame(ensemble).set_index("clusterer_id")
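# Illustrative call (not part of the original module). The clusterers dict follows the
# format documented in generate_ensemble's docstring; scikit-learn's KMeans is only one
# example of a compatible estimator:
# >>> from sklearn.cluster import KMeans
# >>> clusterers = {f"k-means #{k}": KMeans(n_clusters=k) for k in (2, 3, 4)}
# >>> ensemble = generate_ensemble(data, clusterers, attributes=["n_clusters"])
# >>> ensemble.columns  # clusterer_params, partition, n_clusters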
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
"""
Given an ensemble, it computes the coassociation matrix (a distance matrix
for all objects using the ensemble information). For each object pair, the
coassociation matrix contains the percentage of times the pair of objects
was clustered together in the ensemble.
Args:
ensemble:
A numpy array representing a set of clustering solutions on the same
data. Each row is a clustering solution (partition) and columns are
objects.
n_jobs:
            The number of parallel jobs passed to sklearn's
            pairwise_distances function.
Returns:
A numpy array representing a square distance matrix for all objects
(coassociation matrix).
"""
def _compare(x, y):
xy = np.array([x, y]).T
xy = xy[~np.isnan(xy).any(axis=1)]
return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]
return pairwise_distances(
ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
)
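# Tiny worked example (assumed data, comments only): for an ensemble of 3 partitions over
# 4 objects, entry (i, j) is the fraction of partitions that separate objects i and j.
# >>> demo = np.array([[0, 0, 1, 1],
# ...                  [0, 1, 1, 0],
# ...                  [0, 0, 0, 1]])
# >>> get_ensemble_distance_matrix(demo, n_jobs=1)[0, 1]  # objects 0 and 1 differ in 1/3 partitions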
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
"""
It combines a clustering ensemble using a set of methods that the user can
specify. Each of these methods combines the ensemble and returns a single
partition. This function returns the combined partition that maximizes the
selection criterion.
Args:
ensemble:
a clustering ensemble (rows are partitions, columns are objects).
k:
the final number of clusters for the combined partition.
methods:
a list of methods to apply on the ensemble; each returns a combined
partition.
selection_criterion:
a function that represents the selection criterion; this function
has to accept an ensemble as the first argument, and a partition as
the second one.
n_jobs:
number of jobs.
use_tqdm:
            enables/disables the use of tqdm to show a progress bar.
Returns:
Returns a tuple: (partition, best method name, best criterion value)
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
methods_results = {}
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
for future in tqdm(
as_completed(tasks),
total=len(tasks),
disable=(not use_tqdm),
ncols=100,
):
method_name = tasks[future]
part = future.result()
criterion_value = selection_criterion(ensemble, part)
methods_results[method_name] = {
"partition": part,
"criterion_value": criterion_value,
}
# select the best performing method according to the selection criterion
best_method = max(
methods_results, key=lambda x: methods_results[x]["criterion_value"]
)
best_method_results = methods_results[best_method]
return (
best_method_results["partition"],
best_method,
best_method_results["criterion_value"],
)
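# Illustrative wiring (the method and criterion names below are hypothetical placeholders,
# not part of this module): each method must accept (ensemble, k) and return a flat
# partition, and the selection criterion must accept (ensemble, partition).
# >>> def median_partition(ensemble, k):          # hypothetical consensus function
# ...     ...
# >>> def mean_ami(ensemble, partition):          # hypothetical criterion
# ...     return np.mean([compare_arrays(m, partition, ami, use_weighting=True) for m in ensemble])
# >>> part, method, value = supraconsensus(ensemble, k=3, methods=[median_partition],
# ...                                      selection_criterion=mean_ami)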
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
"""
Runs a consensus clustering method on the ensemble data, obtains the
consolidated partition with the desired number of clusters, and computes
a series of performance measures.
Args:
method_func:
A consensus function (first argument is either the ensemble or
the coassociation matrix derived from the ensemble).
ensemble_data:
A numpy array with the ensemble data that will be given to the
specified method. For evidence accumulation methods, this is the
coassociation matrix (a square matrix with the distance between
object pairs derived from the ensemble).
ensemble:
A numpy array representing the ensemble (partitions in rows, objects
in columns).
k:
The number of clusters to obtain from the ensemble data using the
specified method.
kwargs:
Other parameters passed to `method_func`.
Returns:
It returns a tuple with the data partition derived from the ensemble
data using the specified method, and some performance measures of this
partition.
"""
part = method_func(ensemble_data, k, **kwargs)
nmi_values = np.array(
[
compare_arrays(ensemble_member, part, nmi, use_weighting=True)
for ensemble_member in ensemble
]
)
ami_values = np.array(
[
compare_arrays(ensemble_member, part, ami, use_weighting=True)
for ensemble_member in ensemble
]
)
ari_values = np.array(
[
compare_arrays(ensemble_member, part, ari, use_weighting=True)
for ensemble_member in ensemble
]
)
performance_values = {
"ari_mean": np.mean(ari_values),
"ari_median": np.median(ari_values),
"ari_std": np.std(ari_values),
"ami_mean": np.mean(ami_values),
"ami_median": np.median(ami_values),
"ami_std": np.std(ami_values),
"nmi_mean": np.mean(nmi_values),
"nmi_median": np.median(nmi_values),
"nmi_std": np.std(nmi_values),
}
return part, performance_values
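# Minimal sketch (assumed inputs): any callable with signature
# method_func(ensemble_data, k, **kwargs) returning a flat partition can be scored here.
# >>> def trivial_method(coassoc, k):             # hypothetical stand-in
# ...     return np.arange(coassoc.shape[0]) % k
# >>> part, perf = run_method_and_compute_agreement(trivial_method, dist_matrix, ensemble, k=2)
# >>> sorted(perf)   # ari/ami/nmi mean, median and std values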
| [
"numpy.mean",
"numpy.median",
"numpy.unique",
"clustering.utils.reset_estimator",
"sklearn.metrics.pairwise_distances",
"concurrent.futures.as_completed",
"numpy.array",
"numpy.isnan",
"concurrent.futures.ProcessPoolExecutor",
"numpy.std",
"pandas.DataFrame",
"clustering.utils.compare_arrays"
]
| [((4500, 4596), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['ensemble.T'], {'metric': '_compare', 'n_jobs': 'n_jobs', 'force_all_finite': '"""allow-nan"""'}), "(ensemble.T, metric=_compare, n_jobs=n_jobs,\n force_all_finite='allow-nan')\n", (4518, 4596), False, 'from sklearn.metrics import pairwise_distances\n'), ((3448, 3473), 'clustering.utils.reset_estimator', 'reset_estimator', (['clus_obj'], {}), '(clus_obj)\n', (3463, 3473), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((5826, 5865), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'n_jobs'}), '(max_workers=n_jobs)\n', (5845, 5865), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((8669, 8688), 'numpy.mean', 'np.mean', (['ari_values'], {}), '(ari_values)\n', (8676, 8688), True, 'import numpy as np\n'), ((8712, 8733), 'numpy.median', 'np.median', (['ari_values'], {}), '(ari_values)\n', (8721, 8733), True, 'import numpy as np\n'), ((8754, 8772), 'numpy.std', 'np.std', (['ari_values'], {}), '(ari_values)\n', (8760, 8772), True, 'import numpy as np\n'), ((8794, 8813), 'numpy.mean', 'np.mean', (['ami_values'], {}), '(ami_values)\n', (8801, 8813), True, 'import numpy as np\n'), ((8837, 8858), 'numpy.median', 'np.median', (['ami_values'], {}), '(ami_values)\n', (8846, 8858), True, 'import numpy as np\n'), ((8879, 8897), 'numpy.std', 'np.std', (['ami_values'], {}), '(ami_values)\n', (8885, 8897), True, 'import numpy as np\n'), ((8919, 8938), 'numpy.mean', 'np.mean', (['nmi_values'], {}), '(nmi_values)\n', (8926, 8938), True, 'import numpy as np\n'), ((8962, 8983), 'numpy.median', 'np.median', (['nmi_values'], {}), '(nmi_values)\n', (8971, 8983), True, 'import numpy as np\n'), ((9004, 9022), 'numpy.std', 'np.std', (['nmi_values'], {}), '(nmi_values)\n', (9010, 9022), True, 'import numpy as np\n'), ((2759, 2784), 'clustering.utils.reset_estimator', 'reset_estimator', (['clus_obj'], {}), '(clus_obj)\n', (2774, 2784), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((3486, 3508), 'pandas.DataFrame', 'pd.DataFrame', (['ensemble'], {}), '(ensemble)\n', (3498, 3508), True, 'import pandas as pd\n'), ((4368, 4384), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (4376, 4384), True, 'import numpy as np\n'), ((5999, 6018), 'concurrent.futures.as_completed', 'as_completed', (['tasks'], {}), '(tasks)\n', (6011, 6018), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((8152, 8214), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'nmi'], {'use_weighting': '(True)'}), '(ensemble_member, part, nmi, use_weighting=True)\n', (8166, 8214), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((8325, 8387), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'ami'], {'use_weighting': '(True)'}), '(ensemble_member, part, ami, use_weighting=True)\n', (8339, 8387), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((8498, 8560), 'clustering.utils.compare_arrays', 'compare_arrays', (['ensemble_member', 'part', 'ari'], {'use_weighting': '(True)'}), '(ensemble_member, part, ari, use_weighting=True)\n', (8512, 8560), False, 'from clustering.utils import reset_estimator, compare_arrays\n'), ((2605, 2624), 'numpy.isnan', 'np.isnan', (['partition'], {}), '(partition)\n', (2613, 2624), True, 'import numpy as np\n'), ((2647, 2674), 'numpy.unique', 'np.unique', (['partition_no_nan'], {}), 
'(partition_no_nan)\n', (2656, 2674), True, 'import numpy as np\n'), ((4404, 4416), 'numpy.isnan', 'np.isnan', (['xy'], {}), '(xy)\n', (4412, 4416), True, 'import numpy as np\n')] |
import logging
from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime
from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments
"""
Class that operates on the employees table.
"""
class EmployeesService(AppLogicBaseService):
def __init__(self):
super().__init__()
@staticmethod
@transaction.atomic()
def create_employees():
"""
        Create Employees records.
"""
service = EmployeesService()
for emp_no in range(1, 11):
if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:
if emp_no <= 5:
department_no = DepartmentType.SALES.value
else:
department_no = DepartmentType.MARKETING.value
select_model = Departments.objects.filter(department_no=department_no).values("id").first()
                # Register the record
service._regist_employees(select_model['id'], emp_no)
@staticmethod
@transaction.atomic()
def create_departments():
"""
        Create Departments records.
"""
service = EmployeesService()
        # Delete all existing records
        # Because a ForeignKey is defined, run the delete() command
Departments.objects.all().delete()
for department_type in DepartmentType:
department_no = department_type.value
if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:
                # Register the record
service._regist_departments(department_no, department_type.en_name)
@staticmethod
@transaction.atomic()
def update_employees():
"""
        Update Employees records.
"""
service = EmployeesService()
        # Narrow down the queryset with filter()
        # gt: greater than (>), lt: less than (<)
for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(
"id").first()
department_id = select_model['id']
department_date_from = 20190903
            # Update the record
service._update_employees_department(employees_id, department_id, department_date_from)
        # Narrow down the queryset with filter()
        # gte: greater than or equal to (>=), lte: less than or equal to (<=)
for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values("id").first()
department_id = select_model['id']
department_date_from = 20190905
            # Update the record
service._update_employees_department(employees_id, department_id, department_date_from)
@staticmethod
def select_employees():
"""
        Search Employees records.
"""
        # Specifying table_name__field_name results in an INNER JOIN
        # A query is issued every time the related table is accessed
for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,
delete_flag=0):
logging.debug("reference:emp_no={}".format(employees_item.emp_no))
logging.debug("reference:department_no={}".format(employees_item.department.department_no))
logging.debug("reference:department_name={}".format(employees_item.department.department_name))
logging.debug("reference:first_name={}".format(employees_item.first_name))
logging.debug("reference:last_name={}".format(employees_item.last_name))
        # select_related fetches the related objects and caches them
        # Only a single query is issued
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related("department"):
logging.debug("select_related:emp_no={}".format(employees_item.emp_no))
logging.debug("select_related:first_name={}".format(employees_item.first_name))
logging.debug("select_related:last_name={}".format(employees_item.last_name))
logging.debug("select_related:department_no={}".format(employees_item.department.department_no))
logging.debug("select_related:department_name={}".format(employees_item.department.department_name))
        # prefetch_related fetches the related objects and caches them
        # Two queries are issued and the results are joined on the ForeignKey
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(
"department__employees_set"):
logging.debug("prefetch_related:emp_no={}".format(employees_item.emp_no))
logging.debug("prefetch_related:first_name={}".format(employees_item.first_name))
logging.debug("prefetch_related:last_name={}".format(employees_item.last_name))
logging.debug("prefetch_related:department_no={}".format(employees_item.department.department_no))
logging.debug("prefetch_related:department_name={}".format(employees_item.department.department_name))
@staticmethod
@transaction.atomic()
def truncate_employees():
"""
        Truncate the employees table.
"""
cursor = connection.cursor()
cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table))
def _regist_employees(self, department_id, emp_no):
"""
        Register an employees record.
"""
self.regist_model = Employees()
self.regist_model.emp_no = emp_no
self.regist_model.department_id = department_id
self.regist_model.first_name = "first_name_" + str(emp_no).zfill(3)
self.regist_model.last_name = "last_name_" + str(emp_no).zfill(3)
self.regist_model.gender = GenderType.MAN.value
self.regist_model.department_date_from = "20190902"
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
return self.regist_model.id
def _regist_departments(self, department_no, department_name):
"""
        Register a departments record.
"""
self.regist_model = Departments()
self.regist_model.department_no = department_no
self.regist_model.department_name = department_name
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
def _update_employees_department(self, employees_id, department_id, department_date_from):
"""
        Update the department assignment information.
"""
self.update_model = Employees()
self.update_model.pk = employees_id
self.update_model.department_id = department_id
self.update_model.department_date_from = department_date_from
self.update_model.update_dt = localtime(timezone.now())
self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
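# Illustrative usage (not part of the original module); assumes a configured Django
# project with this app migrated:
# >>> EmployeesService.create_departments()   # seed departments from DepartmentType
# >>> EmployeesService.create_employees()     # seed employees 1..10
# >>> EmployeesService.update_employees()     # reassign some employees to other departments
# >>> EmployeesService.select_employees()     # log join / select_related / prefetch_related examples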
| [
"chart.models.Departments.objects.all",
"chart.models.Employees.objects.filter",
"chart.models.Employees",
"django.db.transaction.atomic",
"chart.models.Departments.objects.filter",
"django.utils.timezone.now",
"django.db.connection.cursor",
"chart.models.Departments"
]
| [((542, 562), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (560, 562), False, 'from django.db import transaction, connection\n'), ((1209, 1229), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1227, 1229), False, 'from django.db import transaction, connection\n'), ((1791, 1811), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1809, 1811), False, 'from django.db import transaction, connection\n'), ((5290, 5310), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (5308, 5310), False, 'from django.db import transaction, connection\n'), ((2023, 2090), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', ([], {'emp_no__gt': '(1)', 'emp_no__lt': '(3)', 'delete_flag': '(0)'}), '(emp_no__gt=1, emp_no__lt=3, delete_flag=0)\n', (2047, 2090), False, 'from chart.models import Employees, Departments\n'), ((2587, 2656), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', ([], {'emp_no__gte': '(7)', 'emp_no__lte': '(9)', 'delete_flag': '(0)'}), '(emp_no__gte=7, emp_no__lte=9, delete_flag=0)\n', (2611, 2656), False, 'from chart.models import Employees, Departments\n'), ((3236, 3334), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', ([], {'department__department_no': 'DepartmentType.SALES.value', 'delete_flag': '(0)'}), '(department__department_no=DepartmentType.SALES.\n value, delete_flag=0)\n', (3260, 3334), False, 'from chart.models import Employees, Departments\n'), ((5400, 5419), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (5417, 5419), False, 'from django.db import transaction, connection\n'), ((5630, 5641), 'chart.models.Employees', 'Employees', ([], {}), '()\n', (5639, 5641), False, 'from chart.models import Employees, Departments\n'), ((6390, 6403), 'chart.models.Departments', 'Departments', ([], {}), '()\n', (6401, 6403), False, 'from chart.models import Employees, Departments\n'), ((6889, 6900), 'chart.models.Employees', 'Employees', ([], {}), '()\n', (6898, 6900), False, 'from chart.models import Employees, Departments\n'), ((6096, 6110), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6108, 6110), False, 'from django.utils import timezone\n'), ((6160, 6174), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6172, 6174), False, 'from django.utils import timezone\n'), ((6610, 6624), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6622, 6624), False, 'from django.utils import timezone\n'), ((6674, 6688), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6686, 6688), False, 'from django.utils import timezone\n'), ((7119, 7133), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7131, 7133), False, 'from django.utils import timezone\n'), ((1423, 1448), 'chart.models.Departments.objects.all', 'Departments.objects.all', ([], {}), '()\n', (1446, 1448), False, 'from chart.models import Employees, Departments\n'), ((3955, 4009), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', ([], {'emp_no__gte': '(7)', 'delete_flag': '(0)'}), '(emp_no__gte=7, delete_flag=0)\n', (3979, 4009), False, 'from chart.models import Employees, Departments\n'), ((4649, 4703), 'chart.models.Employees.objects.filter', 'Employees.objects.filter', ([], {'emp_no__gte': '(7)', 'delete_flag': '(0)'}), '(emp_no__gte=7, delete_flag=0)\n', (4673, 4703), False, 'from chart.models import Employees, Departments\n'), ((727, 781), 'chart.models.Employees.objects.filter', 
'Employees.objects.filter', ([], {'emp_no': 'emp_no', 'delete_flag': '(0)'}), '(emp_no=emp_no, delete_flag=0)\n', (751, 781), False, 'from chart.models import Employees, Departments\n'), ((1571, 1641), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', ([], {'department_no': 'department_no', 'delete_flag': '(0)'}), '(department_no=department_no, delete_flag=0)\n', (1597, 1641), False, 'from chart.models import Employees, Departments\n'), ((2164, 2237), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', ([], {'department_no': 'DepartmentType.PRODUCTION.value'}), '(department_no=DepartmentType.PRODUCTION.value)\n', (2190, 2237), False, 'from chart.models import Employees, Departments\n'), ((2730, 2798), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', ([], {'department_no': 'DepartmentType.SALES.value'}), '(department_no=DepartmentType.SALES.value)\n', (2756, 2798), False, 'from chart.models import Employees, Departments\n'), ((1011, 1066), 'chart.models.Departments.objects.filter', 'Departments.objects.filter', ([], {'department_no': 'department_no'}), '(department_no=department_no)\n', (1037, 1066), False, 'from chart.models import Employees, Departments\n')] |
from flask import render_template
def home():
return render_template('upload.html')
def about():
return render_template('about.html')
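# Minimal wiring sketch (assumed app layout; the original file only defines the view
# functions):
# >>> from flask import Flask
# >>> app = Flask(__name__)
# >>> app.add_url_rule('/', 'home', home)
# >>> app.add_url_rule('/about', 'about', about)
# >>> # app.run(debug=True)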
| [
"flask.render_template"
]
| [((59, 89), 'flask.render_template', 'render_template', (['"""upload.html"""'], {}), "('upload.html')\n", (74, 89), False, 'from flask import render_template\n'), ((116, 145), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (131, 145), False, 'from flask import render_template\n')] |
# Copyright 2016 - Nokia, ZTE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from six.moves import reduce
from oslo_log import log
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import GraphAction
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.static import STATIC_DATASOURCE
from vitrage.datasources.static import StaticFields
from vitrage.utils import file as file_utils
LOG = log.getLogger(__name__)
class StaticDriver(DriverBase):
# base fields are required for all entities, others are treated as metadata
BASE_FIELDS = {StaticFields.STATIC_ID,
StaticFields.TYPE,
StaticFields.ID}
def __init__(self, conf):
super(StaticDriver, self).__init__()
self.cfg = conf
self.entities_cache = []
@staticmethod
def _is_valid_config(config):
"""check for validity of configuration"""
# TODO(yujunz) check with yaml schema or reuse template validation
return StaticFields.DEFINITIONS in config
@staticmethod
def get_event_types():
return []
def enrich_event(self, event, event_type):
pass
def get_all(self, datasource_action):
return self.make_pickleable(self._get_and_cache_all_entities(),
STATIC_DATASOURCE,
datasource_action)
def get_changes(self, datasource_action):
return self.make_pickleable(self._get_and_cache_changed_entities(),
STATIC_DATASOURCE,
datasource_action)
def _get_and_cache_all_entities(self):
self.entities_cache = self._get_all_entities()
return self.entities_cache
def _get_all_entities(self):
files = file_utils.list_files(self.cfg.static.directory, '.yaml', True)
return list(reduce(chain, [self._get_entities_from_file(path)
for path in files], []))
def _get_and_cache_changed_entities(self):
changed_entities = []
new_entities = self._get_all_entities()
for new_entity in new_entities:
old_entity = self._find_entity(new_entity, self.entities_cache)
if old_entity:
# Add modified entities
if not self._equal_entities(old_entity, new_entity):
changed_entities.append(new_entity.copy())
else:
# Add new entities
changed_entities.append(new_entity.copy())
# Add deleted entities
for old_entity in self.entities_cache:
if not self._find_entity(old_entity, new_entities):
old_entity_copy = old_entity.copy()
old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY
changed_entities.append(old_entity_copy)
self.entities_cache = new_entities
return changed_entities
@classmethod
def _get_entities_from_file(cls, path):
config = file_utils.load_yaml_file(path)
if not cls._is_valid_config(config):
LOG.warning("Skipped invalid config (possible obsoleted): {}"
.format(path))
return []
definitions = config[StaticFields.DEFINITIONS]
entities = definitions[StaticFields.ENTITIES]
relationships = definitions[StaticFields.RELATIONSHIPS]
return cls._pack(entities, relationships)
@classmethod
def _pack(cls, entities, relationships):
entities_dict = {}
for entity in entities:
cls._pack_entity(entities_dict, entity)
for rel in relationships:
cls._pack_rel(entities_dict, rel)
return entities_dict.values()
@classmethod
def _pack_entity(cls, entities_dict, entity):
static_id = entity[StaticFields.STATIC_ID]
if static_id not in entities_dict:
metadata = {key: value for key, value in entity.items()
if key not in cls.BASE_FIELDS}
entities_dict[static_id] = entity
entity[StaticFields.RELATIONSHIPS] = []
entity[StaticFields.METADATA] = metadata
else:
LOG.warning("Skipped duplicated entity: {}".format(entity))
@classmethod
def _pack_rel(cls, entities_dict, rel):
source_id = rel[StaticFields.SOURCE]
target_id = rel[StaticFields.TARGET]
if source_id == target_id:
# self pointing relationship
entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)
else:
source, target = entities_dict[source_id], entities_dict[target_id]
source[StaticFields.RELATIONSHIPS].append(
cls._expand_neighbor(rel, target))
@staticmethod
def _expand_neighbor(rel, neighbor):
"""Expand config id to neighbor entity
rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'}
neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
result={'relationship_type': 'attached', 'source': 's1',
'target': {'static_id': 'h1',
'vitrage_type': 'host.nova',
'id': 1}}
"""
rel = rel.copy()
if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.SOURCE] = neighbor
elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.TARGET] = neighbor
else:
# TODO(yujunz) raise exception and ignore invalid relationship
LOG.error("Invalid neighbor {} for relationship {}"
.format(neighbor, rel))
return None
return rel
@staticmethod
def _find_entity(search_entity, entities):
# naive implementation since we don't expect many static entities
for entity in entities:
if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \
and entity[StaticFields.ID] == \
search_entity[StaticFields.ID]:
return entity
@staticmethod
def _equal_entities(old_entity, new_entity):
# TODO(iafek): compare also the relationships
return old_entity.get(StaticFields.TYPE) == \
new_entity.get(StaticFields.TYPE) and \
old_entity.get(StaticFields.ID) == \
new_entity.get(StaticFields.ID) and \
old_entity.get(StaticFields.NAME) == \
new_entity.get(StaticFields.NAME) and \
old_entity.get(StaticFields.STATE) == \
new_entity.get(StaticFields.STATE)
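# Illustrative shapes (keys follow the _expand_neighbor docstring above; the 'switch'
# entity is an assumption, not an official config sample):
# >>> entities = [{'static_id': 's1', 'vitrage_type': 'switch', 'id': 1},
# ...             {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}]
# >>> rels = [{'source': 's1', 'target': 'h1', 'relationship_type': 'attached'}]
# >>> packed = StaticDriver._pack(entities, rels)
# each packed entity now carries its relationships plus a metadata dict of non-base fields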
| [
"vitrage.utils.file.list_files",
"vitrage.utils.file.load_yaml_file",
"oslo_log.log.getLogger"
]
| [((997, 1020), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1010, 1020), False, 'from oslo_log import log\n'), ((2380, 2443), 'vitrage.utils.file.list_files', 'file_utils.list_files', (['self.cfg.static.directory', '""".yaml"""', '(True)'], {}), "(self.cfg.static.directory, '.yaml', True)\n", (2401, 2443), True, 'from vitrage.utils import file as file_utils\n'), ((3616, 3647), 'vitrage.utils.file.load_yaml_file', 'file_utils.load_yaml_file', (['path'], {}), '(path)\n', (3641, 3647), True, 'from vitrage.utils import file as file_utils\n')] |
from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
for action in build_actions:
def route(self, request):
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
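# Request shapes accepted by the generated endpoints (URL prefixes depend on the router
# this viewset is registered with, so the paths below are assumptions):
#   GET  .../aggregate/?_y_func=<func>&_y_column=id
#   GET  .../group/?_x_column=<field>&_x_lookup=<lookup>&_y_func=<func>&_y_column=id
#   POST .../reorder/      body validated by ReorderSerializer
#   POST .../reset_order/  renumbers `ordering_field` sequentially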
| [
"rest_framework.serializers.IntegerField",
"rest_framework.decorators.list_route",
"jet_django.filters.model_aggregate.AggregateFilter",
"jet_django.serializers.reorder.reorder_serializer_factory",
"rest_framework.response.Response",
"rest_framework.serializers.CharField",
"rest_framework.serializers.ModelSerializer",
"jet_django.filters.model_group.GroupFilter"
]
| [((648, 674), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (672, 674), False, 'from rest_framework import status, viewsets, serializers\n'), ((931, 954), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (952, 954), False, 'from rest_framework import status, viewsets, serializers\n'), ((968, 994), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (992, 994), False, 'from rest_framework import status, viewsets, serializers\n'), ((1486, 1544), 'jet_django.serializers.reorder.reorder_serializer_factory', 'reorder_serializer_factory', (['build_queryset', 'ordering_field'], {}), '(build_queryset, ordering_field)\n', (1512, 1544), False, 'from jet_django.serializers.reorder import reorder_serializer_factory\n'), ((2230, 2257), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['get']"}), "(methods=['get'])\n", (2240, 2257), False, 'from rest_framework.decorators import list_route\n'), ((3051, 3078), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['get']"}), "(methods=['get'])\n", (3061, 3078), False, 'from rest_framework.decorators import list_route\n'), ((4906, 4934), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['post']"}), "(methods=['post'])\n", (4916, 4934), False, 'from rest_framework.decorators import list_route\n'), ((5172, 5200), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['post']"}), "(methods=['post'])\n", (5182, 5200), False, 'from rest_framework.decorators import list_route\n'), ((6030, 6058), 'rest_framework.decorators.list_route', 'list_route', ([], {'methods': "['post']"}), "(methods=['post'])\n", (6040, 6058), False, 'from rest_framework.decorators import list_route\n'), ((3015, 3040), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (3023, 3040), False, 'from rest_framework.response import Response\n'), ((4469, 4494), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (4477, 4494), False, 'from rest_framework.response import Response\n'), ((5136, 5161), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (5144, 5161), False, 'from rest_framework.response import Response\n'), ((5430, 5442), 'rest_framework.response.Response', 'Response', (['{}'], {}), '({})\n', (5438, 5442), False, 'from rest_framework.response import Response\n'), ((5953, 6008), 'rest_framework.response.Response', 'Response', (["{'action': form._meta.name, 'result': result}"], {}), "({'action': form._meta.name, 'result': result})\n", (5961, 6008), False, 'from rest_framework.response import Response\n'), ((5616, 5673), 'rest_framework.response.Response', 'Response', (['form.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(form.errors, status=status.HTTP_400_BAD_REQUEST)\n', (5624, 5673), False, 'from rest_framework.response import Response\n'), ((2587, 2604), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ([], {}), '()\n', (2602, 2604), False, 'from rest_framework.serializers import ModelSerializer\n'), ((2738, 2755), 'jet_django.filters.model_aggregate.AggregateFilter', 'AggregateFilter', ([], {}), '()\n', (2753, 2755), False, 'from jet_django.filters.model_aggregate import AggregateFilter\n'), ((3714, 3731), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ([], {}), '()\n', (3729, 3731), False, 'from 
rest_framework.serializers import ModelSerializer\n'), ((3896, 3913), 'rest_framework.serializers.ModelSerializer', 'ModelSerializer', ([], {}), '()\n', (3911, 3913), False, 'from rest_framework.serializers import ModelSerializer\n'), ((4047, 4060), 'jet_django.filters.model_group.GroupFilter', 'GroupFilter', ([], {}), '()\n', (4058, 4060), False, 'from jet_django.filters.model_group import GroupFilter\n')] |
"""
Weather helpers: a simple day/night cycle that darkens and lightens the sky color.
"""
from ursina import color, window, time
from nMap import nMap
class Weather:
def __init__(this, rate=1):
this.red = 0
this.green = 200
this.blue = 211
this.darkling = 0
this.rate = rate
this.towardsNight = 1
def setSky(this):
r = nMap(this.darkling,0,100,0,this.red)
g = nMap(this.darkling,0,100,0,this.green)
b = nMap(this.darkling,0,100,0,this.blue)
window.color = color.rgb(r,g,b)
def update(this):
this.darkling -= ( this.rate *
this.towardsNight *
time.dt)
if this.darkling < 0:
this.towardsNight *= -1
this.darkling = 0
this.setSky()
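# Illustrative usage inside an Ursina script (assumed app setup; Ursina calls a
# module-level update() every frame):
# >>> from ursina import Ursina
# >>> app = Ursina()
# >>> weather = Weather(rate=5)
# >>> def update():
# ...     weather.update()
# >>> app.run()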
| [
"nMap.nMap",
"ursina.color.rgb"
]
| [((325, 365), 'nMap.nMap', 'nMap', (['this.darkling', '(0)', '(100)', '(0)', 'this.red'], {}), '(this.darkling, 0, 100, 0, this.red)\n', (329, 365), False, 'from nMap import nMap\n'), ((374, 416), 'nMap.nMap', 'nMap', (['this.darkling', '(0)', '(100)', '(0)', 'this.green'], {}), '(this.darkling, 0, 100, 0, this.green)\n', (378, 416), False, 'from nMap import nMap\n'), ((425, 466), 'nMap.nMap', 'nMap', (['this.darkling', '(0)', '(100)', '(0)', 'this.blue'], {}), '(this.darkling, 0, 100, 0, this.blue)\n', (429, 466), False, 'from nMap import nMap\n'), ((486, 504), 'ursina.color.rgb', 'color.rgb', (['r', 'g', 'b'], {}), '(r, g, b)\n', (495, 504), False, 'from ursina import color, window, time\n')] |
import functools
import gc
from abc import ABC
from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents
from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor
from sources.utils.exception_definitions import OutsideOfContextError
def throw_error_outside_context(func):
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
if not self.within_context:
raise OutsideOfContextError(
"""Error: Tried to access client Dataset outside of context
manager. This might lead to data leaks and bad use of
memory. Please wrap the usage of ClientDataset.dataset_x
inside a "with statement". """)
else:
value = func(self, *args, **kwargs)
return value
return wrapper_decorator
class ClientDataset(ABC):
def __init__(self,
client_identifier: str,
client_dataset_loader: ClientDatasetLoader,
client_dataset_processor: ClientDatasetProcessor,
):
self.client_identifier = client_identifier
self.client_dataset_loader = client_dataset_loader
self.client_dataset_processor = client_dataset_processor
self._train_data = None
self._test_data = None
self._validation_data = None
self.within_context = False
def process_x(self, raw_x_batch):
"""Pre-processes each batch of features
before being fed to the model."""
return self.client_dataset_processor.process_x(raw_x_batch)
def process_y(self, raw_y_batch):
"""Pre-processes each batch of labels before being fed to the model."""
return self.client_dataset_processor.process_y(raw_y_batch)
def _lazy_initialise_data(self, data, dataset_component: DatasetComponents):
if data is None:
data = self.client_dataset_loader.load_dataset(self.client_identifier,
dataset_component)
return self.process_x(data["x"]), self.process_y(data["y"])
else:
return data
@property
@throw_error_outside_context
def training_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data
@property
@throw_error_outside_context
def training_data_x(self):
"""Returns the Training Data as an array of samples"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[0]
@property
@throw_error_outside_context
def training_data_y(self):
"""Returns the Classifications for the Training Data as array"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[1]
@property
@throw_error_outside_context
def test_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data
@property
@throw_error_outside_context
def test_data_x(self):
"""Returns the Test Data as an array of samples"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[0]
@property
@throw_error_outside_context
def test_data_y(self):
"""Returns the Classifications for the Test Data as array"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[1]
@property
@throw_error_outside_context
def validation_data(self):
"""Returns the Validation Data as pair of arrays containing the
samples x,
and classification y"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data
@property
@throw_error_outside_context
def validation_data_x(self):
"""Returns the Validation Data as an array of samples"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[0]
@property
@throw_error_outside_context
def validation_data_y(self):
"""Returns the Classifications for the Validation Data as array"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[1]
def __enter__(self):
self.within_context = True
def __exit__(self, exc_type, exc_value, exc_traceback):
self.within_context = False
self._train_data = None
self._test_data = None
self._validation_data = None
gc.collect()
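# Illustrative usage (the loader/processor instances are assumptions); data may only be
# accessed inside the context manager, otherwise OutsideOfContextError is raised:
# >>> dataset = ClientDataset("client_0", my_loader, my_processor)   # hypothetical args
# >>> with dataset:
# ...     x_train, y_train = dataset.training_data
# >>> dataset.training_data          # outside the context -> OutsideOfContextError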
| [
"gc.collect",
"functools.wraps",
"sources.utils.exception_definitions.OutsideOfContextError"
]
| [((434, 455), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (449, 455), False, 'import functools\n'), ((5595, 5607), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5605, 5607), False, 'import gc\n'), ((560, 845), 'sources.utils.exception_definitions.OutsideOfContextError', 'OutsideOfContextError', (['"""Error: Tried to access client Dataset outside of context \n manager. This might lead to data leaks and bad use of \n memory. Please wrap the usage of ClientDataset.dataset_x \n inside a "with statement". """'], {}), '(\n """Error: Tried to access client Dataset outside of context \n manager. This might lead to data leaks and bad use of \n memory. Please wrap the usage of ClientDataset.dataset_x \n inside a "with statement". """\n )\n', (581, 845), False, 'from sources.utils.exception_definitions import OutsideOfContextError\n')] |
import requests
import os
from PyInquirer import style_from_dict, Token, prompt
import sys
import utils.config as config
import utils.ends as ends
from utils.colorfy import *
from auto.testing import test_trans
import time
import json
style = style_from_dict({
Token.QuestionMark: '#E91E63 bold',
Token.Selected: '#673AB7 bold',
Token.Instruction: '#<PASSWORD>',
Token.Answer: '#<PASSWORD> bold',
Token.Question: '#<PASSWORD>16 bold',
})
def client(ip, port):
os.system('clear')
cyan('What a beautiful day to enter the cult...')
baseURL = 'http://' + ip + ':' + port
while True:
print('----------------------------------------------------------------------')
method_q = {
'type': 'list',
'name': 'method',
'message': 'Select action:',
'choices': ['Network Overlay', \
'Insert a Song', \
'Search for a Song', \
'Delete a Song', \
'Depart from Chord', \
'Run automated test', \
'Help', \
'Exit']
}
method_a = prompt(method_q, style=style)['method']
os.system('clear')
if method_a == 'Depart from Chord':
print(cyan("Preparing Node to depart from Chord..."))
try:
response = requests.get(baseURL + ends.c_depart)
if response.status_code == 200:
if response.text == "Left the Chord":
print(response.text)
print(green("Node is out of Toychord network"))
else:
print(red(response.text))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Node didnt depart..."))
print(red("Unfortunately exiting..."))
break
elif method_a == 'Insert a Song':
print('Insert a Title-Value pair for the song you wish to insert')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
},
{
'type': 'input',
'name': 'value',
'message': 'Value:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Inserting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']})
if response.status_code == 200:
print(cyan("Inserted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Song wasnt inserted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Delete a Song':
print('Insert the Song Title you wish to delete')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Deleting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
# print(cyan("Deleting Song: ") + green(response.text.split(" ")[1]) + )
print(cyan("Deleted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(yellow("Song doesnt exist in the Chord"))
print(yellow("Couldnt delete it"))
except:
print(red("Could not establish connection with Node. Song wasnt deleted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Search for a Song':
print('Insert the Song Title you wish to Search or * to get all songs of the Chord')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
if fetch_a['key'] == "*":
print(cyan("Fetching all the songs of the Chord..."))
try:
response = requests.get(baseURL + ends.c_query_star)
if response.status_code == 200:
nodes_list = json.loads(response.text)
# print(green(response.text))
# print(cyan()))
for node in nodes_list["res"]:
print(header("\n" + node["uid"]) + " " + underline(node["ip"] + ":" + node["port"]))
for song in node["song"]:
print(" -" + green(song["key"]) + " " + song["value"])
else:
print(yellow("Something went Wrong...") + response.status_code)
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
else:
print(cyan("Searching Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
print("Song found in node with id: ",green(response.text.split(" ")[0]))
print("Song value: " + green(response.text.split(" ")[1]))
else:
print(yellow("Song doesnt exist in the Chord"))
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Network Overlay':
print(cyan("Initiating Network Overlay..."))
try:
response = requests.get(baseURL + ends.c_overlay)
if response.status_code == 200:
nodes_list = json.loads(response.text)
print('\n')
for node in nodes_list["res"]:
print(green(node["ip"] + ":" + node["port"]), end = '')
if node != nodes_list["res"][-1]:
print(" -> ", end = '')
print('\n')
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Help':
print('-------------------------------- Help --------------------------------\n')
overlayHelp=header("Overlay: ") + cyan("This functions recreates and prints the current Network Topology(eg. Node1 -> Node2 -> ...)\n")
insertHelp=header("Insert Song: ") + cyan("This functions expects a Song Title and a Song Value and inserts them in the Chord\n")
            queryHelp=header("Search Song: ") + cyan("This function expects a Song Title and returns the Node in which the song is stored and the value of the song\n")
deleteHelp=header("Delete Song: ") + cyan("This function expects a Song Title and returns the Node who deleted the song\n")
departHelp=header("Depart: ") + cyan("This function makes the node connected to this cli leave the Chord\n")
autoTests=header("Run automated tests: ") + cyan("This function expects a test number (1=insert, 2=query, 3=requests), runs the test and returns the chord throughput")
print( " -",overlayHelp,"\n"
" -",insertHelp,"\n",
"-",queryHelp,"\n",
"-",deleteHelp,"\n",
"-",departHelp,"\n",
"-",autoTests,"\n",
)
continue
elif method_a == 'Run automated test':
print('Select which test you wish to run (1 = insert, 2 = query, 3 = requests)')
fetch_q = [
{
'type': 'input',
'name': 'test_n',
'message': 'Test:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'
if test_number not in ('1', '2', '3'):
print(yellow("Wrong test number (give 1, 2 or 3)"))
continue
print(cyan("Running automated test: ") + ("insert" if test_number == '1' else ("query" if test_number == '2' else "requests")) + cyan("..."))
print(blue(test_trans(test_number)))
print(cyan("Done!"))
continue
elif method_a == 'Exit':
os.system('clear')
break
else:
os.system('clear')
continue
if __name__ == '__main__':
if len(sys.argv) < 3:
print("!! you must tell me the port. Ex. -p 5000 !!")
exit(0)
if sys.argv[1] in ("-p", "-P"):
my_port = sys.argv[2]
my_ip = os.popen('ip addr show ' + config.NETIFACE + ' | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'').read().strip()
client(my_ip, my_port)
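# Illustrative invocation (script name is an assumption):
#   python cli.py -p 5000
# The CLI auto-detects the node IP from config.NETIFACE and then drives the node's
# REST endpoints (ends.c_insert, ends.c_query, ends.c_depart, ...) over HTTP.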
| [
"json.loads",
"requests.post",
"PyInquirer.prompt",
"requests.get",
"os.popen",
"auto.testing.test_trans",
"os.system",
"PyInquirer.style_from_dict"
]
| [((243, 442), 'PyInquirer.style_from_dict', 'style_from_dict', (["{Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.\n Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.\n Question: '#<PASSWORD>16 bold'}"], {}), "({Token.QuestionMark: '#E91E63 bold', Token.Selected:\n '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer:\n '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold'})\n", (258, 442), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((467, 485), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (476, 485), False, 'import os\n'), ((1022, 1040), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1031, 1040), False, 'import os\n'), ((980, 1009), 'PyInquirer.prompt', 'prompt', (['method_q'], {'style': 'style'}), '(method_q, style=style)\n', (986, 1009), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((1159, 1196), 'requests.get', 'requests.get', (['(baseURL + ends.c_depart)'], {}), '(baseURL + ends.c_depart)\n', (1171, 1196), False, 'import requests\n'), ((1997, 2025), 'PyInquirer.prompt', 'prompt', (['fetch_q'], {'style': 'style'}), '(fetch_q, style=style)\n', (2003, 2025), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((2115, 2214), 'requests.post', 'requests.post', (['(baseURL + ends.c_insert)'], {'data': "{'key': fetch_a['key'], 'value': fetch_a['value']}"}), "(baseURL + ends.c_insert, data={'key': fetch_a['key'], 'value':\n fetch_a['value']})\n", (2128, 2214), False, 'import requests\n'), ((2809, 2837), 'PyInquirer.prompt', 'prompt', (['fetch_q'], {'style': 'style'}), '(fetch_q, style=style)\n', (2815, 2837), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((2926, 2994), 'requests.post', 'requests.post', (['(baseURL + ends.c_delete)'], {'data': "{'key': fetch_a['key']}"}), "(baseURL + ends.c_delete, data={'key': fetch_a['key']})\n", (2939, 2994), False, 'import requests\n'), ((3770, 3798), 'PyInquirer.prompt', 'prompt', (['fetch_q'], {'style': 'style'}), '(fetch_q, style=style)\n', (3776, 3798), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((7962, 8092), 'os.popen', 'os.popen', (['(\'ip addr show \' + config.NETIFACE +\n \' | grep "\\\\<inet\\\\>" | awk \\\'{ print $2 }\\\' | awk -F "/" \\\'{ print $1 }\\\'\'\n )'], {}), '(\'ip addr show \' + config.NETIFACE +\n \' | grep "\\\\<inet\\\\>" | awk \\\'{ print $2 }\\\' | awk -F "/" \\\'{ print $1 }\\\'\'\n )\n', (7970, 8092), False, 'import os\n'), ((3911, 3952), 'requests.get', 'requests.get', (['(baseURL + ends.c_query_star)'], {}), '(baseURL + ends.c_query_star)\n', (3923, 3952), False, 'import requests\n'), ((4658, 4725), 'requests.post', 'requests.post', (['(baseURL + ends.c_query)'], {'data': "{'key': fetch_a['key']}"}), "(baseURL + ends.c_query, data={'key': fetch_a['key']})\n", (4671, 4725), False, 'import requests\n'), ((5292, 5330), 'requests.get', 'requests.get', (['(baseURL + ends.c_overlay)'], {}), '(baseURL + ends.c_overlay)\n', (5304, 5330), False, 'import requests\n'), ((4009, 4034), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (4019, 4034), False, 'import json\n'), ((5385, 5410), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (5395, 5410), False, 'import json\n'), ((7250, 7278), 'PyInquirer.prompt', 'prompt', (['fetch_q'], {'style': 'style'}), '(fetch_q, style=style)\n', (7256, 7278), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((7707, 7725), 'os.system', 
'os.system', (['"""clear"""'], {}), "('clear')\n", (7716, 7725), False, 'import os\n'), ((7747, 7765), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (7756, 7765), False, 'import os\n'), ((7614, 7637), 'auto.testing.test_trans', 'test_trans', (['test_number'], {}), '(test_number)\n', (7624, 7637), False, 'from auto.testing import test_trans\n')] |
"""
Provides a class that handles the fits metadata required by PypeIt.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import io
import string
from copy import deepcopy
import datetime
from IPython import embed
import numpy as np
import yaml
from astropy import table, coordinates, time, units
from pypeit import msgs
from pypeit import utils
from pypeit.core import framematch
from pypeit.core import flux_calib
from pypeit.core import parse
from pypeit.core import meta
from pypeit.io import dict_to_lines
from pypeit.par import PypeItPar
from pypeit.par.util import make_pypeit_file
from pypeit.bitmask import BitMask
# TODO: Turn this into a DataContainer
# Initially tried to subclass this from astropy.table.Table, but that
# proved too difficult.
class PypeItMetaData:
"""
Provides a table and interface to the relevant fits file metadata
used during the reduction.
The content of the fits table is dictated by the header keywords
specified for the provided spectrograph. It is expected that this
table can be used to set the frame type of each file.
The metadata is validated using checks specified by the provided
spectrograph class.
For the data table, one should typically provide either the file
list from which to grab the data from the fits headers or the
data directly. If neither are provided the table is instantiated
without any data.
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:obj:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior.
files (:obj:`str`, :obj:`list`, optional):
The list of files to include in the table.
data (table-like, optional):
The data to include in the table. The type can be anything
allowed by the instantiation of
:class:`astropy.table.Table`.
usrdata (:obj:`astropy.table.Table`, optional):
A user provided set of data used to supplement or overwrite
metadata read from the file headers. The table must have a
`filename` column that is used to match to the metadata
table generated within PypeIt. **Note**: This is ignored if
`data` is also provided. This functionality is only used
when building the metadata from the fits files.
strict (:obj:`bool`, optional):
Function will fault if there is a problem with the reading
the header for any of the provided files; see
:func:`pypeit.spectrographs.spectrograph.get_headarr`. Set
to False to instead report a warning and continue.
Attributes:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:class:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior. If not
provided, the default parameters specific to the provided
spectrograph are used.
configs (:obj:`dict`):
A dictionary of the unique configurations identified.
type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):
The bitmask used to set the frame type of each fits file.
calib_bitmask (:class:`BitMask`):
The bitmask used to keep track of the calibration group bits.
table (:class:`astropy.table.Table`):
The table with the relevant metadata for each fits file to
use in the data reduction.
"""
def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,
strict=True):
if data is None and files is None:
# Warn that table will be empty
msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
' The table will be empty!')
# Initialize internals
self.spectrograph = spectrograph
self.par = par
if not isinstance(self.par, PypeItPar):
raise TypeError('Input parameter set must be of type PypeItPar.')
self.type_bitmask = framematch.FrameTypeBitMask()
# Build table
self.table = table.Table(data if files is None
else self._build(files, strict=strict,
usrdata=usrdata))
# Merge with user data, if present
if usrdata is not None:
self.merge(usrdata)
# Impose types on specific columns
self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])
# Initialize internal attributes
self.configs = None
self.calib_bitmask = None
# Initialize columns that the user might add
self.set_user_added_columns()
# Validate instrument name
self.spectrograph.vet_instrument(self.table)
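    # Illustrative (hypothetical) construction of a PypeItMetaData object; the
    # spectrograph name, parameter call, and file glob below are placeholders:
    #   spec = load_spectrograph('shane_kast_blue')
    #   fitstbl = PypeItMetaData(spec, spec.default_pypeit_par(),
    #                            files=glob.glob('raw/*.fits'))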
def _impose_types(self, columns, types):
"""
Impose a set of types on certain columns.
.. note::
:attr:`table` is edited in place.
Args:
columns (:obj:`list`):
List of column names
types (:obj:`list`):
List of types
"""
for c,t in zip(columns, types):
if c in self.keys():
self.table[c] = self.table[c].astype(t)
def _build(self, files, strict=True, usrdata=None):
"""
Generate the fitstbl that will be at the heart of PypeItMetaData.
Args:
files (:obj:`str`, :obj:`list`):
One or more files to use to build the table.
strict (:obj:`bool`, optional):
Function will fault if :func:`fits.getheader` fails to
read any of the headers. Set to False to report a
warning and continue.
usrdata (astropy.table.Table, optional):
Parsed for frametype for a few instruments (e.g. VLT)
where meta data may not be required
Returns:
dict: Dictionary with the data to assign to :attr:`table`.
"""
# Allow for single files
_files = files if hasattr(files, '__len__') else [files]
# Build lists to fill
data = {k:[] for k in self.spectrograph.meta.keys()}
data['directory'] = ['None']*len(_files)
data['filename'] = ['None']*len(_files)
# Build the table
for idx, ifile in enumerate(_files):
# User data (for frame type)
if usrdata is None:
usr_row = None
else:
# TODO: This check should be done elsewhere
# Check
if os.path.basename(ifile) != usrdata['filename'][idx]:
msgs.error('File name list does not match user-provided metadata table. See '
'usrdata argument of instantiation of PypeItMetaData.')
usr_row = usrdata[idx]
# Add the directory and file name to the table
data['directory'][idx], data['filename'][idx] = os.path.split(ifile)
if not data['directory'][idx]:
data['directory'][idx] = '.'
# Read the fits headers
headarr = self.spectrograph.get_headarr(ifile, strict=strict)
# Grab Meta
for meta_key in self.spectrograph.meta.keys():
value = self.spectrograph.get_meta_value(headarr, meta_key,
required=strict,
usr_row=usr_row,
ignore_bad_header = self.par['rdx']['ignore_bad_headers'])
if isinstance(value, str) and '#' in value:
value = value.replace('#', '')
msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
meta_key, value))
data[meta_key].append(value)
msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))
# JFH Changed the below to not crash if some files have None in
# their MJD. This is the desired behavior since if there are
# empty or corrupt files we still want this to run.
        # Validate; print out a warning if there is a problem
try:
time.Time(data['mjd'], format='mjd')
except ValueError:
mjd = np.asarray(data['mjd'])
filenames = np.asarray(data['filename'])
bad_files = filenames[mjd == None]
# Print status message
msg = 'Time invalid for {0} files.\n'.format(len(bad_files))
msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n'
for file in bad_files:
msg += ' {0}\n'.format(file)
msgs.warn(msg)
# Return
return data
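    # The dictionary returned by _build() is column-oriented, e.g. (schematically):
    #   {'directory': [...], 'filename': [...], 'mjd': [...], ...}
    # and is converted to an astropy Table in __init__.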
# TODO: In this implementation, slicing the PypeItMetaData object
# will return an astropy.table.Table, not a PypeItMetaData object.
def __getitem__(self, item):
return self.table.__getitem__(item)
def __setitem__(self, item, value):
return self.table.__setitem__(item, value)
def __len__(self):
return self.table.__len__()
def __repr__(self):
return self.table._base_repr_(html=False,
descr_vals=['PypeItMetaData:\n',
' spectrograph={0}\n'.format(
self.spectrograph.name),
' length={0}\n'.format(len(self))])
def _repr_html_(self):
return self.table._base_repr_(html=True, max_width=-1,
descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format(
self.spectrograph.name, len(self))])
@staticmethod
def default_keys():
return [ 'directory', 'filename', 'instrume' ]
def keys(self):
return self.table.keys()
def sort(self, col):
return self.table.sort(col)
def merge(self, usrdata, match_type=True):
"""
Use the provided table to supplement or overwrite the metadata.
If the internal table already contains the column in `usrdata`,
the function will try to match the data type of the `usrdata`
column to the existing data type. If it can't it will just add
the column anyway, with the type in `usrdata`. You can avoid
this step by setting `match_type=False`.
Args:
usrdata (:obj:`astropy.table.Table`):
A user provided set of data used to supplement or
overwrite metadata read from the file headers. The
table must have a `filename` column that is used to
match to the metadata table generated within PypeIt.
match_type (:obj:`bool`, optional):
Attempt to match the data type in `usrdata` to the type
in the internal table. See above.
Raises:
TypeError:
Raised if `usrdata` is not an `astropy.io.table.Table`
KeyError:
Raised if `filename` is not a key in the provided table.
"""
meta_data_model = meta.get_meta_data_model()
# Check the input
if not isinstance(usrdata, table.Table):
raise TypeError('Must provide an astropy.io.table.Table instance.')
if 'filename' not in usrdata.keys():
raise KeyError('The user-provided table must have \'filename\' column!')
# Make sure the data are correctly ordered
srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]
# Convert types if possible
existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))
radec_done = False
if len(existing_keys) > 0 and match_type:
for key in existing_keys:
if len(self.table[key].shape) > 1: # NOT ALLOWED!!
# TODO: This should be converted to an assert statement...
raise ValueError('CODING ERROR: Found high-dimensional column.')
#embed(header='372 of metadata')
elif key in meta_data_model.keys(): # Is this meta data??
dtype = meta_data_model[key]['dtype']
else:
dtype = self.table[key].dtype
# Deal with None's properly
nones = usrdata[key] == 'None'
usrdata[key][nones] = None
# Rest
                # Allow for str RA, DEC (backwards compatibility)
if key in ['ra', 'dec'] and not radec_done:
ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,
usrdata['dec'][~nones].data)
usrdata['ra'][~nones] = ras.astype(dtype)
usrdata['dec'][~nones] = decs.astype(dtype)
radec_done = True
else:
usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)
# Include the user data in the table
for key in usrdata.keys():
self.table[key] = usrdata[key][srt]
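    # Illustrative (hypothetical) use of merge(): supplement the header-based
    # metadata with a user table keyed on 'filename', e.g.
    #   usr = table.Table({'filename': ['b1.fits'], 'frametype': ['bias']})
    #   fitstbl.merge(usr)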
def finalize_usr_build(self, frametype, setup):
"""
Finalize the build of the table based on user-provided data,
typically pulled from the PypeIt file.
This function:
- sets the frame types based on the provided object
- sets all the configurations to the provided `setup`
- assigns all frames to a single calibration group, if the
'calib' column does not exist
- if the 'comb_id' column does not exist, this sets the
combination groups to be either undefined or to be unique
for each science or standard frame, see
:func:`set_combination_groups`.
.. note::
This should only be run if all files are from a single
instrument configuration. :attr:`table` is modified
in-place.
See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.
.. todo::
- Why isn't frametype just in the user-provided data? It
may be (see get_frame_types) and I'm just not using it...
Args:
frametype (:obj:`dict`):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
setup (:obj:`str`):
If the 'setup' columns does not exist, fill the
configuration setup columns with this single identifier.
"""
self.get_frame_types(user=frametype)
# TODO: Add in a call to clean_configurations? I didn't add it
# here, because this method is only called for a preconstructed
# pypeit file, which should nominally follow an execution of
# pypeit_setup. If the user edits back in a frame that has an
# invalid key, at least for now the DEIMOS image reader will
# fault.
self.set_configurations(fill=setup)
self.set_calibration_groups(default=True)
self.set_combination_groups()
def get_configuration(self, indx, cfg_keys=None):
"""
Return the configuration dictionary for a given frame.
This is not the same as the backwards compatible "setup"
dictionary.
Args:
indx (:obj:`int`):
The index of the table row to use to construct the
configuration.
cfg_keys (:obj:`list`, optional):
The list of metadata keys to use to construct the
configuration. If None, the `configuration_keys` of
:attr:`spectrograph` is used.
Returns:
dict: A dictionary with the metadata values from the
selected row.
"""
_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys
return {k:self.table[k][indx] for k in _cfg_keys}
def master_key(self, row, det=1):
"""
Construct the master key for the file in the provided row.
The master key is the combination of the configuration, the
calibration group, and the detector. The configuration ID is
the same as included in the configuration column (A, B, C, etc),
the calibration group is the same as the calibration bit number,
and the detector number is provided as an argument and converted
to a zero-filled string with two digits (the maximum number of
detectors is 99).
Using the calibration bit in the keyword allows MasterFrames to
be used with multiple calibration groups.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the key.
det (:obj:`int`, :obj:`tuple`, optional):
The 1-indexed detector number(s). If a tuple, it must include
detectors designated as a viable mosaic for
:attr:`spectrograph`; see
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
Returns:
:obj:`str`: Master key with configuration, calibration group(s), and
detector.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns
haven't been defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot provide master key string without setup and calibbit; '
'run set_configurations and set_calibration_groups.')
det_name = self.spectrograph.get_det_name(det)
return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}"
def construct_obstime(self, row):
"""
Construct the MJD of when the frame was observed.
.. todo::
- Consolidate with :func:`convert_time` ?
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
Returns:
astropy.time.Time: The MJD of the observation.
"""
return time.Time(self['mjd'][row], format='mjd')
def construct_basename(self, row, obstime=None):
"""
Construct the root name primarily for PypeIt file output.
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
obstime (:class:`astropy.time.Time`, optional):
The MJD of the observation. If None, constructed using
:func:`construct_obstime`.
Returns:
str: The root name for file output.
"""
_obstime = self.construct_obstime(row) if obstime is None else obstime
tiso = time.Time(_obstime, format='isot')
dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')
return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],
self['target'][row].replace(" ", ""),
self.spectrograph.camera,
datetime.datetime.strftime(dtime, '%Y%m%dT'),
tiso.value.split("T")[1].replace(':',''))
def get_setup(self, row, det=None, config_only=False):
"""
Construct the setup dictionary.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting it. And it may be something to put
in the relevant spectrograph class.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the setup.
det (:obj:`int`, optional):
The 1-indexed detector to include. If None, all
detectors are included.
config_only (:obj:`bool`, optional):
Just return the dictionary with the configuration, don't
include the top-level designation of the configuration
itself.
Returns:
dict: The pypeit setup dictionary with the default format.
Raises:
PypeItError:
                Raised if the 'setup' hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot provide instrument setup without \'setup\' column; '
'run set_configurations.')
dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]
decker = 'none' if 'decker' not in self.keys() else self['decker'][row]
slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]
slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]
binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]
skey = 'Setup {}'.format(self['setup'][row])
# Key names *must* match configuration_keys() for spectrographs
setup = {skey:
{'--':
{'disperser': {'dispname': dispname, 'dispangle':dispangle},
'dichroic': dichroic,
'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},
'binning': binning, # PypeIt orientation binning of a science image
}
}
}
#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]
#for d in _det:
# setup[skey][str(d).zfill(2)] \
# = {'binning': binning, 'det': d,
# 'namp': self.spectrograph.detector[d-1]['numamplifiers']}
return setup[skey] if config_only else setup
def get_configuration_names(self, ignore=None, return_index=False, configs=None):
"""
Get the list of the unique configuration names.
This provides just the list of setup identifiers ('A', 'B',
etc.) and the row index where it first occurs. This is
different from :func:`unique_configurations` because the latter
determines and provides the configurations themselves.
This is mostly a convenience function for the writing routines.
Args:
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
return_index (:obj:`bool`, optional):
                Return row indices with the first occurrence of these
configurations.
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']).
Returns:
numpy.array: The list of unique setup names. A second
returned object provides the indices of the first occurrence
of these setups, if requested.
Raises:
PypeItError:
                Raised if the 'setup' hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot get setup names; run set_configurations.')
# Unique configurations
setups, indx = np.unique(self['setup'], return_index=True)
if ignore is not None:
# Remove the selected configurations to ignore
rm = np.logical_not(np.isin(setups, ignore))
setups = setups[rm]
indx = indx[rm]
# Restrict
_configs = None if configs is None else np.atleast_1d(configs)
# TODO: Why do we need to specify 'all' here? Can't `configs is
# None` mean that you want all the configurations? Or can we
# make the default 'all'?
if configs is not None and 'all' not in _configs:
use = np.isin(setups, _configs)
setups = setups[use]
indx = indx[use]
        return (setups, indx) if return_index else setups
def _get_cfgs(self, copy=False, rm_none=False):
"""
Convenience method to return :attr:`configs` with possible
alterations.
This method *should not* be called by any method outside of
this class; use :func:`unique_configurations` instead.
Args:
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
"""
_cfg = deepcopy(self.configs) if copy else self.configs
if rm_none and 'None' in _cfg.keys():
del _cfg['None']
return _cfg
def unique_configurations(self, force=False, copy=False, rm_none=False):
"""
Return the unique instrument configurations.
If run before the ``'setup'`` column is initialized, this function
determines the unique instrument configurations by finding
unique combinations of the items in the metadata table listed by
the spectrograph ``configuration_keys`` method.
If run after the ``'setup'`` column has been set, this simply
constructs the configuration dictionary using the unique
configurations in that column.
This is used to set the internal :attr:`configs`. If this
attribute is not None, this function simply returns
:attr:`config` (cf. ``force``).
.. warning::
Any frame types returned by the
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
method for :attr:`spectrograph` will be ignored in the
construction of the unique configurations. If
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
does not return None and the frame types have not yet
been defined (see :func:`get_frame_types`), this method
will fault!
Args:
force (:obj:`bool`, optional):
Force the configurations to be redetermined. Otherwise
the configurations are only determined if
:attr:`configs` has not yet been defined.
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
Raises:
PypeItError:
Raised if there are list of frame types to ignore but
the frame types have not been defined yet.
"""
if self.configs is not None and not force:
return self._get_cfgs(copy=copy, rm_none=rm_none)
if 'setup' in self.keys():
msgs.info('Setup column already set. Finding unique configurations.')
uniq, indx = np.unique(self['setup'], return_index=True)
ignore = uniq == 'None'
if np.sum(ignore) > 0:
msgs.warn('Ignoring {0} frames with configuration set to None.'.format(
np.sum(ignore)))
self.configs = {}
for i in range(len(uniq)):
if ignore[i]:
continue
self.configs[uniq[i]] = self.get_configuration(indx[i])
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
msgs.info('Using metadata to determine unique configurations.')
# If the frame types have been set, ignore anything listed in
# the ignore_frames
indx = np.arange(len(self))
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
ignore_frames = list(ignore_frames.keys())
msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))
use = np.ones(len(self), dtype=bool)
for ftype in ignore_frames:
use &= np.logical_not(self.find_frames(ftype))
indx = indx[use]
if len(indx) == 0:
msgs.error('No frames to use to define configurations!')
# Get the list of keys to use
cfg_keys = self.spectrograph.configuration_keys()
# Configuration identifiers are iterations through the
# upper-case letters: A, B, C, etc.
double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]
cfg_iter = list(string.ascii_uppercase) + double_alphabet
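        # i.e. cfg_iter = ['A', 'B', ..., 'Z', 'AA', 'AB', ..., 'ZZ'], allowing up
        # to 26 + 26*26 = 702 unique configuration identifiers.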
cfg_indx = 0
# TODO: Placeholder: Allow an empty set of configuration keys
# meaning that the instrument setup has only one configuration.
if len(cfg_keys) == 0:
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = {}
msgs.info('All files assumed to be from a single configuration.')
return self._get_cfgs(copy=copy, rm_none=rm_none)
# Use the first file to set the first unique configuration
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)
cfg_indx += 1
# Check if any of the other files show a different
# configuration.
for i in indx[1:]:
j = 0
for c in self.configs.values():
if row_match_config(self.table[i], c, self.spectrograph):
break
j += 1
unique = j == len(self.configs)
if unique:
if cfg_indx == len(cfg_iter):
msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)
cfg_indx += 1
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
def set_configurations(self, configs=None, force=False, fill=None):
"""
Assign each frame to a configuration (setup) and include it
in the metadata table.
The internal table is edited *in place*. If the 'setup'
column already exists, the configurations are **not** reset
unless you call the function with ``force=True``.
Args:
configs (:obj:`dict`, optional):
A nested dictionary, one dictionary per configuration
with the associated values of the metadata associated
with each configuration. The metadata keywords in the
dictionary should be the same as in the table, and the
keywords used to set the configuration should be the
same as returned by the spectrograph
`configuration_keys` method. The latter is not checked.
If None, this is set by :func:`unique_configurations`.
force (:obj:`bool`, optional):
Force the configurations to be reset.
fill (:obj:`str`, optional):
If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
Ignores other inputs.
Raises:
PypeItError:
Raised if none of the keywords in the provided
configuration match with the metadata keywords. Also
raised when some frames cannot be assigned to a
configuration, the spectrograph defined frames that
have been ignored in the determination of the unique
configurations, but the frame types have not been set
yet.
"""
# Configurations have already been set
if 'setup' in self.keys() and not force:
return
if 'setup' not in self.keys() and fill is not None:
self['setup'] = fill
return
_configs = self.unique_configurations() if configs is None else configs
for k, cfg in _configs.items():
if len(set(cfg.keys()) - set(self.keys())) > 0:
msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
self.table['setup'] = 'None'
nrows = len(self)
for i in range(nrows):
for d, cfg in _configs.items():
if row_match_config(self.table[i], cfg, self.spectrograph):
self.table['setup'][i] = d
# Check if any of the configurations are not set
not_setup = self.table['setup'] == 'None'
if not np.any(not_setup):
# All are set, so we're done
return
# Some frame types may have been ignored
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is None:
# Nope, we're still done
return
# At this point, we need the frame type to continue
if 'frametype' not in self.keys():
msgs.error('To account for ignored frames, types must have been defined; run '
'get_frame_types.')
# For each configuration, determine if any of the frames with
# the ignored frame types should be assigned to it:
for cfg_key in _configs.keys():
in_cfg = self.table['setup'] == cfg_key
for ftype, metakey in ignore_frames.items():
# TODO: For now, use this assert to check that the
# metakey is either not set or a string
assert metakey is None or isinstance(metakey, str), \
                    'CODING ERROR: metadata keywords set by config_independent_frames are not ' \
'correctly defined for {0}; values must be None or a string.'.format(
self.spectrograph.__class__.__name__)
# Get the list of frames of this type without a
# configuration
indx = (self.table['setup'] == 'None') & self.find_frames(ftype)
if not np.any(indx):
continue
if metakey is None:
# No matching meta data defined, so just set all
# the frames to this (first) configuration
self.table['setup'][indx] = cfg_key
continue
# Find the unique values of meta for this configuration
uniq_meta = np.unique(self.table[metakey][in_cfg].data)
# Warn the user that the matching meta values are not
# unique for this configuration.
if uniq_meta.size != 1:
msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+ 'frames, configuration {0} does not have unique '.format(cfg_key)
                              + '{0} values.'.format(metakey))
# Find the frames of this type that match any of the
# meta data values
indx &= np.isin(self.table[metakey], uniq_meta)
self.table['setup'][indx] = cfg_key
def clean_configurations(self):
"""
Ensure that configuration-defining keywords all have values
that will yield good PypeIt reductions. Any frames that do
not are removed from :attr:`table`, meaning this method may
modify that attribute directly.
The valid values for configuration keys is set by
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.
"""
cfg_limits = self.spectrograph.valid_configuration_values()
if cfg_limits is None:
# No values specified, so we're done
return
good = np.ones(len(self), dtype=bool)
for key in cfg_limits.keys():
# NOTE: For now, check that the configuration values were
# correctly assigned in the spectrograph class definition.
# This should probably go somewhere else or just removed.
assert isinstance(cfg_limits[key], list), \
'CODING ERROR: valid_configuration_values is not correctly defined ' \
'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)
# Check that the metadata are valid for this column.
indx = np.isin(self[key], cfg_limits[key])
if not np.all(indx):
msgs.warn('Found frames with invalid {0}.'.format(key))
good &= indx
if np.all(good):
# All values good, so we're done
return
# Alert the user that some of the frames are going to be
# removed
msg = 'The following frames have configurations that cannot be reduced by PypeIt' \
' and will be removed from the metadata table (pypeit file):\n'
indx = np.where(np.logical_not(good))[0]
for i in indx:
msg += ' {0}\n'.format(self['filename'][i])
msgs.warn(msg)
# And remove 'em
self.table = self.table[good]
def _set_calib_group_bits(self):
"""
Set the calibration group bit based on the string values of the
'calib' column.
"""
# Find the number groups by searching for the maximum number
# provided, regardless of whether or not a science frame is
# assigned to that group.
ngroups = 0
for i in range(len(self)):
if self['calib'][i] in ['all', 'None']:
# No information, keep going
continue
# Convert to a list of numbers
l = np.amax([ 0 if len(n) == 0 else int(n)
for n in self['calib'][i].replace(':',',').split(',')])
# Check against current maximum
ngroups = max(l+1, ngroups)
# Define the bitmask and initialize the bits
self.calib_bitmask = BitMask(np.arange(ngroups))
self['calibbit'] = 0
# Set the calibration bits
for i in range(len(self)):
# Convert the string to the group list
grp = parse.str2list(self['calib'][i], ngroups)
if grp is None:
# No group selected
continue
# Assign the group; ensure the integers are unique
self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)
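    # Illustrative bit math (assuming ngroups = 3): a 'calib' entry of '0,2' maps
    # to groups [0, 2] via parse.str2list, so calibbit = 2**0 + 2**2 = 5; an entry
    # of 'all' (or '0:3') selects every group.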
def _check_calib_groups(self):
"""
Check that the calibration groups are valid.
This currently only checks that the science frames are
associated with one calibration group.
TODO: Is this appropriate for NIR data?
"""
is_science = self.find_frames('science')
for i in range(len(self)):
if not is_science[i]:
continue
if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
msgs.error('Science frames can only be assigned to a single calibration group.')
@property
def n_calib_groups(self):
"""Return the number of calibration groups."""
return None if self.calib_bitmask is None else self.calib_bitmask.nbits
def set_calibration_groups(self, global_frames=None, default=False, force=False):
"""
Group calibration frames into sets.
Requires the 'setup' column to have been defined. For now this
is a simple grouping of frames with the same configuration.
.. todo::
- Maintain a detailed description of the logic.
The 'calib' column has a string type to make sure that it
matches with what can be read from the pypeit file. The
'calibbit' column is actually what is used to determine the
calibration group of each frame; see :attr:`calib_bitmask`.
Args:
global_frames (:obj:`list`, optional):
A list of strings with the frame types to use in all
calibration groups (e.g., ['bias', 'dark']).
default (:obj:`bool`, optional):
If the 'calib' column is not present, set a single
calibration group *for all rows*.
force (:obj:`bool`, optional):
Force the calibration groups to be reconstructed if
the 'calib' column already exists.
Raises:
PypeItError:
Raised if 'setup' column is not defined, or if
`global_frames` is provided but the frame types have not
been defined yet.
"""
# Set the default if requested and 'calib' doesn't exist yet
if 'calib' not in self.keys() and default:
self['calib'] = '0'
# Make sure the calibbit column does not exist
if 'calibbit' in self.keys():
del self['calibbit']
# Groups have already been set
if 'calib' in self.keys() and 'calibbit' in self.keys() and not force:
return
# Groups have been set but the bits have not (likely because the
# data was read from a pypeit file)
if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force:
self._set_calib_group_bits()
self._check_calib_groups()
return
# TODO: The rest of this just nominally sets the calibration
# group based on the configuration. This will change!
# The configuration must be present to determine the calibration
# group
if 'setup' not in self.keys():
msgs.error('Must have defined \'setup\' column first; try running set_configurations.')
configs = np.unique(self['setup'].data).tolist()
if 'None' in configs:
configs.remove('None') # Ignore frames with undefined configurations
n_cfg = len(configs)
# TODO: Science frames can only have one calibration group
# Assign everything from the same configuration to the same
# calibration group; this needs to have dtype=object, otherwise
# any changes to the strings will be truncated at 4 characters.
self.table['calib'] = np.full(len(self), 'None', dtype=object)
for i in range(n_cfg):
self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)
# Allow some frame types to be used in all calibration groups
# (like biases and darks)
if global_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To set global frames, types must have been defined; '
'run get_frame_types.')
calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
for ftype in global_frames:
indx = np.where(self.find_frames(ftype))[0]
for i in indx:
self['calib'][i] = calibs
# Set the bits based on the string representation of the groups
self._set_calib_group_bits()
# Check that the groups are valid
self._check_calib_groups()
def find_frames(self, ftype, calib_ID=None, index=False):
"""
Find the rows with the associated frame type.
If the index is provided, the frames must also be matched to the
relevant science frame.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`. If
set to the string 'None', this returns all frames
without a known type.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
index (:obj:`bool`, optional):
Return an array of 0-indexed indices instead of a
boolean array.
Returns:
numpy.ndarray: A boolean array, or an integer array if
index=True, with the rows that contain the frames of the
requested type.
Raises:
PypeItError:
Raised if the `framebit` column is not set in the table.
"""
if 'framebit' not in self.keys():
msgs.error('Frame types are not set. First run get_frame_types.')
if ftype == 'None':
return self['framebit'] == 0
# Select frames
indx = self.type_bitmask.flagged(self['framebit'], ftype)
if calib_ID is not None:
# Select frames in the same calibration group
indx &= self.find_calib_group(calib_ID)
# Return
return np.where(indx)[0] if index else indx
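    # Illustrative (hypothetical) usage:
    #   is_arc = fitstbl.find_frames('arc')                    # boolean mask
    #   sci_rows = fitstbl.find_frames('science', index=True)  # 0-indexed rows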
def find_frame_files(self, ftype, calib_ID=None):
"""
Return the list of files with a given frame type.
The frames must also match the science frame index, if it is
provided.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
Returns:
list: List of file paths that match the frame type and
science frame ID, if the latter is provided.
"""
return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))
def frame_paths(self, indx):
"""
Return the full paths to one or more frames.
Args:
indx (:obj:`int`, array-like):
One or more 0-indexed rows in the table with the frames
to return. Can be an array of indices or a boolean
array of the correct length.
Returns:
list: List of the full paths of one or more frames.
"""
if isinstance(indx, (int,np.integer)):
return os.path.join(self['directory'][indx], self['filename'][indx])
return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]
def set_frame_types(self, type_bits, merge=True):
"""
Set and return a Table with the frame types and bits.
Args:
type_bits (numpy.ndarray):
Integer bitmask with the frame types. The length must
match the existing number of table rows.
merge (:obj:`bool`, optional):
Merge the types and bits into the existing table. This
will *overwrite* any existing columns.
Returns:
`astropy.table.Table`: Table with two columns, the frame
type name and bits.
"""
# Making Columns to pad string array
ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')
# KLUDGE ME
#
# TODO: It would be good to get around this. Is it related to
# this change?
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3
#
# See also:
#
# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode
#
# Or we can force type_names() in bitmask to always return the
# correct type...
if int(str(ftype_colmA.dtype)[2:]) < 9:
ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',
name='frametype')
else:
ftype_colm = ftype_colmA
fbits_colm = table.Column(type_bits, name='framebit')
t = table.Table([ftype_colm, fbits_colm])
if merge:
self['frametype'] = t['frametype']
self['framebit'] = t['framebit']
return t
def edit_frame_type(self, indx, frame_type, append=False):
"""
Edit the frame type by hand.
Args:
indx (:obj:`int`):
The 0-indexed row in the table to edit
frame_type (:obj:`str`, :obj:`list`):
One or more frame types to append/overwrite.
append (:obj:`bool`, optional):
Append the frame type. If False, all existing frame
                types are overwritten by the provided type.
"""
if not append:
self['framebit'][indx] = 0
self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)
self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])
def get_frame_types(self, flag_unknown=False, user=None, merge=True):
"""
Generate a table of frame types from the input metadata object.
.. todo::
- Here's where we could add a SPIT option.
Args:
flag_unknown (:obj:`bool`, optional):
Instead of crashing out if there are unidentified files,
leave without a type and continue.
user (:obj:`dict`, optional):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
merge (:obj:`bool`, optional):
                Merge the frame typing into the existing table.
Returns:
:obj:`astropy.table.Table`: A Table with two columns, the
type names and the type bits. See
:class:`pypeit.core.framematch.FrameTypeBitMask` for the
allowed frame types.
"""
# Checks
if 'frametype' in self.keys() or 'framebit' in self.keys():
msgs.warn('Removing existing frametype and framebit columns.')
if 'frametype' in self.keys():
del self.table['frametype']
if 'framebit' in self.keys():
del self.table['framebit']
# # TODO: This needs to be moved into each Spectrograph
# if useIDname and 'idname' not in self.keys():
# raise ValueError('idname is not set in table; cannot use it for file typing.')
# Start
msgs.info("Typing files")
type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
# Use the user-defined frame types from the input dictionary
if user is not None:
if len(user.keys()) != len(self):
raise ValueError('The user-provided dictionary does not match table length.')
msgs.info('Using user-provided frame types.')
for ifile,ftypes in user.items():
indx = self['filename'] == ifile
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
return self.set_frame_types(type_bits, merge=merge)
# Loop over the frame types
for i, ftype in enumerate(self.type_bitmask.keys()):
# # Initialize: Flag frames with the correct ID name or start by
# # flagging all as true
# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \
# else np.ones(len(self), dtype=bool)
# Include a combination of instrument-specific checks using
# combinations of the full set of metadata
exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \
else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']
# TODO: Use & or | ? Using idname above gets overwritten by
            # this if the frames fail to meet the other checks in this call.
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
# Turn on the relevant bits
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)
# Find the nearest standard star to each science frame
# TODO: Should this be 'standard' or 'science' or both?
if 'ra' not in self.keys() or 'dec' not in self.keys():
msgs.warn('Cannot associate standard with science frames without sky coordinates.')
else:
# TODO: Do we want to do this here?
indx = self.type_bitmask.flagged(type_bits, flag='standard')
for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx],
self['dec'][indx]):
if ra == 'None' or dec == 'None':
msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f)
msgs.warn('The above file could be a twilight flat frame that was'
+ msgs.newline() + 'missed by the automatic identification.')
b = self.type_bitmask.turn_off(b, flag='standard')
continue
# If an object exists within 20 arcmins of a listed standard,
# then it is probably a standard star
foundstd = flux_calib.find_standard_file(ra, dec, check=True)
b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard')
# Find the files without any types
indx = np.logical_not(self.type_bitmask.flagged(type_bits))
if np.any(indx):
msgs.info("Couldn't identify the following files:")
for f in self['filename'][indx]:
msgs.info(f)
if not flag_unknown:
msgs.error("Check these files before continuing")
# Finish up (note that this is called above if user is not None!)
msgs.info("Typing completed!")
return self.set_frame_types(type_bits, merge=merge)
def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):
"""
Generate the list of columns to be included in the fitstbl
(nearly the complete list).
Args:
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Returns:
            :obj:`list`: The list of columns to be included in the output
                fits table.
"""
# Columns for output
columns = self.spectrograph.pypeit_file_keys()
extras = []
# comb, bkg columns
if write_bkg_pairs:
extras += ['calib', 'comb_id', 'bkg_id']
# manual
if write_manual:
extras += ['manual']
for key in extras:
if key not in columns:
columns += [key]
# Take only those present
output_cols = np.array(columns)
return output_cols[np.isin(output_cols, self.keys())].tolist()
def set_combination_groups(self, assign_objects=True):
"""
Set combination groups.
.. note::
:attr:`table` is edited in place.
This function can be used to initialize the combination group
and background group columns, and/or to initialize the combination
groups to the set of objects (science or standard frames) to a
unique integer.
If the 'comb_id' or 'bkg_id' columns do not exist, they're set
to -1.
Args:
assign_objects (:obj:`bool`, optional):
                If all of the 'comb_id' values are less than 0 (meaning
they're unassigned), the combination groups are set to
be unique for each standard and science frame.
"""
if 'comb_id' not in self.keys():
self['comb_id'] = -1
if 'bkg_id' not in self.keys():
self['bkg_id'] = -1
if assign_objects and np.all(self['comb_id'] < 0):
# find_frames will throw an exception if framebit is not
# set...
sci_std_idx = np.where(np.any([self.find_frames('science'),
self.find_frames('standard')], axis=0))[0]
self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1
def set_user_added_columns(self):
"""
Set columns that the user *might* add
.. note::
:attr:`table` is edited in place.
This function can be used to initialize columns
that the user might add
"""
if 'manual' not in self.keys():
self['manual'] = ''
def write_sorted(self, ofile, overwrite=True, ignore=None,
write_bkg_pairs=False, write_manual=False):
"""
Write the sorted file.
The sorted file lists all the unique instrument configurations
(setups) and the frames associated with each configuration. The
output data table is identical to the pypeit file output.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Raises:
PypeItError:
                Raised if the 'setup' hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot write sorted instrument configuration table without \'setup\' '
'column; run set_configurations.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
cfgs = self.unique_configurations(copy=ignore is not None)
if ignore is not None:
for key in cfgs.keys():
if key in ignore:
del cfgs[key]
# Construct file
ff = open(ofile, 'w')
for setup in cfgs.keys():
# Get the subtable of frames taken in this configuration
indx = self['setup'] == setup
if not np.any(indx):
continue
subtbl = self.table[output_cols][indx]
# Write the file
ff.write('##########################################################\n')
ff.write('Setup {:s}\n'.format(setup))
ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n')
ff.write('#---------------------------------------------------------\n')
mjd = subtbl['mjd'].copy()
# Deal with possibly None mjds if there were corrupt header cards
mjd[mjd == None] = -99999.0
isort = np.argsort(mjd)
subtbl = subtbl[isort]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
# TODO: Do we need a calib file?
def write_calib(self, ofile, overwrite=True, ignore=None):
"""
Write the calib file.
The calib file provides the unique instrument configurations
(setups) and the association of each frame from that
configuration with a given calibration group.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
- This is complicated by allowing some frame types to have
no association with an instrument configuration
- This is primarily used for QA now; but could probably use the pypeit file instead
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore calibration groups in the provided list.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns haven't been
defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' '
'columns; run set_configurations and set_calibration_groups.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Construct the setups dictionary
cfg = self.unique_configurations(copy=True, rm_none=True)
# TODO: We should edit the relevant follow-on code so that we
# don't have to do these gymnastics. Or better yet, just stop
# producing/using the *.calib file.
_cfg = {}
for setup in cfg.keys():
_cfg[setup] = {}
_cfg[setup]['--'] = deepcopy(cfg[setup])
cfg = _cfg
# Iterate through the calibration bit names as these are the root of the
# MasterFrames and QA
for icbit in np.unique(self['calibbit'].data):
cbit = int(icbit) # for yaml
# Skip this group
if ignore is not None and cbit in ignore:
continue
# Find the frames in this group
#in_group = self.find_calib_group(i)
in_cbit = self['calibbit'] == cbit
# Find the unique configurations in this group, ignoring any
# undefined ('None') configurations
#setup = np.unique(self['setup'][in_group]).tolist()
setup = np.unique(self['setup'][in_cbit]).tolist()
if 'None' in setup:
setup.remove('None')
# Make sure that each calibration group should only contain
# frames from a single configuration
if len(setup) != 1:
msgs.error('Each calibration group must be from one and only one instrument '
'configuration with a valid letter identifier; i.e., the '
'configuration cannot be None.')
# Find the frames of each type in this group
cfg[setup[0]][cbit] = {}
for key in self.type_bitmask.keys():
#ftype_in_group = self.find_frames(key) & in_group
ftype_in_group = self.find_frames(key) & in_cbit
cfg[setup[0]][cbit][key] = [ os.path.join(d,f)
for d,f in zip(self['directory'][ftype_in_group],
self['filename'][ftype_in_group])]
# Write it
ff = open(ofile, 'w')
ff.write(yaml.dump(utils.yamlify(cfg)))
ff.close()
def write_pypeit(self, output_path=None, cfg_lines=None,
write_bkg_pairs=False, write_manual=False,
configs=None):
"""
Write a pypeit file in data-table format.
The pypeit file is the main configuration file for PypeIt,
configuring the control-flow and algorithmic parameters and
listing the data files to read. This function writes the
columns selected by the
:func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,
which can be specific to each instrument.
Args:
output_path (:obj:`str`, optional):
Root path for the output pypeit files. If None, set
to current directory. If the output directory does
not exist, it is created.
cfg_lines (:obj:`list`, optional):
The list of configuration lines to include in the file.
If None are provided, the vanilla configuration is
included.
write_bkg_pairs (:obj:`bool`, optional):
When constructing the
:class:`pypeit.metadata.PypeItMetaData` object, include
two columns called `comb_id` and `bkg_id` that identify
object and background frame pairs.
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']). See
:attr:`configs`.
Raises:
PypeItError:
Raised if the 'setup' isn't defined and split is True.
Returns:
:obj:`list`: List of ``PypeIt`` files generated.
"""
# Set output path
if output_path is None:
output_path = os.getcwd()
# Find unique configurations, always ignoring any 'None'
# configurations...
cfg = self.unique_configurations(copy=True, rm_none=True)
# Get the setups to write
if configs is None or configs == 'all' or configs == ['all']:
cfg_keys = list(cfg.keys())
else:
_configs = configs if isinstance(configs, list) else [configs]
cfg_keys = [key for key in cfg.keys() if key in _configs]
if len(cfg_keys) == 0:
msgs.error('No setups to write!')
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
# Write the pypeit files
ofiles = [None]*len(cfg_keys)
for j,setup in enumerate(cfg_keys):
# Create the output directory
root = '{0}_{1}'.format(self.spectrograph.name, setup)
odir = os.path.join(output_path, root)
if not os.path.isdir(odir):
os.makedirs(odir)
# Create the output file name
ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))
# Get the setup lines
setup_lines = dict_to_lines({'Setup {0}'.format(setup):
utils.yamlify(cfg[setup])}, level=1)
# Get the paths
in_cfg = self['setup'] == setup
if not np.any(in_cfg):
continue
paths = np.unique(self['directory'][in_cfg]).tolist()
# Get the data lines
subtbl = self.table[output_cols][in_cfg]
subtbl.sort(['frametype','filename'])
with io.StringIO() as ff:
subtbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
# Write the file
make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,
setup_lines=setup_lines, sorted_files=data_lines, paths=paths)
# Return
return ofiles
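    # Illustrative (hypothetical) usage: write one .pypeit file per selected setup,
    #   ofiles = fitstbl.write_pypeit(output_path='redux', configs=['A'])
    # which would create 'redux/<spectrograph>_A/<spectrograph>_A.pypeit'.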
def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,
header=None):
"""
Write the metadata either to a file or to the screen.
The method allows you to set the columns to print and which column to
use for sorting.
Args:
output (:obj:`str`, optional):
Output signature or file name. If None, the table contents
are printed to the screen. If ``'table'``, the table that
would have been printed/written to disk is returned.
Otherwise, the string is interpreted as the name of an ascii
file to which to write the table contents.
rows (`numpy.ndarray`_, optional):
A boolean vector selecting the rows of the table to write. If
None, all rows are written. Shape must match the number of
the rows in the table.
columns (:obj:`str`, :obj:`list`, optional):
A list of columns to include in the output file. Can be
provided as a list directly or as a comma-separated string.
                If None or ``'all'``, all columns are written; if
``'pypeit'``, the columns are the same as those included in
the pypeit file. Each selected column must be a valid pypeit
metadata keyword, specific to :attr:`spectrograph`.
Additional valid keywords, depending on the processing level
of the metadata table, are directory, filename, frametype,
framebit, setup, calib, and calibbit.
sort_col (:obj:`str`, optional):
Name of the column to use for sorting the output. If
None, the table is printed in its current state.
overwrite (:obj:`bool`, optional):
Overwrite any existing file; otherwise raise an
exception.
header (:obj:`str`, :obj:`list`, optional):
                One or more strings to write to the top of the file, one
                string per file line; ``# `` is added to the beginning of
each string. Ignored if ``output`` does not specify an output
file.
Returns:
`astropy.table.Table`: The table object that would have been
written/printed if ``output == 'table'``. Otherwise, the method
always returns None.
Raises:
ValueError:
Raised if the columns to include are not valid, or if the
column to use for sorting is not valid.
FileExistsError:
Raised if overwrite is False and the file exists.
"""
# Check the file can be written (this is here because the spectrograph
# needs to be defined first)
ofile = None if output in [None, 'table'] else output
if ofile is not None and os.path.isfile(ofile) and not overwrite:
raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')
# Check the rows input
if rows is not None and len(rows) != len(self.table):
raise ValueError('Boolean vector selecting output rows has incorrect length.')
# Get the columns to return
if columns in [None, 'all']:
tbl_cols = list(self.keys())
elif columns == 'pypeit':
tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)
else:
all_cols = list(self.keys())
tbl_cols = columns if isinstance(columns, list) else columns.split(',')
badcol = [col not in all_cols for col in tbl_cols]
if np.any(badcol):
raise ValueError('The following columns are not valid: {0}'.format(
                                 ', '.join([c for c, b in zip(tbl_cols, badcol) if b])))
# Make sure the basic parameters are the first few columns; do them in
# reverse order so I can always insert at the beginning of the list
for col in ['framebit', 'frametype', 'filename', 'directory']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != 0:
tbl_cols.insert(0, tbl_cols.pop(indx))
# Make sure the dithers and combination and background IDs are the last
# few columns
ncol = len(tbl_cols)
for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != ncol-1:
tbl_cols.insert(ncol-1, tbl_cols.pop(indx))
# Copy the internal table so that it is unaltered
output_tbl = self.table.copy()
# Select the output rows if a vector was provided
if rows is not None:
output_tbl = output_tbl[rows]
# Select and sort the data by a given column
if sort_col is not None:
if sort_col not in self.keys():
raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')
# Ignore any NoneTypes
indx = output_tbl[sort_col] != None
is_None = np.logical_not(indx)
srt = np.append(np.where(is_None)[0],
np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])
output_tbl = output_tbl[tbl_cols][srt]
else:
output_tbl = output_tbl[tbl_cols]
if output == 'table':
# Instead of writing, just return the modified table
return output_tbl
# Always write the table in ascii format
with io.StringIO() as ff:
output_tbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
if ofile is None:
# Output file not defined so just print it
print('\n'.join(data_lines))
return None
# Write the output to an ascii file
with open(ofile, 'w') as f:
if header is not None:
_header = header if isinstance(header, list) else [header]
for h in _header:
f.write(f'# {h}\n')
f.write('\n')
f.write('\n'.join(data_lines))
f.write('\n')
# Just to be explicit that the method returns None when writing to a
# file...
return None
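    # Illustrative (hypothetical) usage:
    #   fitstbl.write()                      # print every column to the screen
    #   fitstbl.write(output='meta.txt', columns='pypeit', sort_col='mjd',
    #                 overwrite=True)        # write the pypeit columns to a file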
def find_calib_group(self, grp):
"""
Find all the frames associated with the provided calibration group.
Args:
grp (:obj:`int`):
The calibration group integer.
Returns:
numpy.ndarray: Boolean array selecting those frames in the
table included in the selected calibration group.
Raises:
PypeItError:
Raised if the 'calibbit' column is not defined.
"""
if 'calibbit' not in self.keys():
msgs.error('Calibration groups are not set. First run set_calibration_groups.')
return self.calib_bitmask.flagged(self['calibbit'].data, grp)
def find_frame_calib_groups(self, row):
"""
Find the calibration groups associated with a specific frame.
"""
return self.calib_bitmask.flagged_bits(self['calibbit'][row])
# TODO: Is there a reason why this is not an attribute of
# PypeItMetaData?
def row_match_config(row, config, spectrograph):
"""
Queries whether a row from the fitstbl matches the
input configuration
Args:
row (astropy.table.Row): From fitstbl
config (dict): Defines the configuration
spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):
Used to grab the rtol value for float meta (e.g. dispangle)
Returns:
bool: True if the row matches the input configuration
"""
# Loop on keys in config
match = []
for k in config.keys():
# Deal with floating configs (e.g. grating angle)
if isinstance(config[k], float):
if row[k] is None:
match.append(False)
elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:
match.append(True)
else:
match.append(False)
else:
# The np.all allows for arrays in the Table (e.g. binning)
match.append(np.all(config[k] == row[k]))
# Check
return np.all(match)
| [
"pypeit.core.framematch.FrameTypeBitMask",
"astropy.table.Table",
"pypeit.io.dict_to_lines",
"pypeit.core.parse.str2list",
"numpy.logical_not",
"pypeit.msgs.newline",
"numpy.isin",
"numpy.argsort",
"numpy.array",
"pypeit.core.meta.convert_radec",
"copy.deepcopy",
"numpy.arange",
"pypeit.msgs.error",
"numpy.where",
"pypeit.utils.yamlify",
"numpy.asarray",
"os.path.split",
"pypeit.msgs.warn",
"os.path.isdir",
"io.StringIO",
"numpy.abs",
"pypeit.core.flux_calib.find_standard_file",
"pypeit.msgs.info",
"numpy.any",
"os.path.isfile",
"astropy.table.Column",
"pypeit.par.util.make_pypeit_file",
"pypeit.core.meta.get_meta_data_model",
"numpy.atleast_1d",
"numpy.unique",
"os.makedirs",
"datetime.datetime.strptime",
"os.path.join",
"os.getcwd",
"astropy.time.Time",
"numpy.sum",
"os.path.basename",
"numpy.all",
"datetime.datetime.strftime"
]
| [((78727, 78740), 'numpy.all', 'np.all', (['match'], {}), '(match)\n', (78733, 78740), True, 'import numpy as np\n'), ((4634, 4663), 'pypeit.core.framematch.FrameTypeBitMask', 'framematch.FrameTypeBitMask', ([], {}), '()\n', (4661, 4663), False, 'from pypeit.core import framematch\n'), ((11930, 11956), 'pypeit.core.meta.get_meta_data_model', 'meta.get_meta_data_model', ([], {}), '()\n', (11954, 11956), False, 'from pypeit.core import meta\n'), ((19255, 19296), 'astropy.time.Time', 'time.Time', (["self['mjd'][row]"], {'format': '"""mjd"""'}), "(self['mjd'][row], format='mjd')\n", (19264, 19296), False, 'from astropy import table, coordinates, time, units\n'), ((19877, 19911), 'astropy.time.Time', 'time.Time', (['_obstime'], {'format': '"""isot"""'}), "(_obstime, format='isot')\n", (19886, 19911), False, 'from astropy import table, coordinates, time, units\n'), ((19928, 19990), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['tiso.value', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n", (19954, 19990), False, 'import datetime\n'), ((24686, 24729), 'numpy.unique', 'np.unique', (["self['setup']"], {'return_index': '(True)'}), "(self['setup'], return_index=True)\n", (24695, 24729), True, 'import numpy as np\n'), ((29498, 29561), 'pypeit.msgs.info', 'msgs.info', (['"""Using metadata to determine unique configurations."""'], {}), "('Using metadata to determine unique configurations.')\n", (29507, 29561), False, 'from pypeit import msgs\n'), ((38774, 38786), 'numpy.all', 'np.all', (['good'], {}), '(good)\n', (38780, 38786), True, 'import numpy as np\n'), ((39245, 39259), 'pypeit.msgs.warn', 'msgs.warn', (['msg'], {}), '(msg)\n', (39254, 39259), False, 'from pypeit import msgs\n'), ((50007, 50047), 'astropy.table.Column', 'table.Column', (['type_bits'], {'name': '"""framebit"""'}), "(type_bits, name='framebit')\n", (50019, 50047), False, 'from astropy import table, coordinates, time, units\n'), ((50060, 50097), 'astropy.table.Table', 'table.Table', (['[ftype_colm, fbits_colm]'], {}), '([ftype_colm, fbits_colm])\n', (50071, 50097), False, 'from astropy import table, coordinates, time, units\n'), ((52805, 52830), 'pypeit.msgs.info', 'msgs.info', (['"""Typing files"""'], {}), "('Typing files')\n", (52814, 52830), False, 'from pypeit import msgs\n'), ((56013, 56025), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (56019, 56025), True, 'import numpy as np\n'), ((56351, 56381), 'pypeit.msgs.info', 'msgs.info', (['"""Typing completed!"""'], {}), "('Typing completed!')\n", (56360, 56381), False, 'from pypeit import msgs\n'), ((57480, 57497), 'numpy.array', 'np.array', (['columns'], {}), '(columns)\n', (57488, 57497), True, 'import numpy as np\n'), ((64257, 64289), 'numpy.unique', 'np.unique', (["self['calibbit'].data"], {}), "(self['calibbit'].data)\n", (64266, 64289), True, 'import numpy as np\n'), ((4251, 4368), 'pypeit.msgs.warn', 'msgs.warn', (['"""Both data and files are None in the instantiation of PypeItMetaData. The table will be empty!"""'], {}), "(\n 'Both data and files are None in the instantiation of PypeItMetaData. 
The table will be empty!'\n )\n", (4260, 4368), False, 'from pypeit import msgs\n'), ((7604, 7624), 'os.path.split', 'os.path.split', (['ifile'], {}), '(ifile)\n', (7617, 7624), False, 'import os\n'), ((8888, 8924), 'astropy.time.Time', 'time.Time', (["data['mjd']"], {'format': '"""mjd"""'}), "(data['mjd'], format='mjd')\n", (8897, 8924), False, 'from astropy import table, coordinates, time, units\n'), ((18587, 18722), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups."""'], {}), "(\n 'Cannot provide master key string without setup and calibbit; run set_configurations and set_calibration_groups.'\n )\n", (18597, 18722), False, 'from pypeit import msgs\n'), ((20268, 20312), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['dtime', '"""%Y%m%dT"""'], {}), "(dtime, '%Y%m%dT')\n", (20294, 20312), False, 'import datetime\n'), ((21439, 21542), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot provide instrument setup without \'setup\' column; run set_configurations."""'], {}), '(\n "Cannot provide instrument setup without \'setup\' column; run set_configurations."\n )\n', (21449, 21542), False, 'from pypeit import msgs\n'), ((24568, 24629), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot get setup names; run set_configurations."""'], {}), "('Cannot get setup names; run set_configurations.')\n", (24578, 24629), False, 'from pypeit import msgs\n'), ((25006, 25028), 'numpy.atleast_1d', 'np.atleast_1d', (['configs'], {}), '(configs)\n', (25019, 25028), True, 'import numpy as np\n'), ((25280, 25305), 'numpy.isin', 'np.isin', (['setups', '_configs'], {}), '(setups, _configs)\n', (25287, 25305), True, 'import numpy as np\n'), ((26261, 26283), 'copy.deepcopy', 'deepcopy', (['self.configs'], {}), '(self.configs)\n', (26269, 26283), False, 'from copy import deepcopy\n'), ((28799, 28869), 'pypeit.msgs.info', 'msgs.info', (['"""Setup column already set. Finding unique configurations."""'], {}), "('Setup column already set. 
Finding unique configurations.')\n", (28808, 28869), False, 'from pypeit import msgs\n'), ((28895, 28938), 'numpy.unique', 'np.unique', (["self['setup']"], {'return_index': '(True)'}), "(self['setup'], return_index=True)\n", (28904, 28938), True, 'import numpy as np\n'), ((30324, 30380), 'pypeit.msgs.error', 'msgs.error', (['"""No frames to use to define configurations!"""'], {}), "('No frames to use to define configurations!')\n", (30334, 30380), False, 'from pypeit import msgs\n'), ((31053, 31118), 'pypeit.msgs.info', 'msgs.info', (['"""All files assumed to be from a single configuration."""'], {}), "('All files assumed to be from a single configuration.')\n", (31062, 31118), False, 'from pypeit import msgs\n'), ((34828, 34845), 'numpy.any', 'np.any', (['not_setup'], {}), '(not_setup)\n', (34834, 34845), True, 'import numpy as np\n'), ((35233, 35338), 'pypeit.msgs.error', 'msgs.error', (['"""To account for ignored frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To account for ignored frames, types must have been defined; run get_frame_types.'\n )\n", (35243, 35338), False, 'from pypeit import msgs\n'), ((38596, 38631), 'numpy.isin', 'np.isin', (['self[key]', 'cfg_limits[key]'], {}), '(self[key], cfg_limits[key])\n', (38603, 38631), True, 'import numpy as np\n'), ((40190, 40208), 'numpy.arange', 'np.arange', (['ngroups'], {}), '(ngroups)\n', (40199, 40208), True, 'import numpy as np\n'), ((40379, 40420), 'pypeit.core.parse.str2list', 'parse.str2list', (["self['calib'][i]", 'ngroups'], {}), "(self['calib'][i], ngroups)\n", (40393, 40420), False, 'from pypeit.core import parse\n'), ((43833, 43923), 'pypeit.msgs.error', 'msgs.error', (['"""Must have defined \'setup\' column first; try running set_configurations."""'], {}), '(\n "Must have defined \'setup\' column first; try running set_configurations.")\n', (43843, 43923), False, 'from pypeit import msgs\n'), ((46578, 46644), 'pypeit.msgs.error', 'msgs.error', (['"""Frame types are not set. First run get_frame_types."""'], {}), "('Frame types are not set. 
First run get_frame_types.')\n", (46588, 46644), False, 'from pypeit import msgs\n'), ((48316, 48377), 'os.path.join', 'os.path.join', (["self['directory'][indx]", "self['filename'][indx]"], {}), "(self['directory'][indx], self['filename'][indx])\n", (48328, 48377), False, 'import os\n'), ((48394, 48412), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (48406, 48412), False, 'import os\n'), ((52350, 52412), 'pypeit.msgs.warn', 'msgs.warn', (['"""Removing existing frametype and framebit columns."""'], {}), "('Removing existing frametype and framebit columns.')\n", (52359, 52412), False, 'from pypeit import msgs\n'), ((53167, 53212), 'pypeit.msgs.info', 'msgs.info', (['"""Using user-provided frame types."""'], {}), "('Using user-provided frame types.')\n", (53176, 53212), False, 'from pypeit import msgs\n'), ((54788, 54876), 'pypeit.msgs.warn', 'msgs.warn', (['"""Cannot associate standard with science frames without sky coordinates."""'], {}), "(\n 'Cannot associate standard with science frames without sky coordinates.')\n", (54797, 54876), False, 'from pypeit import msgs\n'), ((56039, 56090), 'pypeit.msgs.info', 'msgs.info', (['"""Couldn\'t identify the following files:"""'], {}), '("Couldn\'t identify the following files:")\n', (56048, 56090), False, 'from pypeit import msgs\n'), ((58525, 58552), 'numpy.all', 'np.all', (["(self['comb_id'] < 0)"], {}), "(self['comb_id'] < 0)\n", (58531, 58552), True, 'import numpy as np\n'), ((60486, 60608), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."""'], {}), '(\n "Cannot write sorted instrument configuration table without \'setup\' column; run set_configurations."\n )\n', (60496, 60608), False, 'from pypeit import msgs\n'), ((60639, 60660), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (60653, 60660), False, 'import os\n'), ((61966, 61981), 'numpy.argsort', 'np.argsort', (['mjd'], {}), '(mjd)\n', (61976, 61981), True, 'import numpy as np\n'), ((63363, 63509), 'pypeit.msgs.error', 'msgs.error', (['"""Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."""'], {}), '(\n "Cannot write calibration groups without \'setup\' and \'calibbit\' columns; run set_configurations and set_calibration_groups."\n )\n', (63373, 63509), False, 'from pypeit import msgs\n'), ((63542, 63563), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (63556, 63563), False, 'import os\n'), ((64082, 64102), 'copy.deepcopy', 'deepcopy', (['cfg[setup]'], {}), '(cfg[setup])\n', (64090, 64102), False, 'from copy import deepcopy\n'), ((68106, 68117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (68115, 68117), False, 'import os\n'), ((68626, 68659), 'pypeit.msgs.error', 'msgs.error', (['"""No setups to write!"""'], {}), "('No setups to write!')\n", (68636, 68659), False, 'from pypeit import msgs\n'), ((69081, 69112), 'os.path.join', 'os.path.join', (['output_path', 'root'], {}), '(output_path, root)\n', (69093, 69112), False, 'import os\n'), ((69989, 70132), 'pypeit.par.util.make_pypeit_file', 'make_pypeit_file', (['ofiles[j]', 'self.spectrograph.name', '[]'], {'cfg_lines': 'cfg_lines', 'setup_lines': 'setup_lines', 'sorted_files': 'data_lines', 'paths': 'paths'}), '(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,\n setup_lines=setup_lines, sorted_files=data_lines, paths=paths)\n', (70005, 70132), False, 'from pypeit.par.util import make_pypeit_file\n'), ((73155, 
73176), 'os.path.isfile', 'os.path.isfile', (['ofile'], {}), '(ofile)\n', (73169, 73176), False, 'import os\n'), ((75475, 75495), 'numpy.logical_not', 'np.logical_not', (['indx'], {}), '(indx)\n', (75489, 75495), True, 'import numpy as np\n'), ((75938, 75951), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (75949, 75951), False, 'import io\n'), ((77248, 77333), 'pypeit.msgs.error', 'msgs.error', (['"""Calibration groups are not set. First run set_calibration_groups."""'], {}), "('Calibration groups are not set. First run set_calibration_groups.'\n )\n", (77258, 77333), False, 'from pypeit import msgs\n'), ((8970, 8993), 'numpy.asarray', 'np.asarray', (["data['mjd']"], {}), "(data['mjd'])\n", (8980, 8993), True, 'import numpy as np\n'), ((9018, 9046), 'numpy.asarray', 'np.asarray', (["data['filename']"], {}), "(data['filename'])\n", (9028, 9046), True, 'import numpy as np\n'), ((9395, 9409), 'pypeit.msgs.warn', 'msgs.warn', (['msg'], {}), '(msg)\n', (9404, 9409), False, 'from pypeit import msgs\n'), ((24853, 24876), 'numpy.isin', 'np.isin', (['setups', 'ignore'], {}), '(setups, ignore)\n', (24860, 24876), True, 'import numpy as np\n'), ((28990, 29004), 'numpy.sum', 'np.sum', (['ignore'], {}), '(ignore)\n', (28996, 29004), True, 'import numpy as np\n'), ((29868, 29955), 'pypeit.msgs.error', 'msgs.error', (['"""To ignore frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To ignore frames, types must have been defined; run get_frame_types.')\n", (29878, 29955), False, 'from pypeit import msgs\n'), ((36676, 36719), 'numpy.unique', 'np.unique', (['self.table[metakey][in_cfg].data'], {}), '(self.table[metakey][in_cfg].data)\n', (36685, 36719), True, 'import numpy as np\n'), ((37263, 37302), 'numpy.isin', 'np.isin', (['self.table[metakey]', 'uniq_meta'], {}), '(self.table[metakey], uniq_meta)\n', (37270, 37302), True, 'import numpy as np\n'), ((38651, 38663), 'numpy.all', 'np.all', (['indx'], {}), '(indx)\n', (38657, 38663), True, 'import numpy as np\n'), ((39130, 39150), 'numpy.logical_not', 'np.logical_not', (['good'], {}), '(good)\n', (39144, 39150), True, 'import numpy as np\n'), ((41171, 41256), 'pypeit.msgs.error', 'msgs.error', (['"""Science frames can only be assigned to a single calibration group."""'], {}), "('Science frames can only be assigned to a single calibration group.'\n )\n", (41181, 41256), False, 'from pypeit import msgs\n'), ((43939, 43968), 'numpy.unique', 'np.unique', (["self['setup'].data"], {}), "(self['setup'].data)\n", (43948, 43968), True, 'import numpy as np\n'), ((44811, 44902), 'pypeit.msgs.error', 'msgs.error', (['"""To set global frames, types must have been defined; run get_frame_types."""'], {}), "(\n 'To set global frames, types must have been defined; run get_frame_types.')\n", (44821, 44902), False, 'from pypeit import msgs\n'), ((46981, 46995), 'numpy.where', 'np.where', (['indx'], {}), '(indx)\n', (46989, 46995), True, 'import numpy as np\n'), ((55741, 55791), 'pypeit.core.flux_calib.find_standard_file', 'flux_calib.find_standard_file', (['ra', 'dec'], {'check': '(True)'}), '(ra, dec, check=True)\n', (55770, 55791), False, 'from pypeit.core import flux_calib\n'), ((56152, 56164), 'pypeit.msgs.info', 'msgs.info', (['f'], {}), '(f)\n', (56161, 56164), False, 'from pypeit import msgs\n'), ((56214, 56263), 'pypeit.msgs.error', 'msgs.error', (['"""Check these files before continuing"""'], {}), "('Check these files before continuing')\n", (56224, 56263), False, 'from pypeit import msgs\n'), ((61373, 61385), 'numpy.any', 'np.any', 
(['indx'], {}), '(indx)\n', (61379, 61385), True, 'import numpy as np\n'), ((65071, 65244), 'pypeit.msgs.error', 'msgs.error', (['"""Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None."""'], {}), "(\n 'Each calibration group must be from one and only one instrument configuration with a valid letter identifier; i.e., the configuration cannot be None.'\n )\n", (65081, 65244), False, 'from pypeit import msgs\n'), ((65906, 65924), 'pypeit.utils.yamlify', 'utils.yamlify', (['cfg'], {}), '(cfg)\n', (65919, 65924), False, 'from pypeit import utils\n'), ((69132, 69151), 'os.path.isdir', 'os.path.isdir', (['odir'], {}), '(odir)\n', (69145, 69151), False, 'import os\n'), ((69169, 69186), 'os.makedirs', 'os.makedirs', (['odir'], {}), '(odir)\n', (69180, 69186), False, 'import os\n'), ((69546, 69560), 'numpy.any', 'np.any', (['in_cfg'], {}), '(in_cfg)\n', (69552, 69560), True, 'import numpy as np\n'), ((69806, 69819), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (69817, 69819), False, 'import io\n'), ((73898, 73912), 'numpy.any', 'np.any', (['badcol'], {}), '(badcol)\n', (73904, 73912), True, 'import numpy as np\n'), ((78675, 78702), 'numpy.all', 'np.all', (['(config[k] == row[k])'], {}), '(config[k] == row[k])\n', (78681, 78702), True, 'import numpy as np\n'), ((7206, 7229), 'os.path.basename', 'os.path.basename', (['ifile'], {}), '(ifile)\n', (7222, 7229), False, 'import os\n'), ((7279, 7420), 'pypeit.msgs.error', 'msgs.error', (['"""File name list does not match user-provided metadata table. See usrdata argument of instantiation of PypeItMetaData."""'], {}), "(\n 'File name list does not match user-provided metadata table. See usrdata argument of instantiation of PypeItMetaData.'\n )\n", (7289, 7420), False, 'from pypeit import msgs\n'), ((12309, 12346), 'numpy.where', 'np.where', (["(f == self.table['filename'])"], {}), "(f == self.table['filename'])\n", (12317, 12346), True, 'import numpy as np\n'), ((13414, 13489), 'pypeit.core.meta.convert_radec', 'meta.convert_radec', (["usrdata['ra'][~nones].data", "usrdata['dec'][~nones].data"], {}), "(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data)\n", (13432, 13489), False, 'from pypeit.core import meta\n'), ((36279, 36291), 'numpy.any', 'np.any', (['indx'], {}), '(indx)\n', (36285, 36291), True, 'import numpy as np\n'), ((64789, 64822), 'numpy.unique', 'np.unique', (["self['setup'][in_cbit]"], {}), "(self['setup'][in_cbit])\n", (64798, 64822), True, 'import numpy as np\n'), ((65616, 65634), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (65628, 65634), False, 'import os\n'), ((69418, 69443), 'pypeit.utils.yamlify', 'utils.yamlify', (['cfg[setup]'], {}), '(cfg[setup])\n', (69431, 69443), False, 'from pypeit import utils\n'), ((69607, 69643), 'numpy.unique', 'np.unique', (["self['directory'][in_cfg]"], {}), "(self['directory'][in_cfg])\n", (69616, 69643), True, 'import numpy as np\n'), ((74368, 74408), 'numpy.where', 'np.where', (['[(t == col) for t in tbl_cols]'], {}), '([(t == col) for t in tbl_cols])\n', (74376, 74408), True, 'import numpy as np\n'), ((74790, 74830), 'numpy.where', 'np.where', (['[(t == col) for t in tbl_cols]'], {}), '([(t == col) for t in tbl_cols])\n', (74798, 74830), True, 'import numpy as np\n'), ((75524, 75541), 'numpy.where', 'np.where', (['is_None'], {}), '(is_None)\n', (75532, 75541), True, 'import numpy as np\n'), ((75592, 75635), 'numpy.argsort', 'np.argsort', (['output_tbl[sort_col][indx].data'], {}), 
'(output_tbl[sort_col][indx].data)\n', (75602, 75635), True, 'import numpy as np\n'), ((8574, 8594), 'os.path.split', 'os.path.split', (['ifile'], {}), '(ifile)\n', (8587, 8594), False, 'import os\n'), ((29126, 29140), 'numpy.sum', 'np.sum', (['ignore'], {}), '(ignore)\n', (29132, 29140), True, 'import numpy as np\n'), ((61659, 61694), 'pypeit.io.dict_to_lines', 'dict_to_lines', (['cfgs[setup]'], {'level': '(1)'}), '(cfgs[setup], level=1)\n', (61672, 61694), False, 'from pypeit.io import dict_to_lines\n'), ((75574, 75588), 'numpy.where', 'np.where', (['indx'], {}), '(indx)\n', (75582, 75588), True, 'import numpy as np\n'), ((78409, 78435), 'numpy.abs', 'np.abs', (['(config[k] - row[k])'], {}), '(config[k] - row[k])\n', (78415, 78435), True, 'import numpy as np\n'), ((44982, 44998), 'numpy.arange', 'np.arange', (['n_cfg'], {}), '(n_cfg)\n', (44991, 44998), True, 'import numpy as np\n'), ((55282, 55296), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (55294, 55296), False, 'from pypeit import msgs\n'), ((55421, 55435), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (55433, 55435), False, 'from pypeit import msgs\n')] |
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils import get_language_from_request
from cms.utils.urlutils import admin_reverse
from hvad.models import TranslatableModel, TranslatedFields
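# Callables used as dynamic placeholder slot names: the slot name is taken from the
# instance's char_1 / char_2 fields (see DynamicPlaceholderSlotExample below).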
def dynamic_placeholder_1(instance):
return instance.char_1
def dynamic_placeholder_2(instance):
return instance.char_2
@python_2_unicode_compatible
class Example1(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
date_field = models.DateField(null=True)
placeholder = PlaceholderField('placeholder')
static_admin_url = ''
def __init__(self, *args, **kwargs):
super(Example1, self).__init__(*args, **kwargs)
def callable_item(self, request):
return self.char_1
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("example_detail", args=(self.pk,))
def get_draft_url(self):
return self.get_absolute_url()
def get_public_url(self):
return '/public/view/'
def set_static_url(self, request):
language = get_language_from_request(request)
if self.pk:
self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
return self.pk
def dynamic_url(self, request):
language = get_language_from_request(request)
return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
class TwoPlaceholderExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
@python_2_unicode_compatible
class CharPksExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
slug = models.SlugField(u'char_1', max_length=255, primary_key=True)
placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')
def __str__(self):
return "%s - %s" % (self.char_1, self.pk)
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
translations = TranslatedFields(
char_1=models.CharField(u'char_1', max_length=255),
char_2=models.CharField(u'char_2', max_length=255),
)
placeholder_1 = PlaceholderField('placeholder_1')
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("detail_multi", args=(self.pk,))
| [
"django.db.models.DateField",
"cms.models.fields.PlaceholderField",
"django.core.urlresolvers.reverse",
"cms.utils.get_language_from_request",
"django.db.models.SlugField",
"cms.utils.urlutils.admin_reverse",
"django.db.models.CharField"
]
| [((544, 587), 'django.db.models.CharField', 'models.CharField', (['u"""char_1"""'], {'max_length': '(255)'}), "(u'char_1', max_length=255)\n", (560, 587), False, 'from django.db import models\n'), ((601, 644), 'django.db.models.CharField', 'models.CharField', (['u"""char_2"""'], {'max_length': '(255)'}), "(u'char_2', max_length=255)\n", (617, 644), False, 'from django.db import models\n'), ((658, 701), 'django.db.models.CharField', 'models.CharField', (['u"""char_3"""'], {'max_length': '(255)'}), "(u'char_3', max_length=255)\n", (674, 701), False, 'from django.db import models\n'), ((715, 758), 'django.db.models.CharField', 'models.CharField', (['u"""char_4"""'], {'max_length': '(255)'}), "(u'char_4', max_length=255)\n", (731, 758), False, 'from django.db import models\n'), ((776, 803), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (792, 803), False, 'from django.db import models\n'), ((822, 853), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['"""placeholder"""'], {}), "('placeholder')\n", (838, 853), False, 'from cms.models.fields import PlaceholderField\n'), ((1811, 1854), 'django.db.models.CharField', 'models.CharField', (['u"""char_1"""'], {'max_length': '(255)'}), "(u'char_1', max_length=255)\n", (1827, 1854), False, 'from django.db import models\n'), ((1868, 1911), 'django.db.models.CharField', 'models.CharField', (['u"""char_2"""'], {'max_length': '(255)'}), "(u'char_2', max_length=255)\n", (1884, 1911), False, 'from django.db import models\n'), ((1925, 1968), 'django.db.models.CharField', 'models.CharField', (['u"""char_3"""'], {'max_length': '(255)'}), "(u'char_3', max_length=255)\n", (1941, 1968), False, 'from django.db import models\n'), ((1982, 2025), 'django.db.models.CharField', 'models.CharField', (['u"""char_4"""'], {'max_length': '(255)'}), "(u'char_4', max_length=255)\n", (1998, 2025), False, 'from django.db import models\n'), ((2046, 2098), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['"""placeholder_1"""'], {'related_name': '"""p1"""'}), "('placeholder_1', related_name='p1')\n", (2062, 2098), False, 'from cms.models.fields import PlaceholderField\n'), ((2119, 2171), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['"""placeholder_2"""'], {'related_name': '"""p2"""'}), "('placeholder_2', related_name='p2')\n", (2135, 2171), False, 'from cms.models.fields import PlaceholderField\n'), ((2238, 2281), 'django.db.models.CharField', 'models.CharField', (['u"""char_1"""'], {'max_length': '(255)'}), "(u'char_1', max_length=255)\n", (2254, 2281), False, 'from django.db import models\n'), ((2295, 2338), 'django.db.models.CharField', 'models.CharField', (['u"""char_2"""'], {'max_length': '(255)'}), "(u'char_2', max_length=255)\n", (2311, 2338), False, 'from django.db import models\n'), ((2359, 2427), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['dynamic_placeholder_1'], {'related_name': '"""dynamic_pl_1"""'}), "(dynamic_placeholder_1, related_name='dynamic_pl_1')\n", (2375, 2427), False, 'from cms.models.fields import PlaceholderField\n'), ((2448, 2516), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['dynamic_placeholder_2'], {'related_name': '"""dynamic_pl_2"""'}), "(dynamic_placeholder_2, related_name='dynamic_pl_2')\n", (2464, 2516), False, 'from cms.models.fields import PlaceholderField\n'), ((2597, 2640), 'django.db.models.CharField', 'models.CharField', (['u"""char_1"""'], {'max_length': '(255)'}), "(u'char_1', max_length=255)\n", (2613, 2640), False, 'from 
django.db import models\n'), ((2652, 2713), 'django.db.models.SlugField', 'models.SlugField', (['u"""char_1"""'], {'max_length': '(255)', 'primary_key': '(True)'}), "(u'char_1', max_length=255, primary_key=True)\n", (2668, 2713), False, 'from django.db import models\n'), ((2734, 2793), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['"""placeholder_1"""'], {'related_name': '"""charpk_p1"""'}), "('placeholder_1', related_name='charpk_p1')\n", (2750, 2793), False, 'from cms.models.fields import PlaceholderField\n'), ((3129, 3162), 'cms.models.fields.PlaceholderField', 'PlaceholderField', (['"""placeholder_1"""'], {}), "('placeholder_1')\n", (3145, 3162), False, 'from cms.models.fields import PlaceholderField\n'), ((1144, 1186), 'django.core.urlresolvers.reverse', 'reverse', (['"""example_detail"""'], {'args': '(self.pk,)'}), "('example_detail', args=(self.pk,))\n", (1151, 1186), False, 'from django.core.urlresolvers import reverse\n'), ((1377, 1411), 'cms.utils.get_language_from_request', 'get_language_from_request', (['request'], {}), '(request)\n', (1402, 1411), False, 'from cms.utils import get_language_from_request\n'), ((1625, 1659), 'cms.utils.get_language_from_request', 'get_language_from_request', (['request'], {}), '(request)\n', (1650, 1659), False, 'from cms.utils import get_language_from_request\n'), ((1675, 1752), 'cms.utils.urlutils.admin_reverse', 'admin_reverse', (['"""placeholderapp_example1_edit_field"""'], {'args': '(self.pk, language)'}), "('placeholderapp_example1_edit_field', args=(self.pk, language))\n", (1688, 1752), False, 'from cms.utils.urlutils import admin_reverse\n'), ((3262, 3302), 'django.core.urlresolvers.reverse', 'reverse', (['"""detail_multi"""'], {'args': '(self.pk,)'}), "('detail_multi', args=(self.pk,))\n", (3269, 3302), False, 'from django.core.urlresolvers import reverse\n'), ((1468, 1545), 'cms.utils.urlutils.admin_reverse', 'admin_reverse', (['"""placeholderapp_example1_edit_field"""'], {'args': '(self.pk, language)'}), "('placeholderapp_example1_edit_field', args=(self.pk, language))\n", (1481, 1545), False, 'from cms.utils.urlutils import admin_reverse\n'), ((2998, 3041), 'django.db.models.CharField', 'models.CharField', (['u"""char_1"""'], {'max_length': '(255)'}), "(u'char_1', max_length=255)\n", (3014, 3041), False, 'from django.db import models\n'), ((3058, 3101), 'django.db.models.CharField', 'models.CharField', (['u"""char_2"""'], {'max_length': '(255)'}), "(u'char_2', max_length=255)\n", (3074, 3101), False, 'from django.db import models\n')] |
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mock import patch
import repokid.utils.roledata
from repokid.role import Role
from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES
AARDVARK_DATA = {
"arn:aws:iam::123456789012:role/all_services_used": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}],
"arn:aws:iam::123456789012:role/unused_ec2": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": 0,
"serviceNamespace": "ec2"}],
"arn:aws:iam::123456789012:role/young_role": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}]
}
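# Access Advisor-style usage data keyed by role ARN: each entry lists a service namespace
# with a lastAuthenticated timestamp in milliseconds since the epoch (0 = never used).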
class TestRoledata(object):
@patch('repokid.utils.roledata.expand_policy')
@patch('repokid.utils.roledata.get_actions_from_statement')
@patch('repokid.utils.roledata.all_permissions')
def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy):
test_role = Role(ROLES[0])
all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket',
's3:getobject']
# empty policy to make sure we get the latest
test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}]
mock_all_permissions.return_value = all_permissions
mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
permissions = repokid.utils.roledata._get_role_permissions(test_role)
assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms'])
@patch('repokid.hooks.call_hooks')
def test_get_repoable_permissions(self, mock_call_hooks):
minimum_age = 1
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2']
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4']
hooks = {}
permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4',
'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1',
'service_4:action_2']
# service_1 and service_2 both used more than a day ago, which is outside of our test filter for age
aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}]
no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000}
repoable_decision = repokid.utils.roledata.RepoablePermissionDecision()
repoable_decision.repoable = True
mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision,
'service_1:action_2': repoable_decision,
'service_4:action_1': repoable_decision}}
repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data,
no_repo_permissions, minimum_age,
hooks)
# service_1:action_3 and action_4 are unsupported actions, service_2 is an unsupported service, service_3
# was used too recently, service_4 action 2 is in no_repo_permissions and not expired
assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1'])
@patch('repokid.utils.roledata._get_role_permissions')
@patch('repokid.utils.roledata._get_repoable_permissions')
@patch('repokid.hooks.call_hooks')
def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions):
roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])]
roles[0].disqualified_by = []
roles[0].aa_data = 'some_aa_data'
# disqualified by a filter
roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}]
roles[1].disqualified_by = ['some_filter']
roles[1].aa_data = 'some_aa_data'
# no AA data
roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}]
roles[2].disqualified_by = []
roles[2].aa_data = None
hooks = {}
mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy',
'ec2:AllocateHosts', 'ec2:AssociateAddress'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']]
mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])
mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])]
minimum_age = 90
repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks)
assert roles[0].repoable_permissions == 2
assert roles[0].repoable_services == ['iam']
assert roles[1].repoable_permissions == 0
assert roles[1].repoable_services == []
assert roles[2].repoable_permissions == 0
assert roles[2].repoable_services == []
def test_get_repoed_policy(self):
policies = ROLE_POLICIES['all_services_used']
repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket'])
rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions)
assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17',
'Statement': [{'Action': ['s3:deletebucket'],
'Resource': ['*'],
'Effect': 'Allow'}]}}
assert empty_policies == ['iam_perms']
def test_find_newly_added_permissions(self):
old_policy = ROLE_POLICIES['all_services_used']
new_policy = ROLE_POLICIES['unused_ec2']
new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy)
assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress'])
def test_convert_repoable_perms_to_perms_and_services(self):
all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n']
repoable_perms = ['b:l', 'c:m']
expected_repoed_services = ['b']
expected_repoed_permissions = ['c:m']
assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) ==
(expected_repoed_permissions, expected_repoed_services))
def test_convert_repoed_service_to_sorted_perms_and_services(self):
repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl']
expected_services = ['ec2', 'route53']
expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl']
assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == (
expected_permissions, expected_services
)
def test_get_epoch_authenticated(self):
assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True))
assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True))
assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False))
def test_filter_scheduled_repoable_perms(self):
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b', 'a:c']
| [
"mock.patch",
"repokid.role.Role",
"time.time"
]
| [((1527, 1572), 'mock.patch', 'patch', (['"""repokid.utils.roledata.expand_policy"""'], {}), "('repokid.utils.roledata.expand_policy')\n", (1532, 1572), False, 'from mock import patch\n'), ((1578, 1636), 'mock.patch', 'patch', (['"""repokid.utils.roledata.get_actions_from_statement"""'], {}), "('repokid.utils.roledata.get_actions_from_statement')\n", (1583, 1636), False, 'from mock import patch\n'), ((1642, 1689), 'mock.patch', 'patch', (['"""repokid.utils.roledata.all_permissions"""'], {}), "('repokid.utils.roledata.all_permissions')\n", (1647, 1689), False, 'from mock import patch\n'), ((2572, 2605), 'mock.patch', 'patch', (['"""repokid.hooks.call_hooks"""'], {}), "('repokid.hooks.call_hooks')\n", (2577, 2605), False, 'from mock import patch\n'), ((4839, 4892), 'mock.patch', 'patch', (['"""repokid.utils.roledata._get_role_permissions"""'], {}), "('repokid.utils.roledata._get_role_permissions')\n", (4844, 4892), False, 'from mock import patch\n'), ((4898, 4955), 'mock.patch', 'patch', (['"""repokid.utils.roledata._get_repoable_permissions"""'], {}), "('repokid.utils.roledata._get_repoable_permissions')\n", (4903, 4955), False, 'from mock import patch\n'), ((4961, 4994), 'mock.patch', 'patch', (['"""repokid.hooks.call_hooks"""'], {}), "('repokid.hooks.call_hooks')\n", (4966, 4994), False, 'from mock import patch\n'), ((1826, 1840), 'repokid.role.Role', 'Role', (['ROLES[0]'], {}), '(ROLES[0])\n', (1830, 1840), False, 'from repokid.role import Role\n'), ((5129, 5143), 'repokid.role.Role', 'Role', (['ROLES[0]'], {}), '(ROLES[0])\n', (5133, 5143), False, 'from repokid.role import Role\n'), ((5145, 5159), 'repokid.role.Role', 'Role', (['ROLES[1]'], {}), '(ROLES[1])\n', (5149, 5159), False, 'from repokid.role import Role\n'), ((5161, 5175), 'repokid.role.Role', 'Role', (['ROLES[2]'], {}), '(ROLES[2])\n', (5165, 5175), False, 'from repokid.role import Role\n'), ((3653, 3664), 'time.time', 'time.time', ([], {}), '()\n', (3662, 3664), False, 'import time\n'), ((3692, 3703), 'time.time', 'time.time', ([], {}), '()\n', (3701, 3703), False, 'import time\n'), ((890, 901), 'time.time', 'time.time', ([], {}), '()\n', (899, 901), False, 'import time\n'), ((982, 993), 'time.time', 'time.time', ([], {}), '()\n', (991, 993), False, 'import time\n'), ((1126, 1137), 'time.time', 'time.time', ([], {}), '()\n', (1135, 1137), False, 'import time\n'), ((1341, 1352), 'time.time', 'time.time', ([], {}), '()\n', (1350, 1352), False, 'import time\n'), ((1433, 1444), 'time.time', 'time.time', ([], {}), '()\n', (1442, 1444), False, 'import time\n'), ((3578, 3589), 'time.time', 'time.time', ([], {}), '()\n', (3587, 3589), False, 'import time\n'), ((3369, 3380), 'time.time', 'time.time', ([], {}), '()\n', (3378, 3380), False, 'import time\n'), ((3474, 3485), 'time.time', 'time.time', ([], {}), '()\n', (3483, 3485), False, 'import time\n')] |
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import classify_image
class RosTensorFlow():
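    """ROS node that classifies frames from /usb_cam/image_raw with the TensorFlow
    classify_image graph and publishes labels scoring above ~score_threshold
    on the 'result' topic."""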
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
classify_image.setup_args()
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
| [
"rospy.Publisher",
"rospy.Subscriber",
"cv2.imencode",
"rospy.init_node",
"tensorflow.Session",
"rospy.get_param",
"numpy.squeeze",
"cv_bridge.CvBridge",
"rospy.spin",
"classify_image.setup_args",
"classify_image.NodeLookup",
"classify_image.create_graph",
"classify_image.maybe_download_and_extract",
"rospy.loginfo"
]
| [((1726, 1753), 'classify_image.setup_args', 'classify_image.setup_args', ([], {}), '()\n', (1751, 1753), False, 'import classify_image\n'), ((1758, 1790), 'rospy.init_node', 'rospy.init_node', (['"""rostensorflow"""'], {}), "('rostensorflow')\n", (1773, 1790), False, 'import rospy\n'), ((243, 286), 'classify_image.maybe_download_and_extract', 'classify_image.maybe_download_and_extract', ([], {}), '()\n', (284, 286), False, 'import classify_image\n'), ((311, 323), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (321, 323), True, 'import tensorflow as tf\n'), ((332, 361), 'classify_image.create_graph', 'classify_image.create_graph', ([], {}), '()\n', (359, 361), False, 'import classify_image\n'), ((388, 398), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (396, 398), False, 'from cv_bridge import CvBridge\n'), ((420, 494), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/usb_cam/image_raw"""', 'Image', 'self.callback'], {'queue_size': '(1)'}), "('/usb_cam/image_raw', Image, self.callback, queue_size=1)\n", (436, 494), False, 'import rospy\n'), ((515, 562), 'rospy.Publisher', 'rospy.Publisher', (['"""result"""', 'String'], {'queue_size': '(1)'}), "('result', String, queue_size=1)\n", (530, 562), False, 'import rospy\n'), ((594, 634), 'rospy.get_param', 'rospy.get_param', (['"""~score_threshold"""', '(0.1)'], {}), "('~score_threshold', 0.1)\n", (609, 634), False, 'import rospy\n'), ((660, 692), 'rospy.get_param', 'rospy.get_param', (['"""~use_top_k"""', '(5)'], {}), "('~use_top_k', 5)\n", (675, 692), False, 'import rospy\n'), ((1163, 1186), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (1173, 1186), True, 'import numpy as np\n'), ((1262, 1289), 'classify_image.NodeLookup', 'classify_image.NodeLookup', ([], {}), '()\n', (1287, 1289), False, 'import classify_image\n'), ((1681, 1693), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1691, 1693), False, 'import rospy\n'), ((1545, 1603), 'rospy.loginfo', 'rospy.loginfo', (["('%s (score = %.5f)' % (human_string, score))"], {}), "('%s (score = %.5f)' % (human_string, score))\n", (1558, 1603), False, 'import rospy\n'), ((866, 896), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'cv_image'], {}), "('.jpg', cv_image)\n", (878, 896), False, 'import cv2\n')] |
import numpy as np
def segment_Y(Y, **params):
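    """Binarize Y against its Y_quantile quantile: values above the threshold
    become 1, values at or below it become 0."""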
Y_segments = params.get("Y_segments")
Y_quantile = params.get("Y_quantile")
print("segmenting Y")
Y = Y.values.reshape(-1)
Y_quantile = np.quantile(Y, Y_quantile, axis = 0)
bigger_mask = (Y > Y_quantile).copy()
smaller_mask = (Y <= Y_quantile).copy()
Y[bigger_mask] = 1
Y[smaller_mask] = 0
Y = Y.astype(int)
    return Y
| [
"numpy.quantile"
]
| [((191, 225), 'numpy.quantile', 'np.quantile', (['Y', 'Y_quantile'], {'axis': '(0)'}), '(Y, Y_quantile, axis=0)\n', (202, 225), True, 'import numpy as np\n')] |
import mysql.connector
import random
from voice import synthetize_voice, delete_wav
def AllQuestionAI(id_theme):
i = 0
    # Connect to the database
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="<PASSWORD>",
database="Puzzlebox")
cursor = conn.cursor()
    # Run the query against the database
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # Fetch the results
rows = cursor.fetchall()
if rows:
for line in rows:
i += 1
enonce = line[1]
proposition1 = line[2]
proposition2 = line[3]
proposition3 = line[4]
proposition4 = line[5]
reponse = line[5]
print("*******************************************************************************")
print(" QUESTION ",i," ")
print("*******************************************************************************")
print("ENONCE : ", enonce)
print("PROPOSITION 1 : ", proposition1)
print("PROPOSITION 2 : ", proposition2)
print("PROPOSITION 3 : ", proposition3)
print("PROPOSITION 4 : ", proposition4)
print("REPONSE : ", reponse)
else:
print("Ce thème ne contient pas de questions")
def questionAI(id_theme):
i = 0
    # Connect to the database
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="<PASSWORD>",
database="Puzzlebox")
cursor = conn.cursor()
    # Run the query against the database
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # Fetch the results
rows = cursor.fetchall()
if rows:
nb_rows = len(rows)
num_question = random.randint(1, nb_rows)
        # List indices start at zero, so shift the question number down by one
num_question = num_question - 1
question = rows[num_question]
        result = []  # List that stores the query results
        # Retrieve the question fields from the tuple
result.append(question[1])
result.append(question[2])
result.append(question[3])
result.append(question[4])
result.append(question[5])
result.append(question[5]) #This last one is the answer
print("*******************************************************************************")
print(" QUESTION ",num_question+1," ")
print("*******************************************************************************")
print("ENONCE : ", result[0])
print("PROPOSITION 1 : ", result[1])
print("PROPOSITION 2 : ", result[2])
print("PROPOSITION 3 : ", result[3])
print("PROPOSITION 4 : ", result[4])
print("REPONSE : ", result[5])
#complete_question = ''.join(complete_question) #Convert tuple into string
return result
else:
print("Ce thème ne contient pas de questions")
def tell_question(question):
synthetize_voice(question[0])
for i in range(1,5) :
num_prop = "Proposition {} ".format(i)
num_prop = ''.join(num_prop)
line = ''.join(question[i])
line = num_prop + line
synthetize_voice(line)
delete_wav()
def quiz():
counter = 1
    while counter <= 5:
        questionAI(1)
        counter += 1
if __name__ == '__main__':
result = questionAI(1)
tell_question(result)
| [
"voice.delete_wav",
"random.randint",
"voice.synthetize_voice"
]
| [((3440, 3469), 'voice.synthetize_voice', 'synthetize_voice', (['question[0]'], {}), '(question[0])\n', (3456, 3469), False, 'from voice import synthetize_voice, delete_wav\n'), ((3685, 3697), 'voice.delete_wav', 'delete_wav', ([], {}), '()\n', (3695, 3697), False, 'from voice import synthetize_voice, delete_wav\n'), ((2171, 2197), 'random.randint', 'random.randint', (['(1)', 'nb_rows'], {}), '(1, nb_rows)\n', (2185, 2197), False, 'import random\n'), ((3658, 3680), 'voice.synthetize_voice', 'synthetize_voice', (['line'], {}), '(line)\n', (3674, 3680), False, 'from voice import synthetize_voice, delete_wav\n')] |
import numpy
def lax_friedrichs(cons_minus, cons_plus, simulation, tl):
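    # Lax-Friedrichs interface flux: F = 0.5 * (f(u_L) + f(u_R) - alpha * (u_R - u_L)),
    # where the numerical dissipation coefficient alpha = dx / dt is the grid speed.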
alpha = tl.grid.dx / tl.dt
flux = numpy.zeros_like(cons_minus)
prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim)
prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim)
f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus)
f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus )
flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \
alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) )
return flux
def upwind(cons_minus, cons_plus, simulation, patch):
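    # Upwind flux: let the model's Riemann solver pick the flux from the
    # left (cons_plus) and right (cons_minus) interface states.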
flux = numpy.zeros_like(cons_minus)
flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2],
cons_minus[:, 1:-1])
return flux
| [
"numpy.zeros_like"
]
| [((115, 143), 'numpy.zeros_like', 'numpy.zeros_like', (['cons_minus'], {}), '(cons_minus)\n', (131, 143), False, 'import numpy\n'), ((666, 694), 'numpy.zeros_like', 'numpy.zeros_like', (['cons_minus'], {}), '(cons_minus)\n', (682, 694), False, 'import numpy\n')] |
from terra_sdk.exceptions import LCDResponseError
from terrakg import logger
# Logging
from terrakg.client import ClientContainer
logger = logger.get_logger(__name__)
class Rates:
"""
Access the most recent rates.
"""
def __init__(self, client: ClientContainer):
self.client = client
def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000, reverse: bool = False):
"""
Returns the price for `amount` of the token `pair` (exchange is included in pair).
Set `reverse` to true to get the inverse price.
"""
desc, action, result_key = ("reverse_simulation", "ask_asset", "offer_amount") if reverse else (
"simulation", "offer_asset", "return_amount")
query_msg = {
desc: {
action: {
"amount": str(amount),
"info": {"token": {
"contract_addr": token_contract
}
}
}
}
}
try:
result = self.client.lcd_client.wasm.contract_query(pair, query_msg)
return result[result_key], result['commission_amount']
except LCDResponseError as e:
logger.warning(f"Issue with price query: {e}")
return None
| [
"terrakg.logger.get_logger",
"terrakg.logger.warning"
]
| [((142, 169), 'terrakg.logger.get_logger', 'logger.get_logger', (['__name__'], {}), '(__name__)\n', (159, 169), False, 'from terrakg import logger\n'), ((1272, 1318), 'terrakg.logger.warning', 'logger.warning', (['f"""Issue with price query: {e}"""'], {}), "(f'Issue with price query: {e}')\n", (1286, 1318), False, 'from terrakg import logger\n')] |
import bpy
import os, glob
from pathlib import Path
from enum import Enum
from abc import ABC, abstractmethod
import csv
from . import keying_module
def export_tracking_data(self, context):
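    # Export the frame-to-frame horizontal (x) displacement of the named tracker,
    # scaled by the distance multiplier, to <output>/<clip_name>/<clip_name>.csv.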
clip = context.space_data.clip
clip_name = os.path.splitext(clip.name)[0]
tracker_name = context.scene.tracking_local.tracker_name
output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name)
keying_module.create_directory(output_path)
file = open(os.path.join(output_path,clip_name+".csv"), "w", newline='')
writer = csv.writer(file, delimiter=',')
multiplier = context.scene.tracking_local.tracking_multiplier
tracker = clip.tracking.tracks.get(tracker_name)
if tracker is not None:
prev = tracker.markers[0].co[0]
for m in tracker.markers:
writer.writerow([(m.co[0] - prev) * multiplier])
prev = m.co[0]
self.report({"INFO"},"TRACKER SUCESSFULLY EXPORTED")
else:
self.report({"ERROR"},"TRACKER NOT FOUND")
file.close()
#----------------------------------------
# PROPERTIES
#----------------------------------------
class TrackingSceneProps(bpy.types.PropertyGroup):
tracker_name: bpy.props.StringProperty \
(
name = "Track name",
description = "Name of the tracker for data export",
)
tracking_multiplier: bpy.props.FloatProperty \
(
name = "Distance multiplier",
description = "The exported tracking distance gets multiplied by this value",
default = 1,
min = 0.0001
)
class TrackingPanel(bpy.types.Panel):
bl_label = "Tracking Panel"
bl_idname = "SCENE_PT_tracking_rendering"
bl_space_type = "CLIP_EDITOR"
bl_region_type = "UI"
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
box = layout.box()
box.row().label(text = "Tracking export")
box.row().prop(scene.tracking_local, "tracker_name")
box.row().prop(scene.tracking_local, "tracking_multiplier")
box.row().operator("tracking.export_data")
class TrackingExportDataOp(bpy.types.Operator):
bl_idname = "tracking.export_data"
bl_label = "Export Data"
bl_description = "Export the tracking data of the chosen tracker"
def execute(self, context):
export_tracking_data(self, context)
return {"FINISHED"}
classes = (
TrackingExportDataOp,
TrackingPanel,
TrackingSceneProps
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
    del bpy.types.Scene.tracking_local
| [
"bpy.props.PointerProperty",
"bpy.utils.unregister_class",
"bpy.props.StringProperty",
"csv.writer",
"bpy.props.FloatProperty",
"os.path.splitext",
"os.path.join",
"bpy.utils.register_class"
]
| [((563, 594), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (573, 594), False, 'import csv\n'), ((1251, 1350), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'name': '"""Track name"""', 'description': '"""Name of the tracker for data export"""'}), "(name='Track name', description=\n 'Name of the tracker for data export')\n", (1275, 1350), False, 'import bpy\n'), ((1411, 1571), 'bpy.props.FloatProperty', 'bpy.props.FloatProperty', ([], {'name': '"""Distance multiplier"""', 'description': '"""The exported tracking distance gets multiplied by this value"""', 'default': '(1)', 'min': '(0.0001)'}), "(name='Distance multiplier', description=\n 'The exported tracking distance gets multiplied by this value', default\n =1, min=0.0001)\n", (1434, 1571), False, 'import bpy\n'), ((2686, 2736), 'bpy.props.PointerProperty', 'bpy.props.PointerProperty', ([], {'type': 'TrackingSceneProps'}), '(type=TrackingSceneProps)\n', (2711, 2736), False, 'import bpy\n'), ((243, 270), 'os.path.splitext', 'os.path.splitext', (['clip.name'], {}), '(clip.name)\n', (259, 270), False, 'import os, glob\n'), ((489, 534), 'os.path.join', 'os.path.join', (['output_path', "(clip_name + '.csv')"], {}), "(output_path, clip_name + '.csv')\n", (501, 534), False, 'import os, glob\n'), ((2619, 2648), 'bpy.utils.register_class', 'bpy.utils.register_class', (['cls'], {}), '(cls)\n', (2643, 2648), False, 'import bpy\n'), ((2798, 2829), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['cls'], {}), '(cls)\n', (2824, 2829), False, 'import bpy\n')] |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
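# Returns the view template applied to each input view, or None when the view has no
# template (ViewTemplateId of -1, i.e. ElementId.InvalidElementId).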
def GetViewTemplate(view):
if not view: return None
elif hasattr(view, "ViewTemplateId"):
if view.ViewTemplateId.IntegerValue == -1: return None
else: return view.Document.GetElement(view.ViewTemplateId)
else: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views)
| [
"clr.AddReference"
]
| [((11, 39), 'clr.AddReference', 'clr.AddReference', (['"""RevitAPI"""'], {}), "('RevitAPI')\n", (27, 39), False, 'import clr\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0108_auto_20171130_1004'),
]
operations = [
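        # The escaped verbose_name strings are Chinese labels: roughly
        # "relay sender whitelist" and "gateway quarantine report recipient blacklist".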
migrations.AlterModelOptions(
name='relaysenderwhitelist',
options={'verbose_name': '\u4e2d\u7ee7\u53d1\u4ef6\u4eba\u767d\u540d\u5355'},
),
migrations.AlterModelOptions(
name='spamrptblacklist',
options={'verbose_name': '\u7f51\u5173\u9694\u79bb\u62a5\u544a\u6536\u4ef6\u4eba\u9ed1\u540d\u5355'},
),
]
| [
"django.db.migrations.AlterModelOptions"
]
| [((248, 348), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""relaysenderwhitelist"""', 'options': "{'verbose_name': '中继发件人白名单'}"}), "(name='relaysenderwhitelist', options={\n 'verbose_name': '中继发件人白名单'})\n", (276, 348), False, 'from django.db import models, migrations\n'), ((428, 528), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""spamrptblacklist"""', 'options': "{'verbose_name': '网关隔离报告收件人黑名单'}"}), "(name='spamrptblacklist', options={\n 'verbose_name': '网关隔离报告收件人黑名单'})\n", (456, 528), False, 'from django.db import models, migrations\n')] |
#!/usr/bin/env python
"""Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import re
import sys
from distutils.version import StrictVersion, LooseVersion
from functools import partial
import yaml
from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
from voluptuous import Required, Schema, Invalid
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import string_types
from ansible.utils.version import SemanticVersion
def isodate(value, check_deprecation_date=False, is_tombstone=False):
"""Validate a datetime.date or ISO 8601 date string."""
# datetime.date objects come from YAML dates, these are ok
if isinstance(value, datetime.date):
removal_date = value
else:
# make sure we have a string
msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
if not isinstance(value, string_types):
raise Invalid(msg)
        # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
# we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise Invalid(msg)
try:
removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise Invalid(msg)
# Make sure date is correct
today = datetime.date.today()
if is_tombstone:
# For a tombstone, the removal date must be in the past
if today < removal_date:
raise Invalid(
'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today))
else:
# For a deprecation, the removal date must be in the future. Only test this if
# check_deprecation_date is truish, to avoid checks to suddenly start to fail.
if check_deprecation_date and today > removal_date:
raise Invalid(
'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today))
return value
def removal_version(value, is_ansible, current_version=None, is_tombstone=False):
"""Validate a removal version string."""
msg = (
'Removal version must be a string' if is_ansible else
'Removal version must be a semantic version (https://semver.org/)'
)
if not isinstance(value, string_types):
raise Invalid(msg)
try:
if is_ansible:
version = StrictVersion()
version.parse(value)
version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion
else:
version = SemanticVersion()
version.parse(value)
if version.major != 0 and (version.minor != 0 or version.patch != 0):
raise Invalid('removal_version (%r) must be a major release, not a minor or patch release '
'(see specification at https://semver.org/)' % (value, ))
if current_version is not None:
if is_tombstone:
# For a tombstone, the removal version must not be in the future
if version > current_version:
raise Invalid('The tombstone removal_version (%r) must not be after the '
'current version (%s)' % (value, current_version))
else:
# For a deprecation, the removal version must be in the future
if version <= current_version:
raise Invalid('The deprecation removal_version (%r) must be after the '
'current version (%s)' % (value, current_version))
except ValueError:
raise Invalid(msg)
return value
def any_value(value):
"""Accepts anything."""
return value
def get_ansible_version():
"""Return current ansible-core version"""
from ansible.release import __version__
return LooseVersion('.'.join(__version__.split('.')[:3]))
def get_collection_version():
"""Return current collection version, or None if it is not available"""
import importlib.util
collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'collection_detail.py')
collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)
collection_detail = importlib.util.module_from_spec(collection_detail_spec)
sys.modules['collection_detail'] = collection_detail
collection_detail_spec.loader.exec_module(collection_detail)
# noinspection PyBroadException
try:
result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')
return SemanticVersion(result['version'])
except Exception: # pylint: disable=broad-except
# We do not care why it fails, in case we cannot get the version
# just return None to indicate "we don't know".
return None
def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
"""Validate explicit runtime metadata file"""
try:
with open(path, 'r') as f_path:
routing = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
return
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' %
(path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return
if is_ansible:
current_version = get_ansible_version()
else:
current_version = get_collection_version()
# Updates to schema MUST also be reflected in the documentation
# ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html
# plugin_routing schema
avoid_additional_data = Schema(
Any(
{
Required('removal_version'): any_value,
'warning_text': any_value,
},
{
Required('removal_date'): any_value,
'warning_text': any_value,
}
),
extra=PREVENT_EXTRA
)
deprecation_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version),
'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
tombstoning_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version, is_tombstone=True),
'removal_date': partial(isodate, is_tombstone=True),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
plugin_routing_schema = Any(
Schema({
('deprecation'): Any(deprecation_schema),
('tombstone'): Any(tombstoning_schema),
('redirect'): Any(*string_types),
}, extra=PREVENT_EXTRA),
)
list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
for str_type in string_types]
plugin_schema = Schema({
('action'): Any(None, *list_dict_plugin_routing_schema),
('become'): Any(None, *list_dict_plugin_routing_schema),
('cache'): Any(None, *list_dict_plugin_routing_schema),
('callback'): Any(None, *list_dict_plugin_routing_schema),
('cliconf'): Any(None, *list_dict_plugin_routing_schema),
('connection'): Any(None, *list_dict_plugin_routing_schema),
('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
('filter'): Any(None, *list_dict_plugin_routing_schema),
('httpapi'): Any(None, *list_dict_plugin_routing_schema),
('inventory'): Any(None, *list_dict_plugin_routing_schema),
('lookup'): Any(None, *list_dict_plugin_routing_schema),
('module_utils'): Any(None, *list_dict_plugin_routing_schema),
('modules'): Any(None, *list_dict_plugin_routing_schema),
('netconf'): Any(None, *list_dict_plugin_routing_schema),
('shell'): Any(None, *list_dict_plugin_routing_schema),
('strategy'): Any(None, *list_dict_plugin_routing_schema),
('terminal'): Any(None, *list_dict_plugin_routing_schema),
('test'): Any(None, *list_dict_plugin_routing_schema),
('vars'): Any(None, *list_dict_plugin_routing_schema),
}, extra=PREVENT_EXTRA)
# import_redirection schema
import_redirection_schema = Any(
Schema({
('redirect'): Any(*string_types),
# import_redirect doesn't currently support deprecation
}, extra=PREVENT_EXTRA)
)
list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
for str_type in string_types]
# top level schema
schema = Schema({
# All of these are optional
('plugin_routing'): Any(plugin_schema),
('import_redirection'): Any(None, *list_dict_import_redirection_schema),
# requires_ansible: In the future we should validate this with SpecifierSet
('requires_ansible'): Any(*string_types),
('action_groups'): dict,
}, extra=PREVENT_EXTRA)
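    # For orientation, a minimal collection meta/runtime.yml accepted by the schema
    # above could look like this (illustrative values only, not taken from a real
    # collection):
    #
    #     requires_ansible: '>=2.9.10'
    #     plugin_routing:
    #       modules:
    #         old_module:
    #           deprecation:
    #             removal_version: '2.0.0'
    #             warning_text: Use new_module instead.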
# Ensure schema is valid
try:
schema(routing)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line/column numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
def main():
"""Validate runtime metadata"""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
collection_legacy_file = 'meta/routing.yml'
collection_runtime_file = 'meta/runtime.yml'
# This is currently disabled, because if it is enabled this test can start failing
# at a random date. For this to be properly activated, we (a) need to be able to return
# codes for this test, and (b) make this error optional.
check_deprecation_dates = False
for path in paths:
if path == collection_legacy_file:
print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
continue
validate_metadata_file(
path,
is_ansible=path not in (collection_legacy_file, collection_runtime_file),
check_deprecation_dates=check_deprecation_dates)
if __name__ == '__main__':
main()
| [
"ansible.utils.version.SemanticVersion",
"voluptuous.humanize.humanize_error",
"voluptuous.Required",
"distutils.version.StrictVersion",
"ansible.release.__version__.split",
"datetime.datetime.strptime",
"re.match",
"voluptuous.Any",
"voluptuous.Invalid",
"sys.stdin.read",
"yaml.safe_load",
"os.path.dirname",
"functools.partial",
"distutils.version.LooseVersion",
"datetime.date.today"
]
| [((1510, 1531), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1529, 1531), False, 'import datetime\n'), ((2513, 2525), 'voluptuous.Invalid', 'Invalid', (['msg'], {}), '(msg)\n', (2520, 2525), False, 'from voluptuous import Required, Schema, Invalid\n'), ((4884, 4918), 'ansible.utils.version.SemanticVersion', 'SemanticVersion', (["result['version']"], {}), "(result['version'])\n", (4899, 4918), False, 'from ansible.utils.version import SemanticVersion\n'), ((1076, 1088), 'voluptuous.Invalid', 'Invalid', (['msg'], {}), '(msg)\n', (1083, 1088), False, 'from voluptuous import Required, Schema, Invalid\n'), ((1235, 1282), 're.match', 're.match', (['"""^[0-9]{4}-[0-9]{2}-[0-9]{2}$"""', 'value'], {}), "('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value)\n", (1243, 1282), False, 'import re\n'), ((1302, 1314), 'voluptuous.Invalid', 'Invalid', (['msg'], {}), '(msg)\n', (1309, 1314), False, 'from voluptuous import Required, Schema, Invalid\n'), ((1668, 1768), 'voluptuous.Invalid', 'Invalid', (["('The tombstone removal_date (%s) must not be after today (%s)' % (\n removal_date, today))"], {}), "('The tombstone removal_date (%s) must not be after today (%s)' % (\n removal_date, today))\n", (1675, 1768), False, 'from voluptuous import Required, Schema, Invalid\n'), ((2043, 2141), 'voluptuous.Invalid', 'Invalid', (["('The deprecation removal_date (%s) must be after today (%s)' % (\n removal_date, today))"], {}), "('The deprecation removal_date (%s) must be after today (%s)' % (\n removal_date, today))\n", (2050, 2141), False, 'from voluptuous import Required, Schema, Invalid\n'), ((2580, 2595), 'distutils.version.StrictVersion', 'StrictVersion', ([], {}), '()\n', (2593, 2595), False, 'from distutils.version import StrictVersion, LooseVersion\n'), ((2651, 2670), 'distutils.version.LooseVersion', 'LooseVersion', (['value'], {}), '(value)\n', (2663, 2670), False, 'from distutils.version import StrictVersion, LooseVersion\n'), ((2760, 2777), 'ansible.utils.version.SemanticVersion', 'SemanticVersion', ([], {}), '()\n', (2775, 2777), False, 'from ansible.utils.version import SemanticVersion\n'), ((3822, 3834), 'voluptuous.Invalid', 'Invalid', (['msg'], {}), '(msg)\n', (3829, 3834), False, 'from voluptuous import Required, Schema, Invalid\n'), ((5322, 5344), 'yaml.safe_load', 'yaml.safe_load', (['f_path'], {}), '(f_path)\n', (5336, 5344), False, 'import yaml\n'), ((7929, 7972), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (7932, 7972), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((7994, 8037), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (7997, 8037), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8058, 8101), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8061, 8101), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8125, 8168), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8128, 8168), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8191, 8234), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8194, 8234), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8260, 8303), 
'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8263, 8303), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8332, 8375), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8335, 8375), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8397, 8440), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8400, 8440), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8463, 8506), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8466, 8506), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8531, 8574), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8534, 8574), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8596, 8639), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8599, 8639), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8667, 8710), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8670, 8710), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8733, 8776), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8736, 8776), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8799, 8842), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8802, 8842), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8863, 8906), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8866, 8906), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8930, 8973), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (8933, 8973), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((8997, 9040), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (9000, 9040), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9060, 9103), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (9063, 9103), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9123, 9166), 'voluptuous.Any', 'Any', (['None', '*list_dict_plugin_routing_schema'], {}), '(None, *list_dict_plugin_routing_schema)\n', (9126, 9166), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9702, 9720), 'voluptuous.Any', 'Any', (['plugin_schema'], {}), '(plugin_schema)\n', (9705, 9720), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9754, 9801), 'voluptuous.Any', 'Any', (['None', '*list_dict_import_redirection_schema'], {}), '(None, *list_dict_import_redirection_schema)\n', (9757, 9801), False, 'from voluptuous 
import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9917, 9935), 'voluptuous.Any', 'Any', (['*string_types'], {}), '(*string_types)\n', (9920, 9935), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((1453, 1465), 'voluptuous.Invalid', 'Invalid', (['msg'], {}), '(msg)\n', (1460, 1465), False, 'from voluptuous import Required, Schema, Invalid\n'), ((2915, 3064), 'voluptuous.Invalid', 'Invalid', (["('removal_version (%r) must be a major release, not a minor or patch release (see specification at https://semver.org/)'\n % (value,))"], {}), "(\n 'removal_version (%r) must be a major release, not a minor or patch release (see specification at https://semver.org/)'\n % (value,))\n", (2922, 3064), False, 'from voluptuous import Required, Schema, Invalid\n'), ((4074, 4096), 'ansible.release.__version__.split', '__version__.split', (['"""."""'], {}), "('.')\n", (4091, 4096), False, 'from ansible.release import __version__\n'), ((4312, 4337), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4327, 4337), False, 'import os\n'), ((6169, 6196), 'voluptuous.Required', 'Required', (['"""removal_version"""'], {}), "('removal_version')\n", (6177, 6196), False, 'from voluptuous import Required, Schema, Invalid\n'), ((6297, 6321), 'voluptuous.Required', 'Required', (['"""removal_date"""'], {}), "('removal_date')\n", (6305, 6321), False, 'from voluptuous import Required, Schema, Invalid\n'), ((6634, 6719), 'functools.partial', 'partial', (['removal_version'], {'is_ansible': 'is_ansible', 'current_version': 'current_version'}), '(removal_version, is_ansible=is_ansible, current_version=current_version\n )\n', (6641, 6719), False, 'from functools import partial\n'), ((6791, 6855), 'functools.partial', 'partial', (['isodate'], {'check_deprecation_date': 'check_deprecation_dates'}), '(isodate, check_deprecation_date=check_deprecation_dates)\n', (6798, 6855), False, 'from functools import partial\n'), ((6889, 6907), 'voluptuous.Any', 'Any', (['*string_types'], {}), '(*string_types)\n', (6892, 6907), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((7168, 7272), 'functools.partial', 'partial', (['removal_version'], {'is_ansible': 'is_ansible', 'current_version': 'current_version', 'is_tombstone': '(True)'}), '(removal_version, is_ansible=is_ansible, current_version=\n current_version, is_tombstone=True)\n', (7175, 7272), False, 'from functools import partial\n'), ((7344, 7379), 'functools.partial', 'partial', (['isodate'], {'is_tombstone': '(True)'}), '(isodate, is_tombstone=True)\n', (7351, 7379), False, 'from functools import partial\n'), ((7413, 7431), 'voluptuous.Any', 'Any', (['*string_types'], {}), '(*string_types)\n', (7416, 7431), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((7574, 7597), 'voluptuous.Any', 'Any', (['deprecation_schema'], {}), '(deprecation_schema)\n', (7577, 7597), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((7626, 7649), 'voluptuous.Any', 'Any', (['tombstoning_schema'], {}), '(tombstoning_schema)\n', (7629, 7649), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((7677, 7695), 'voluptuous.Any', 'Any', (['*string_types'], {}), '(*string_types)\n', (7680, 7695), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), ((9310, 9328), 'voluptuous.Any', 'Any', (['*string_types'], {}), '(*string_types)\n', (9313, 9328), False, 'from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA\n'), 
((10335, 10351), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (10349, 10351), False, 'import sys\n'), ((1355, 1400), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', '"""%Y-%m-%d"""'], {}), "(value, '%Y-%m-%d')\n", (1381, 1400), False, 'import datetime\n'), ((3311, 3436), 'voluptuous.Invalid', 'Invalid', (["('The tombstone removal_version (%r) must not be after the current version (%s)'\n % (value, current_version))"], {}), "(\n 'The tombstone removal_version (%r) must not be after the current version (%s)'\n % (value, current_version))\n", (3318, 3436), False, 'from voluptuous import Required, Schema, Invalid\n'), ((3634, 3757), 'voluptuous.Invalid', 'Invalid', (["('The deprecation removal_version (%r) must be after the current version (%s)'\n % (value, current_version))"], {}), "(\n 'The deprecation removal_version (%r) must be after the current version (%s)'\n % (value, current_version))\n", (3641, 3757), False, 'from voluptuous import Required, Schema, Invalid\n'), ((10224, 10254), 'voluptuous.humanize.humanize_error', 'humanize_error', (['routing', 'error'], {}), '(routing, error)\n', (10238, 10254), False, 'from voluptuous.humanize import humanize_error\n')] |
#!/usr/bin/env python3
"""Find unicode control characters in source files
By default the script takes one or more files or directories and looks for
unicode control characters in all text files. To narrow down the files, provide
a config file with the -c command line option, defining a scan_exclude list,
which should be a list of regular expressions matching paths to exclude from
the scan. There is a second mode, enabled with -p, which, when set to 'all',
prints all control characters and, when set to 'bidi', prints only the 9
bidirectional control characters.
"""
import sys, os, argparse, re, unicodedata, magic
import importlib
from stat import *
scan_exclude = [r'\.git/', r'\.hg/', r'\.desktop$', r'ChangeLog$', r'NEWS$',
r'\.ppd$', r'\.txt$', r'\.directory$']
scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$',
r'text/html$']
verbose_mode = False
# Print to stderr in verbose mode.
def eprint(*args, **kwargs):
if verbose_mode:
print(*args, file=sys.stderr, **kwargs)
# Decode a single latin1 line.
def decodeline(inf):
if isinstance(inf, str):
return inf
return inf.decode('latin-1')
# Make a text string from a file, attempting to decode from latin1 if necessary.
# Other non-utf-8 locales are not supported at the moment.
def getfiletext(filename):
text = None
with open(filename) as infile:
try:
if detailed_mode:
return [decodeline(inf) for inf in infile]
except Exception as e:
eprint('%s: %s' % (filename, e))
return None
try:
text = ''.join(infile)
except UnicodeDecodeError:
eprint('%s: Retrying with latin1' % filename)
try:
text = ''.join([decodeline(inf) for inf in infile])
except Exception as e:
eprint('%s: %s' % (filename, e))
if text:
return set(text)
else:
return None
def analyze_text_detailed(filename, text, disallowed, msg):
line = 0
warned = False
for t in text:
line = line + 1
subset = [c for c in t if c in disallowed]
if subset:
print('%s:%d %s: %s' % (filename, line, msg, subset))
warned = True
if not warned:
eprint('%s: OK' % filename)
# Look for disallowed characters in the text. We reduce all characters into a
# set to speed up analysis. FIXME: Add a slow mode to get line numbers in files
# that have these disallowed chars.
def analyze_text(filename, text, disallowed, msg):
if detailed_mode:
analyze_text_detailed(filename, text, disallowed, msg)
return
if not text.isdisjoint(disallowed):
print('%s: %s: %s' % (filename, msg, text & disallowed))
else:
eprint('%s: OK' % filename)
def should_read(f):
m = magic.detect_from_filename(f)
# Fast check, just the file name.
if [e for e in scan_exclude if re.search(e, f)]:
return False
# Slower check, mime type.
if not 'text/' in m.mime_type \
or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]:
return False
return True
# Get file text and feed into analyze_text.
def analyze_file(f, disallowed, msg):
eprint('%s: Reading file' % f)
if should_read(f):
text = getfiletext(f)
if text:
analyze_text(f, text, disallowed, msg)
else:
eprint('%s: SKIPPED' % f)
# Actual implementation of the recursive descent into directories.
def analyze_any(p, disallowed, msg):
mode = os.stat(p).st_mode
if S_ISDIR(mode):
analyze_dir(p, disallowed, msg)
elif S_ISREG(mode):
analyze_file(p, disallowed, msg)
else:
eprint('%s: UNREADABLE' % p)
# Recursively analyze files in the directory.
def analyze_dir(d, disallowed, msg):
for f in os.listdir(d):
analyze_any(os.path.join(d, f), disallowed, msg)
def analyze_paths(paths, disallowed, msg):
for p in paths:
analyze_any(p, disallowed, msg)
# All control characters. We omit the ascii control characters.
def nonprint_unicode(c):
cat = unicodedata.category(c)
if cat.startswith('C') and cat != 'Cc':
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Look for Unicode control characters")
parser.add_argument('path', metavar='path', nargs='+',
help='Sources to analyze')
parser.add_argument('-p', '--nonprint', required=False,
type=str, choices=['all', 'bidi'],
help='Look for either all non-printable unicode characters or bidirectional control characters.')
parser.add_argument('-v', '--verbose', required=False, action='store_true',
help='Verbose mode.')
parser.add_argument('-d', '--detailed', required=False, action='store_true',
help='Print line numbers where characters occur.')
parser.add_argument('-t', '--notests', required=False,
action='store_true', help='Exclude tests (basically test.* as a component of path).')
parser.add_argument('-c', '--config', required=False, type=str,
help='Configuration file to read settings from.')
args = parser.parse_args()
verbose_mode = args.verbose
detailed_mode = args.detailed
if not args.nonprint:
# Formatting control characters in the unicode space. This includes the
# bidi control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
unicodedata.category(chr(c)) == 'Cf')
msg = 'unicode control characters'
elif args.nonprint == 'all':
# All control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
nonprint_unicode(chr(c)))
msg = 'disallowed characters'
else:
# Only bidi control characters.
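        # That is, LRE/RLE/PDF/LRO/RLO (U+202A..U+202E) and LRI/RLI/FSI/PDI (U+2066..U+2069).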
disallowed = set([
chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e),
chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)])
msg = 'bidirectional control characters'
if args.config:
spec = importlib.util.spec_from_file_location("settings", args.config)
settings = importlib.util.module_from_spec(spec)
spec.loader.exec_module(settings)
if hasattr(settings, 'scan_exclude'):
scan_exclude = scan_exclude + settings.scan_exclude
if hasattr(settings, 'scan_exclude_mime'):
scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime
if args.notests:
scan_exclude = scan_exclude + [r'/test[^/]+/']
analyze_paths(args.path, disallowed, msg)
| [
"os.listdir",
"argparse.ArgumentParser",
"importlib.util.spec_from_file_location",
"os.path.join",
"magic.detect_from_filename",
"unicodedata.category",
"importlib.util.module_from_spec",
"os.stat",
"re.search"
]
| [((2859, 2888), 'magic.detect_from_filename', 'magic.detect_from_filename', (['f'], {}), '(f)\n', (2885, 2888), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((3872, 3885), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (3882, 3885), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((4149, 4172), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (4169, 4172), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((4295, 4369), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Look for Unicode control characters"""'}), "(description='Look for Unicode control characters')\n", (4318, 4369), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((3582, 3592), 'os.stat', 'os.stat', (['p'], {}), '(p)\n', (3589, 3592), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((6183, 6246), 'importlib.util.spec_from_file_location', 'importlib.util.spec_from_file_location', (['"""settings"""', 'args.config'], {}), "('settings', args.config)\n", (6221, 6246), False, 'import importlib\n'), ((6266, 6303), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (6297, 6303), False, 'import importlib\n'), ((2962, 2977), 're.search', 're.search', (['e', 'f'], {}), '(e, f)\n', (2971, 2977), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((3907, 3925), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (3919, 3925), False, 'import sys, os, argparse, re, unicodedata, magic\n'), ((3117, 3142), 're.search', 're.search', (['e', 'm.mime_type'], {}), '(e, m.mime_type)\n', (3126, 3142), False, 'import sys, os, argparse, re, unicodedata, magic\n')] |
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import unicode_literals
import time
from datetime import datetime
import mock
import pytest
from mock import MagicMock
from pyVmomi import vim
from datadog_checks.vsphere import VSphereCheck
from datadog_checks.vsphere.cache_config import CacheConfig
from datadog_checks.vsphere.common import SOURCE_TYPE
from datadog_checks.vsphere.errors import BadConfigError, ConnectionError
from datadog_checks.vsphere.vsphere import (
REFRESH_METRICS_METADATA_INTERVAL,
REFRESH_MORLIST_INTERVAL,
RESOURCE_TYPE_METRICS,
SHORT_ROLLUP,
)
from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server
SERVICE_CHECK_TAGS = ["vcenter_server:vsphere_mock", "vcenter_host:None", "foo:bar"]
def test__init__(instance):
with pytest.raises(BadConfigError):
# Must define a unique 'name' per vCenter instance
VSphereCheck('vsphere', {}, {}, [{'': ''}])
init_config = {
'clean_morlist_interval': 50,
'refresh_morlist_interval': 42,
'refresh_metrics_metadata_interval': -42,
'batch_property_collector_size': -1,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
i_key = check._instance_key(instance)
assert check.time_started > 0
assert not check.server_instances
assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42
assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42
assert check.clean_morlist_interval == 50
assert len(check.event_config) == 1
assert 'vsphere_mock' in check.event_config
assert not check.registry
assert not check.latest_event_query
assert check.batch_collector_size == 0
assert check.batch_morlist_size == 50
assert check.excluded_host_tags == []
def test_excluded_host_tags(vsphere, instance, aggregator):
# Check default value and precedence of instance config over init config
check = VSphereCheck('vsphere', {}, {}, [instance])
assert check.excluded_host_tags == []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == ["vsphere_host"]
instance["excluded_host_tags"] = []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == []
# Test host tags are excluded from external host metadata, but still stored in the cache for metrics
vsphere.excluded_host_tags = ["vsphere_host"]
mocked_vm = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_mors_attrs = {
mocked_vm: {
"name": "mocked_vm",
"parent": mocked_host,
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
},
mocked_host: {"name": "mocked_host", "parent": None},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)]
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere.check(instance)
ext_host_tags = vsphere.get_external_host_tags()
# vsphere_host tag not in external metadata
for host, source_tags in ext_host_tags:
if host == u"mocked_vm":
tags = source_tags["vsphere"]
for tag in tags:
assert "vsphere_host:" not in tag
break
# vsphere_host tag still in cache for sending with metrics
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="mocked_vm", count=1)
aggregator.assert_metric_has_tag('vsphere.mymetric', tag="vsphere_host:mocked_host", count=1)
def test__is_excluded():
"""
* Exclude hosts/vms not compliant with the user's `*_include` configuration.
* Exclude "non-labeled" virtual machines when the user configuration instructs to.
"""
# Sample(s)
include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"}
# OK
included_host = MockedMOR(spec="HostSystem", name="foo")
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert not VSphereCheck._is_excluded(included_host, {"name": included_host.name}, include_regexes, None)
assert not VSphereCheck._is_excluded(included_vm, {"name": included_vm.name}, include_regexes, None)
# Not OK!
excluded_host = MockedMOR(spec="HostSystem", name="bar")
excluded_vm = MockedMOR(spec="VirtualMachine", name="bar")
assert VSphereCheck._is_excluded(excluded_host, {"name": excluded_host.name}, include_regexes, None)
assert VSphereCheck._is_excluded(excluded_vm, {"name": excluded_vm.name}, include_regexes, None)
# Sample(s)
include_regexes = None
include_only_marked = True
# OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo", label=True)
assert not VSphereCheck._is_excluded(
included_vm, {"customValue": included_vm.customValue}, include_regexes, include_only_marked
)
# Not OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert VSphereCheck._is_excluded(included_vm, {"customValue": []}, include_regexes, include_only_marked)
def test_vms_in_filtered_host_are_filtered(vsphere, instance):
"""Test that all vms belonging to a filtered host are also filtered"""
server_instance = vsphere._get_server_instance(instance)
filtered_host = MockedMOR(spec="HostSystem")
filtered_vm = MockedMOR(spec="VirtualMachine")
non_filtered_host = MockedMOR(spec="HostSystem")
non_filtered_vm = MockedMOR(spec="VirtualMachine")
mocked_mors_attrs = {
filtered_host: {"name": "filtered_host_number_1", "parent": None},
filtered_vm: {
"name": "this_vm_is_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": filtered_host,
},
non_filtered_host: {"name": "non_filtered_host_number_1", "parent": None},
non_filtered_vm: {
"name": "this_vm_is_not_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": non_filtered_host,
},
}
regex = {'host_include': '^(?!filtered_.+)'}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, regex, False, [])
assert len(obj_list[vim.VirtualMachine]) == 1
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "vm",
"mor": non_filtered_vm,
"hostname": "this_vm_is_not_filtered",
"tags": ["vsphere_host:non_filtered_host_number_1", "vsphere_type:vm"],
} == obj_list[vim.VirtualMachine][0]
assert {
"mor_type": "host",
"mor": non_filtered_host,
"hostname": "non_filtered_host_number_1",
"tags": ["vsphere_type:host"],
} == obj_list[vim.HostSystem][0]
def test__get_all_objs(vsphere, instance):
"""
Test that we don't raise KeyError if the property collector failed to collect some attributes
    and that we handle the case where there are missing attributes
"""
server_instance = vsphere._get_server_instance(instance)
vm_no_parent = MockedMOR(spec="VirtualMachine")
vm_no_powerstate = MockedMOR(spec="VirtualMachine")
vm_host_parent = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_datastore = MockedMOR(spec="Datastore")
mocked_datacenter = MockedMOR(spec="Datacenter")
mocked_cluster = MockedMOR(spec="ClusterComputeResource")
mocked_mors_attrs = {
vm_no_parent: {"name": "vm_no_parent", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
vm_no_powerstate: {"name": "vm_no_powerstate"},
vm_host_parent: {"parent": mocked_host, "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
mocked_host: {"name": "mocked_host", "parent": None},
mocked_datastore: {},
mocked_cluster: {"name": "cluster"},
mocked_datacenter: {"parent": MockedMOR(spec="Folder", name="unknown folder"), "name": "datacenter"},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, None, False, [])
assert len(obj_list[vim.VirtualMachine]) == 2
assert {
"mor_type": "vm",
"mor": vm_no_parent,
"hostname": "vm_no_parent",
"tags": ["vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert {
"mor_type": "vm",
"mor": vm_host_parent,
"hostname": "unknown",
"tags": ["vsphere_host:mocked_host", "vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "host",
"mor": mocked_host,
"hostname": "mocked_host",
"tags": ["vsphere_type:host"],
} in obj_list[vim.HostSystem]
assert len(obj_list[vim.Datastore]) == 1
assert {
"mor_type": "datastore",
"mor": mocked_datastore,
"hostname": None,
"tags": ["vsphere_datastore:unknown", "vsphere_type:datastore"],
} in obj_list[vim.Datastore]
assert len(obj_list[vim.Datacenter]) == 1
assert {
"mor_type": "datacenter",
"mor": mocked_datacenter,
"hostname": None,
"tags": ["vsphere_folder:unknown", "vsphere_datacenter:datacenter", "vsphere_type:datacenter"],
} in obj_list[vim.Datacenter]
assert len(obj_list[vim.ClusterComputeResource]) == 1
assert {
"mor_type": "cluster",
"mor": mocked_cluster,
"hostname": None,
"tags": ["vsphere_cluster:cluster", "vsphere_type:cluster"],
} in obj_list[vim.ClusterComputeResource]
def test__collect_mors_and_attributes(vsphere, instance):
"""
    Test that we check for errors when collecting properties with the property collector
"""
server_instance = vsphere._get_server_instance(instance)
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
obj = MagicMock(missingSet=None, obj="obj")
result = MagicMock(token=None, objects=[obj])
server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result
log = MagicMock()
vsphere.log = log
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_not_called()
assert len(mor_attrs) == 1
obj.missingSet = [MagicMock(path="prop", fault="fault")]
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s', 'prop', 'obj', 'fault')
assert len(mor_attrs) == 1
def test__cache_morlist_raw(vsphere, instance):
"""
Explore the vCenter infrastructure to discover hosts, virtual machines.
Input topology:
```
rootFolder
- datacenter1
- compute_resource1
- host1 # Filtered out
- host2
- folder1
- datacenter2
- compute_resource2
- host3
- vm1 # Not labeled
- vm2 # Filtered out
- vm3 # Powered off
- vm4
```
"""
# Samples
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
instance["host_include_only_regex"] = "host[2-9]"
instance["vm_include_only_regex"] = "vm[^2]"
instance["include_only_marked"] = True
# Discover hosts and virtual machines
vsphere._cache_morlist_raw(instance)
# Assertions: 1 labeled+monitored VM + 2 hosts + 2 datacenters + 2 clusters + 1 datastore.
assertMOR(vsphere, instance, count=8)
# ...on hosts
assertMOR(vsphere, instance, spec="host", count=2)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_datacenter:datacenter1",
"vsphere_compute:compute_resource1",
"vsphere_cluster:compute_resource1",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host2", spec="host", tags=tags)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host3", spec="host", tags=tags)
# ...on VMs
assertMOR(vsphere, instance, spec="vm", count=1)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_host:host3",
"vsphere_type:vm",
]
assertMOR(vsphere, instance, name="vm4", spec="vm", subset=True, tags=tags)
def test_use_guest_hostname(vsphere, instance):
# Default value
with mock.patch("datadog_checks.vsphere.VSphereCheck._get_all_objs") as mock_get_all_objs, mock.patch(
"datadog_checks.vsphere.vsphere.vmodl"
):
vsphere._cache_morlist_raw(instance)
# Default value
assert not mock_get_all_objs.call_args[1]["use_guest_hostname"]
# use guest hostname
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assert mock_get_all_objs.call_args[1]["use_guest_hostname"]
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
# Discover hosts and virtual machines
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assertMOR(vsphere, instance, spec="vm", count=3)
# Fallback on VM name when guest hostname not available
assertMOR(vsphere, instance, name="vm1", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm2_guest", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm4_guest", spec="vm", subset=True)
def test__process_mor_objects_queue(vsphere, instance):
vsphere.log = MagicMock()
vsphere._process_mor_objects_queue_async = MagicMock()
vsphere._process_mor_objects_queue(instance)
# Queue hasn't been initialized
vsphere.log.debug.assert_called_once_with(
"Objects queue is not initialized yet for instance %s, skipping processing", vsphere._instance_key(instance)
)
vsphere.batch_morlist_size = 1
i_key = vsphere._instance_key(instance)
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only
for call_args in vsphere._process_mor_objects_queue_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
instance["collect_realtime_only"] = False
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1 datastore
def test_collect_realtime_only(vsphere, instance):
"""
    Test that the collect_realtime_only parameter acts as expected
"""
vsphere._process_mor_objects_queue_async = MagicMock()
instance["collect_realtime_only"] = False
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
    # Called three times: once for the 2 datacenters, once for the 2 clusters, once for the datastore
assert vsphere._process_mor_objects_queue_async.call_count == 3
instance["collect_realtime_only"] = True
vsphere._process_mor_objects_queue_async.reset_mock()
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
assert vsphere._process_mor_objects_queue_async.call_count == 0
def test__cache_metrics_metadata(vsphere, instance):
vsphere.metadata_cache = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance))
vsphere.metadata_cache.set_metadata.assert_called_once()
vsphere.metadata_cache.set_metric_ids.assert_called_once()
def test__cache_metrics_metadata_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
i_key = vsphere._instance_key(instance)
counter = MagicMock()
counter.rollupType = "average"
counter.key = 1
vsphere.format_metric_name = MagicMock()
# New way
instance["collection_level"] = 3
server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter]
vsphere._cache_metrics_metadata(instance)
server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3)
assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter)
# Compatibility mode
instance["all_metrics"] = False
del instance["collection_level"]
vsphere.format_metric_name.reset_mock()
server_instance.content.perfManager.perfCounter = [counter]
vsphere._cache_metrics_metadata(instance)
assert not vsphere.metadata_cache._metric_ids[i_key]
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True)
def test_in_compatibility_mode(vsphere, instance):
vsphere.log = MagicMock()
instance["collection_level"] = 2
assert not vsphere.in_compatibility_mode(instance)
instance["all_metrics"] = True
assert not vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert not vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
del instance["collection_level"]
vsphere.log.reset_mock()
assert vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
def test_format_metric_name(vsphere):
counter = MagicMock()
counter.groupInfo.key = "group"
counter.nameInfo.key = "name"
counter.rollupType = "rollup"
assert vsphere.format_metric_name(counter, compatibility=True) == "group.name"
for rollup, short_rollup in SHORT_ROLLUP.items():
counter.rollupType = rollup
assert vsphere.format_metric_name(counter) == "group.name.{}".format(short_rollup)
def test_collect_metrics(vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere.batch_morlist_size = 1
vsphere._collect_metrics_async = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
vsphere.collect_metrics(instance)
assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters are not collected
for call_args in vsphere._collect_metrics_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
def test__collect_metrics_async_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])]
vsphere.mor_cache = MagicMock()
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "unknown"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.log = MagicMock()
vsphere.in_compatibility_mode.return_value = True
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown')
vsphere.log.reset_mock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_not_called()
def test__collect_metrics_async_hostname(vsphere, instance, aggregator):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])]
mor = {"hostname": "foo"}
vsphere.mor_cache = MagicMock()
vsphere.mor_cache.get_mor.return_value = mor
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="foo")
def test_check(vsphere, instance):
"""
Test the check() method
"""
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags:
vsphere.check(instance)
set_external_tags.assert_called_once()
all_the_tags = dict(set_external_tags.call_args[0][0])
assert all_the_tags['vm4'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
assert all_the_tags['host3'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_type:host',
]
assert all_the_tags['vm2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['vm1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
def test_service_check_ko(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
# SmartConnect fails
SmartConnect.side_effect = Exception()
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
aggregator.reset()
# SmartConnect succeeds, CurrentTime fails
server = MagicMock()
server.CurrentTime.side_effect = Exception()
SmartConnect.side_effect = None
SmartConnect.return_value = server
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
def test_service_check_ok(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
SmartConnect.return_value = get_mocked_server()
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS
)
def test__instance_key(vsphere, instance):
assert vsphere._instance_key(instance) == "vsphere_mock"
del instance['name']
with pytest.raises(BadConfigError):
vsphere._instance_key(instance)
def test__should_cache(instance):
now = time.time()
# do not use fixtures for the check instance, some params are set at
# __init__ time and we need to instantiate the check multiple times
check = VSphereCheck('vsphere', {}, {}, [instance])
i_key = check._instance_key(instance)
# first run should always cache
assert check._should_cache(instance, CacheConfig.Morlist)
assert check._should_cache(instance, CacheConfig.Metadata)
# explicitly set cache expiration times, don't use defaults so we also test
# configuration is properly propagated
init_config = {
'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL,
'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
# simulate previous runs, set the last execution time in the past
check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL))
check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL))
with mock.patch("time.time", return_value=now):
assert not check._should_cache(instance, CacheConfig.Morlist)
assert not check._should_cache(instance, CacheConfig.Metadata)
def alarm_event(from_status='green', to_status='red', message='Some error'):
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine')
dc = MockedMOR(spec="Datacenter")
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
alarm = MockedMOR(spec="Alarm")
alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1')
entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1')
event = vim.event.AlarmStatusChangedEvent(
entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg
)
setattr(event, 'from', from_status) # noqa: B009
return event
def migrated_event():
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine', name='vm1')
vm_arg = vim.event.VmEventArgument(vm=vm)
host = MockedMOR(spec='HostSystem')
host_arg = vim.event.HostEventArgument(host=host, name='host1')
host_dest = MockedMOR(spec='HostSystem')
host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2')
dc = MockedMOR(spec='Datacenter')
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
dc_dest = MockedMOR(spec='Datacenter')
dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2')
ds = MockedMOR(spec='Datastore')
ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1')
ds_dest = MockedMOR(spec='Datastore')
ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2')
event = vim.event.VmBeingHotMigratedEvent(
vm=vm_arg,
userName='John',
fullFormattedMessage='Some error',
createdTime=now,
host=host_arg,
destHost=host_dest_arg,
datacenter=dc_arg,
destDatacenter=dc_dest_arg,
ds=ds_arg,
destDatastore=ds_dest_arg,
)
return event
def test_events(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_tags(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"John has launched a hot migration of this virtual machine",
exact_match=False,
tags=[
'foo:bar',
'vsphere_host:host1',
'vsphere_host:host2',
'vsphere_datacenter:dc1',
'vsphere_datacenter:dc2',
],
)
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_gray_handled(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', message='Went from Gray to Red')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was gray and it's now red.", tags=['foo:bar']
)
event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was yellow and it's now gray.",
tags=['foo:bar'],
alert_type='info',
)
def test_events_gray_ignored(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
assert not aggregator.events
event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
assert not aggregator.events
| [
"mock.patch",
"pyVmomi.vim.event.HostEventArgument",
"datetime.datetime.utcnow",
"pyVmomi.vim.event.VmBeingHotMigratedEvent",
"pyVmomi.vim.event.AlarmEventArgument",
"mock.patch.object",
"datadog_checks.vsphere.vsphere.SHORT_ROLLUP.items",
"pyVmomi.vim.event.ManagedEntityEventArgument",
"pyVmomi.vim.event.DatastoreEventArgument",
"pytest.raises",
"pyVmomi.vim.event.AlarmStatusChangedEvent",
"pyVmomi.vim.event.VmEventArgument",
"datadog_checks.vsphere.VSphereCheck._is_excluded",
"pyVmomi.vim.event.DatacenterEventArgument",
"datadog_checks.vsphere.VSphereCheck",
"time.time",
"mock.MagicMock"
]
| [((1225, 1277), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', 'init_config', '{}', '[instance]'], {}), "('vsphere', init_config, {}, [instance])\n", (1237, 1277), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((2031, 2074), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', '{}', '{}', '[instance]'], {}), "('vsphere', {}, {}, [instance])\n", (2043, 2074), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((2129, 2215), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', "{'excluded_host_tags': ['vsphere_host']}", '{}', '[instance]'], {}), "('vsphere', {'excluded_host_tags': ['vsphere_host']}, {}, [\n instance])\n", (2141, 2215), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((2319, 2405), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', "{'excluded_host_tags': ['vsphere_host']}", '{}', '[instance]'], {}), "('vsphere', {'excluded_host_tags': ['vsphere_host']}, {}, [\n instance])\n", (2331, 2405), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((5024, 5121), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['excluded_host', "{'name': excluded_host.name}", 'include_regexes', 'None'], {}), "(excluded_host, {'name': excluded_host.name},\n include_regexes, None)\n", (5049, 5121), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((5129, 5222), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['excluded_vm', "{'name': excluded_vm.name}", 'include_regexes', 'None'], {}), "(excluded_vm, {'name': excluded_vm.name},\n include_regexes, None)\n", (5154, 5222), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((5615, 5716), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['included_vm', "{'customValue': []}", 'include_regexes', 'include_only_marked'], {}), "(included_vm, {'customValue': []}, include_regexes,\n include_only_marked)\n", (5640, 5716), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((15299, 15310), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (15308, 15310), False, 'from mock import MagicMock\n'), ((15358, 15369), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (15367, 15369), False, 'from mock import MagicMock\n'), ((17168, 17179), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (17177, 17179), False, 'from mock import MagicMock\n'), ((17964, 17975), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (17973, 17975), False, 'from mock import MagicMock\n'), ((18433, 18444), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (18442, 18444), False, 'from mock import MagicMock\n'), ((18533, 18544), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (18542, 18544), False, 'from mock import MagicMock\n'), ((19539, 19550), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (19548, 19550), False, 'from mock import MagicMock\n'), ((20229, 20240), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (20238, 20240), False, 'from mock import MagicMock\n'), ((20461, 20481), 'datadog_checks.vsphere.vsphere.SHORT_ROLLUP.items', 'SHORT_ROLLUP.items', ([], {}), '()\n', (20479, 20481), False, 'from datadog_checks.vsphere.vsphere import REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP\n'), ((21574, 21585), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (21583, 21585), False, 'from mock import MagicMock\n'), ((21615, 21626), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (21624, 
21626), False, 'from mock import MagicMock\n'), ((21738, 21749), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (21747, 21749), False, 'from mock import MagicMock\n'), ((21768, 21779), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (21777, 21779), False, 'from mock import MagicMock\n'), ((22294, 22305), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (22303, 22305), False, 'from mock import MagicMock\n'), ((22480, 22491), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (22489, 22491), False, 'from mock import MagicMock\n'), ((22570, 22581), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (22579, 22581), False, 'from mock import MagicMock\n'), ((22708, 22719), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (22717, 22719), False, 'from mock import MagicMock\n'), ((27623, 27634), 'time.time', 'time.time', ([], {}), '()\n', (27632, 27634), False, 'import time\n'), ((27792, 27835), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', '{}', '{}', '[instance]'], {}), "('vsphere', {}, {}, [instance])\n", (27804, 27835), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((28352, 28404), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', 'init_config', '{}', '[instance]'], {}), "('vsphere', init_config, {}, [instance])\n", (28364, 28404), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((28964, 28981), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (28979, 28981), False, 'from datetime import datetime\n'), ((29075, 29135), 'pyVmomi.vim.event.DatacenterEventArgument', 'vim.event.DatacenterEventArgument', ([], {'datacenter': 'dc', 'name': '"""dc1"""'}), "(datacenter=dc, name='dc1')\n", (29108, 29135), False, 'from pyVmomi import vim\n'), ((29188, 29244), 'pyVmomi.vim.event.AlarmEventArgument', 'vim.event.AlarmEventArgument', ([], {'alarm': 'alarm', 'name': '"""alarm1"""'}), "(alarm=alarm, name='alarm1')\n", (29216, 29244), False, 'from pyVmomi import vim\n'), ((29258, 29317), 'pyVmomi.vim.event.ManagedEntityEventArgument', 'vim.event.ManagedEntityEventArgument', ([], {'entity': 'vm', 'name': '"""vm1"""'}), "(entity=vm, name='vm1')\n", (29294, 29317), False, 'from pyVmomi import vim\n'), ((29330, 29480), 'pyVmomi.vim.event.AlarmStatusChangedEvent', 'vim.event.AlarmStatusChangedEvent', ([], {'entity': 'entity', 'fullFormattedMessage': 'message', 'createdTime': 'now', 'to': 'to_status', 'datacenter': 'dc_arg', 'alarm': 'alarm_arg'}), '(entity=entity, fullFormattedMessage=\n message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg)\n', (29363, 29480), False, 'from pyVmomi import vim\n'), ((29595, 29612), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (29610, 29612), False, 'from datetime import datetime\n'), ((29680, 29712), 'pyVmomi.vim.event.VmEventArgument', 'vim.event.VmEventArgument', ([], {'vm': 'vm'}), '(vm=vm)\n', (29705, 29712), False, 'from pyVmomi import vim\n'), ((29768, 29820), 'pyVmomi.vim.event.HostEventArgument', 'vim.event.HostEventArgument', ([], {'host': 'host', 'name': '"""host1"""'}), "(host=host, name='host1')\n", (29795, 29820), False, 'from pyVmomi import vim\n'), ((29886, 29943), 'pyVmomi.vim.event.HostEventArgument', 'vim.event.HostEventArgument', ([], {'host': 'host_dest', 'name': '"""host2"""'}), "(host=host_dest, name='host2')\n", (29913, 29943), False, 'from pyVmomi import vim\n'), ((29995, 30055), 'pyVmomi.vim.event.DatacenterEventArgument', 'vim.event.DatacenterEventArgument', ([], {'datacenter': 'dc', 'name': '"""dc1"""'}), "(datacenter=dc, 
name='dc1')\n", (30028, 30055), False, 'from pyVmomi import vim\n'), ((30117, 30182), 'pyVmomi.vim.event.DatacenterEventArgument', 'vim.event.DatacenterEventArgument', ([], {'datacenter': 'dc_dest', 'name': '"""dc2"""'}), "(datacenter=dc_dest, name='dc2')\n", (30150, 30182), False, 'from pyVmomi import vim\n'), ((30233, 30291), 'pyVmomi.vim.event.DatastoreEventArgument', 'vim.event.DatastoreEventArgument', ([], {'datastore': 'ds', 'name': '"""ds1"""'}), "(datastore=ds, name='ds1')\n", (30265, 30291), False, 'from pyVmomi import vim\n'), ((30352, 30415), 'pyVmomi.vim.event.DatastoreEventArgument', 'vim.event.DatastoreEventArgument', ([], {'datastore': 'ds_dest', 'name': '"""ds2"""'}), "(datastore=ds_dest, name='ds2')\n", (30384, 30415), False, 'from pyVmomi import vim\n'), ((30428, 30677), 'pyVmomi.vim.event.VmBeingHotMigratedEvent', 'vim.event.VmBeingHotMigratedEvent', ([], {'vm': 'vm_arg', 'userName': '"""John"""', 'fullFormattedMessage': '"""Some error"""', 'createdTime': 'now', 'host': 'host_arg', 'destHost': 'host_dest_arg', 'datacenter': 'dc_arg', 'destDatacenter': 'dc_dest_arg', 'ds': 'ds_arg', 'destDatastore': 'ds_dest_arg'}), "(vm=vm_arg, userName='John',\n fullFormattedMessage='Some error', createdTime=now, host=host_arg,\n destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg,\n ds=ds_arg, destDatastore=ds_dest_arg)\n", (30461, 30677), False, 'from pyVmomi import vim\n'), ((871, 900), 'pytest.raises', 'pytest.raises', (['BadConfigError'], {}), '(BadConfigError)\n', (884, 900), False, 'import pytest\n'), ((969, 1012), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""vsphere"""', '{}', '{}', "[{'': ''}]"], {}), "('vsphere', {}, {}, [{'': ''}])\n", (981, 1012), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((2973, 3087), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes"""'], {'return_value': 'mocked_mors_attrs'}), "('datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes',\n return_value=mocked_mors_attrs)\n", (2983, 3087), False, 'import mock\n'), ((3168, 3179), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3177, 3179), False, 'from mock import MagicMock\n'), ((3358, 3369), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3367, 3369), False, 'from mock import MagicMock\n'), ((3504, 3515), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3513, 3515), False, 'from mock import MagicMock\n'), ((4674, 4771), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['included_host', "{'name': included_host.name}", 'include_regexes', 'None'], {}), "(included_host, {'name': included_host.name},\n include_regexes, None)\n", (4699, 4771), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((4783, 4876), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['included_vm', "{'name': included_vm.name}", 'include_regexes', 'None'], {}), "(included_vm, {'name': included_vm.name},\n include_regexes, None)\n", (4808, 4876), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((5394, 5517), 'datadog_checks.vsphere.VSphereCheck._is_excluded', 'VSphereCheck._is_excluded', (['included_vm', "{'customValue': included_vm.customValue}", 'include_regexes', 'include_only_marked'], {}), "(included_vm, {'customValue': included_vm.\n customValue}, include_regexes, include_only_marked)\n", (5419, 5517), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((6771, 6885), 'mock.patch', 'mock.patch', 
(['"""datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes"""'], {'return_value': 'mocked_mors_attrs'}), "('datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes',\n return_value=mocked_mors_attrs)\n", (6781, 6885), False, 'import mock\n'), ((8778, 8892), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes"""'], {'return_value': 'mocked_mors_attrs'}), "('datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes',\n return_value=mocked_mors_attrs)\n", (8788, 8892), False, 'import mock\n'), ((10885, 10935), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (10895, 10935), False, 'import mock\n'), ((10951, 10988), 'mock.MagicMock', 'MagicMock', ([], {'missingSet': 'None', 'obj': '"""obj"""'}), "(missingSet=None, obj='obj')\n", (10960, 10988), False, 'from mock import MagicMock\n'), ((11006, 11042), 'mock.MagicMock', 'MagicMock', ([], {'token': 'None', 'objects': '[obj]'}), '(token=None, objects=[obj])\n', (11015, 11042), False, 'from mock import MagicMock\n'), ((11150, 11161), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (11159, 11161), False, 'from mock import MagicMock\n'), ((12337, 12387), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (12347, 12387), False, 'import mock\n'), ((14189, 14252), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.VSphereCheck._get_all_objs"""'], {}), "('datadog_checks.vsphere.VSphereCheck._get_all_objs')\n", (14199, 14252), False, 'import mock\n'), ((14275, 14325), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (14285, 14325), False, 'import mock\n'), ((14681, 14731), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (14691, 14731), False, 'import mock\n'), ((15714, 15764), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (15724, 15764), False, 'import mock\n'), ((17235, 17285), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (17245, 17285), False, 'import mock\n'), ((17658, 17708), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (17668, 17708), False, 'import mock\n'), ((20666, 20716), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (20676, 20716), False, 'import mock\n'), ((20798, 20809), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (20807, 20809), False, 'from mock import MagicMock\n'), ((22399, 22424), 'mock.MagicMock', 'MagicMock', ([], {'value': '[result]'}), '(value=[result])\n', (22408, 22424), False, 'from mock import MagicMock\n'), ((22992, 23042), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (23002, 23042), False, 'import mock\n'), ((25897, 25937), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""disk"""', '{}', '{}', '[instance]'], {}), "('disk', {}, {}, [instance])\n", (25909, 25937), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((25949, 26014), 'mock.patch', 'mock.patch', 
(['"""datadog_checks.vsphere.vsphere.connect.SmartConnect"""'], {}), "('datadog_checks.vsphere.vsphere.connect.SmartConnect')\n", (25959, 26014), False, 'import mock\n'), ((26445, 26456), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (26454, 26456), False, 'from mock import MagicMock\n'), ((26916, 26956), 'datadog_checks.vsphere.VSphereCheck', 'VSphereCheck', (['"""disk"""', '{}', '{}', '[instance]'], {}), "('disk', {}, {}, [instance])\n", (26928, 26956), False, 'from datadog_checks.vsphere import VSphereCheck\n'), ((26967, 27017), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (26977, 27017), False, 'import mock\n'), ((27506, 27535), 'pytest.raises', 'pytest.raises', (['BadConfigError'], {}), '(BadConfigError)\n', (27519, 27535), False, 'import pytest\n'), ((28691, 28732), 'mock.patch', 'mock.patch', (['"""time.time"""'], {'return_value': 'now'}), "('time.time', return_value=now)\n", (28701, 28732), False, 'import mock\n'), ((30829, 30879), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (30839, 30879), False, 'import mock\n'), ((31362, 31412), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (31372, 31412), False, 'import mock\n'), ((32460, 32510), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (32470, 32510), False, 'import mock\n'), ((33488, 33538), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.vmodl"""'], {}), "('datadog_checks.vsphere.vsphere.vmodl')\n", (33498, 33538), False, 'import mock\n'), ((3280, 3323), 'mock.MagicMock', 'MagicMock', ([], {'value': '[result]', 'entity': 'mocked_vm'}), '(value=[result], entity=mocked_vm)\n', (3289, 3323), False, 'from mock import MagicMock\n'), ((11362, 11399), 'mock.MagicMock', 'MagicMock', ([], {'path': '"""prop"""', 'fault': '"""fault"""'}), "(path='prop', fault='fault')\n", (11371, 11399), False, 'from mock import MagicMock\n'), ((23057, 23104), 'mock.patch.object', 'mock.patch.object', (['vsphere', '"""set_external_tags"""'], {}), "(vsphere, 'set_external_tags')\n", (23074, 23104), False, 'import mock\n'), ((26122, 26152), 'pytest.raises', 'pytest.raises', (['ConnectionError'], {}), '(ConnectionError)\n', (26135, 26152), False, 'import pytest\n'), ((26607, 26637), 'pytest.raises', 'pytest.raises', (['ConnectionError'], {}), '(ConnectionError)\n', (26620, 26637), False, 'import pytest\n'), ((27032, 27097), 'mock.patch', 'mock.patch', (['"""datadog_checks.vsphere.vsphere.connect.SmartConnect"""'], {}), "('datadog_checks.vsphere.vsphere.connect.SmartConnect')\n", (27042, 27097), False, 'import mock\n'), ((21535, 21546), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (21544, 21546), False, 'from mock import MagicMock\n')] |
# File taken from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py
"""Utility function to parse metadata from a THREDDS Data Server catalog."""
def walk(cat, depth=1):
"""Return a generator walking a THREDDS data catalog for datasets.
Parameters
----------
cat : TDSCatalog
THREDDS catalog.
depth : int
Maximum recursive depth. Setting 0 will return only datasets within the top-level catalog. If None,
depth is set to 1000.
"""
yield from cat.datasets.items()
if depth is None:
depth = 1000
if depth > 0:
for name, ref in cat.catalog_refs.items():
child = ref.follow()
yield from walk(child, depth=depth-1)
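# Illustrative usage (added sketch, not part of the original module): `walk` only needs a
# catalog object exposing `datasets` and `catalog_refs`, e.g. siphon's TDSCatalog.
# The URL below is a placeholder, not a real endpoint from this project.
def _demo_walk():
    from siphon.catalog import TDSCatalog
    cat = TDSCatalog("https://thredds.example.org/thredds/catalog.xml")
    for name, ds in walk(cat, depth=1):
        print(name, ds.access_urls.get("NCML"))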
def attrs_from_ds(ds):
"""Extract attributes from TDS Dataset."""
url = ds.access_urls["NCML"]
attrs = attrs_from_ncml(url)
attrs["__services__"] = ds.access_urls
return attrs
def attrs_from_ncml(url):
"""Extract attributes from NcML file.
Parameters
----------
url : str
Link to NcML service of THREDDS server for a dataset.
Returns
-------
dict
Global attribute values keyed by facet names, with variable attributes in `__variable__` nested dict, and
additional specialized attributes in `__group__` nested dict.
"""
import lxml.etree
import requests
parser = lxml.etree.XMLParser(encoding='UTF-8')
ns = {"ncml": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}
# Parse XML content - UTF-8 encoded documents need to be read as bytes
xml = requests.get(url).content
doc = lxml.etree.fromstring(xml, parser=parser)
nc = doc.xpath("/ncml:netcdf", namespaces=ns)[0]
# Extract global attributes
out = _attrib_to_dict(nc.xpath("ncml:attribute", namespaces=ns))
# Extract group attributes
gr = {}
for group in nc.xpath("ncml:group", namespaces=ns):
gr[group.attrib["name"]] = _attrib_to_dict(group.xpath("ncml:attribute", namespaces=ns))
# Extract variable attributes
va = {}
for variable in nc.xpath("ncml:variable", namespaces=ns):
if '_CoordinateAxisType' in variable.xpath("ncml:attribute/@name", namespaces=ns):
continue
va[variable.attrib["name"]] = _attrib_to_dict(variable.xpath("ncml:attribute", namespaces=ns))
out["__group__"] = gr
out["__variable__"] = va
return out
def _attrib_to_dict(elems):
"""Convert element attributes to dictionary.
Ignore attributes with names starting with _
"""
hidden_prefix = "_"
out = {}
for e in elems:
a = e.attrib
if a["name"].startswith(hidden_prefix):
continue
out[a["name"]] = a["value"]
    return out
| [
"requests.get"
]
| [((1578, 1595), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1590, 1595), False, 'import requests\n')] |
import numpy as np
import random
from collections import namedtuple
def generate_prob_matrix(n):
matrix = np.random.rand(n, n)
for i in range(n):
matrix[i][i] = 0
for i in range(n):
matrix[i] = (1/np.sum(matrix[i]))*matrix[i]
return matrix
def categorical(p):
return np.random.choice(len(p), 1, p=p)[0]
Drone = namedtuple('Drone', 'speed probability')
Site = namedtuple('Site', 'location')
class System:
def __init__(self, sites, drones):
self.sites = {}
self.drones = {}
n = len(sites)
for i, drone in enumerate(drones):
self.drones[i] = drone
for i, site in enumerate(sites):
self.sites[i] = site
distance = np.zeros([n, n])
for i in range(n):
for j in range(n):
if i < j:
x = np.subtract(sites[i], sites[j])
d = np.linalg.norm(x)
distance[i][j] = d
distance[j][i] = d
self.distance = distance
def get_site(self, site_id):
return self.sites[site_id]
def get_drone(self, drone_id):
return self.drones[drone_id]
def compute_path_distance(self, path):
n = len(path)
d = 0
for i in range(n - 1):
d += self.distance[path[i]][path[i + 1]]
return d
def compute_path_time(self, path, drone_id):
d = self.compute_path_distance(path)
return d/self.get_drone(drone_id).speed
def generate_path_of_length(self, length, drone_id):
path = []
P = self.get_drone(drone_id).probability
num_sites = len(self.sites)
s = categorical([1/num_sites]*num_sites)
path.append(s)
site = s
for i in range(length):
site = categorical(P[site])
path.append(site)
return path
def generate_path(self, s, t, drone_id):
path = [s]
P = self.get_drone(drone_id).probability
site = categorical(P[s])
path.append(site)
while site != t:
site = categorical(P[site])
path.append(site)
return path
@staticmethod
def generate_random_system(n, k):
locations = np.random.rand(n, 2)
sites = []
for i in locations:
sites.append(Site(i))
drones = []
for i in range(k):
speed = abs(random.random())
probability = generate_prob_matrix(n)
drones.append(Drone(speed, probability))
return System(sites, drones)
def _compute_arrival_times(system, path, drone_id):
    # Cumulative arrival time of the given drone at each site along its path.
    arrival_times = []
    t = 0
    for i in range(len(path) - 1):
        t += system.compute_path_time(path[i:i+2], drone_id=drone_id)
        arrival_times.append((drone_id, path[i], path[i+1], t))
    return arrival_times
def _generate_arrival_times(system, num_drones, length):
    arrival_times = [[] for _ in range(len(system.sites))]
    events = []
    for i in range(num_drones):
        # Assumed intent: each drone follows a random path of the given length.
        path = system.generate_path_of_length(length, drone_id=i)
        events.extend(_compute_arrival_times(system, path, drone_id=i))
def get_key(item):
return item[3]
events = sorted(events, key=get_key)
for event in events:
drone_id = event[0]
site_id = event[2]
time = event[3]
arrival_times[site_id].append((drone_id, time))
return arrival_times
def compute_cost(system, n):
    # Assumed interpretation: `n` is the simulated path length and every drone in the system contributes events.
    arrival_times = _generate_arrival_times(system, len(system.drones), n)
interarrival_times = [[] for _ in range(len(system.sites))]
for i in range(len(arrival_times)):
arrivals = arrival_times[i]
for j in range(len(arrivals) - 1):
interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1])
interarrival_avgs = [compute_average(i) for i in interarrival_times]
return max(interarrival_avgs)
def compute_average(data):
return (1/len(data))*sum(data)
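# Illustrative usage (added sketch; the numbers are arbitrary): build a random system and
# sample one drone's path between two sites.
if __name__ == '__main__':
    demo_system = System.generate_random_system(n=5, k=2)
    demo_path = demo_system.generate_path(s=0, t=3, drone_id=0)
    print('sampled path:', demo_path)
    print('travel time:', demo_system.compute_path_time(demo_path, drone_id=0))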
| [
"collections.namedtuple",
"numpy.random.rand",
"numpy.subtract",
"numpy.sum",
"numpy.zeros",
"numpy.linalg.norm",
"random.random"
]
| [((357, 397), 'collections.namedtuple', 'namedtuple', (['"""Drone"""', '"""speed probability"""'], {}), "('Drone', 'speed probability')\n", (367, 397), False, 'from collections import namedtuple\n'), ((405, 435), 'collections.namedtuple', 'namedtuple', (['"""Site"""', '"""location"""'], {}), "('Site', 'location')\n", (415, 435), False, 'from collections import namedtuple\n'), ((113, 133), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (127, 133), True, 'import numpy as np\n'), ((738, 754), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (746, 754), True, 'import numpy as np\n'), ((2252, 2272), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (2266, 2272), True, 'import numpy as np\n'), ((230, 247), 'numpy.sum', 'np.sum', (['matrix[i]'], {}), '(matrix[i])\n', (236, 247), True, 'import numpy as np\n'), ((2426, 2441), 'random.random', 'random.random', ([], {}), '()\n', (2439, 2441), False, 'import random\n'), ((863, 894), 'numpy.subtract', 'np.subtract', (['sites[i]', 'sites[j]'], {}), '(sites[i], sites[j])\n', (874, 894), True, 'import numpy as np\n'), ((919, 936), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (933, 936), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
# &Author AnFany
# Import the two K-means implementations
import Kmeans_AnFany as K_Af # AnFany
import Kmeans_Sklearn as K_Sk # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # needed so that the Chinese labels in the figures render correctly
mpl.rcParams['font.sans-serif'] = ['FangSong']  # use the FangSong font for Chinese text
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# Generate a synthetic dataset with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# Scatter plot of the raw training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# Run the two implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
# Scatter plot of the clusters produced by each algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the samples of cluster jj
    for ss in Center:
        if du:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers (once, with a legend entry)
            du = 0
        else:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# Euclidean distance between samples and a cluster center
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# Cost of the final clustering: total distance of samples to their cluster centers
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # cluster mean
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
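# Quick sanity check of Cost (illustrative addition): two points assigned to one cluster whose
# mean is their midpoint are each at distance 1.0 from the center, so the total cost is 2.0.
assert abs(Cost(np.array([[0., 0.], [2., 0.]]), {0: [0, 1]}) - 2.0) < 1e-9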
# Display the final results side by side
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"sklearn.datasets.make_blobs",
"numpy.array",
"numpy.sum",
"Kmeans_AnFany.op_kmeans",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"Kmeans_Sklearn.KMeans",
"matplotlib.pyplot.subplot",
"Kmeans_Sklearn.trans",
"matplotlib.pyplot.show"
]
| [((393, 443), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(600)', 'centers': '(6)', 'n_features': '(2)'}), '(n_samples=600, centers=6, n_features=2)\n', (403, 443), False, 'from sklearn.datasets import make_blobs\n'), ((964, 993), 'Kmeans_AnFany.op_kmeans', 'K_Af.op_kmeans', (['X'], {'countcen': '(6)'}), '(X, countcen=6)\n', (978, 993), True, 'import Kmeans_AnFany as K_Af\n'), ((1015, 1069), 'Kmeans_Sklearn.KMeans', 'K_Sk.KMeans', ([], {'init': '"""k-means++"""', 'n_clusters': '(6)', 'n_init': '(10)'}), "(init='k-means++', n_clusters=6, n_init=10)\n", (1026, 1069), True, 'import Kmeans_Sklearn as K_Sk\n'), ((1123, 1141), 'Kmeans_Sklearn.trans', 'K_Sk.trans', (['result'], {}), '(result)\n', (1133, 1141), True, 'import Kmeans_Sklearn as K_Sk\n'), ((2335, 2355), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2346, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2398), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2389, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2434, 2454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2445, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2515, 2535), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2526, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2552), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2545, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2689, 2691), True, 'import matplotlib.pyplot as plt\n'), ((792, 807), 'matplotlib.pyplot.title', 'plt.title', (['titl'], {}), '(titl)\n', (801, 807), True, 'import matplotlib.pyplot as plt\n'), ((887, 904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""特征1"""'], {}), "('特征1')\n", (897, 904), True, 'import matplotlib.pyplot as plt\n'), ((910, 927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""特征2"""'], {}), "('特征2')\n", (920, 927), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1753), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.2, 1)'}), '(bbox_to_anchor=(1.2, 1))\n', (1728, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1774), 'matplotlib.pyplot.title', 'plt.title', (['titl'], {}), '(titl)\n', (1768, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1797), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""特征1"""'], {}), "('特征1')\n", (1790, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1820), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""特征2"""'], {}), "('特征2')\n", (1813, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1890), 'numpy.array', 'np.array', (['[center]'], {}), '([center])\n', (1880, 1890), True, 'import numpy as np\n'), ((1905, 1921), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (1913, 1921), True, 'import numpy as np\n'), ((713, 786), 'matplotlib.pyplot.scatter', 'plt.scatter', (['datax[:, 0]', 'datax[:, -1]'], {'c': 'co[ii]', 's': '(50)', 'marker': 'marker[ii]'}), '(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])\n', (724, 786), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1465), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xdata[:, 0]', 'xdata[:, -1]'], {'c': 'co[jj]', 's': '(50)', 'marker': 'marker[jj]', 'label': "('%d类' % jj)"}), "(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj],\n label='%d类' % jj)\n", (1381, 1465), True, 'import 
matplotlib.pyplot as plt\n'), ((2164, 2200), 'numpy.mean', 'np.mean', (['Xdata[typedict[kk]]'], {'axis': '(0)'}), '(Xdata[typedict[kk]], axis=0)\n', (2171, 2200), True, 'import numpy as np\n'), ((1525, 1590), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ss[0]', 'ss[1]'], {'c': '"""k"""', 's': '(100)', 'marker': '"""8"""', 'label': '"""类别中心"""'}), "(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')\n", (1536, 1590), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1699), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ss[0]', 'ss[1]'], {'c': '"""k"""', 's': '(100)', 'marker': '"""8"""'}), "(ss[0], ss[1], c='k', s=100, marker='8')\n", (1659, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1998), 'numpy.sum', 'np.sum', (['((sample - cen) ** 2)'], {'axis': '(1)'}), '((sample - cen) ** 2, axis=1)\n', (1969, 1998), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Control panel file
"""
import pddl_solver as pddl
import ik
import rospy
from get_object_position import get_object_position
import time
from constants import *
from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models
from delete_models import delete_all, delete_model
def control_panel():
robot = ik.MoveGroupPythonIntefaceTutorial()
# robot.go_to_init_state()
# robot.open_gripper()
bottle = 'bottle_1'
    # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
# current_bottle_orig_pos[-1] += BZS
while(True):
print()
cmd = raw_input("Enter command:\n open, close, init,\n gtb, hover, gtc, move,\n pour, cb, rb, ra,\n pgr, parm, pj,\n setj, att, box,\n del, dela, spawn, exit:\n")
if(cmd == 'open'): # open the gripper
robot.open_gripper()
elif(cmd == 'close'): # close the gripper
goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n"))
if(goal==""):
goal = -0.075
while(goal > 0 or goal < -0.12):
goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n"))
robot.close_gripper(goal)
elif(cmd == 'init'): # go to initial pose
robot.go_to_init_state()
elif(cmd == 'gtb'): # go to bottle
x,y,z = current_bottle_orig_pos
h = raw_input("Set z level: ")
if(h == ""):
h = BZS
else:
h = float(h)
robot.go_to_xyz(x, y, z + h)
elif(cmd == 'hover'): # hover over the bottle
x,y,z = current_bottle_orig_pos
robot.go_to_xyz(x, y, BUO)
elif(cmd == 'gtc'): # go to cup
# simulation
x,y,z = get_object_position('cup_1')
# real_world
# pos, angle = Real_world_PourPos[cup]
# x,y,z = pos
robot.go_to_xyz(x, y, CUO)
        elif(cmd == 'move'): # move the arm by a step along one axis
x,y,z = robot.get_arm_pose()
dir = raw_input("Enter coord: x,y or z:\n")
while(dir not in ['x','y','z']):
dir = raw_input("Enter coord: x,y or z:\n")
step = float(raw_input("Enter step size:\n"))
if(dir == 'x'):
x += step
elif(dir == 'y'):
y += step
elif(dir == 'z'):
z += step
robot.go_to_xyz(x, y, z)
        elif(cmd == 'pour'): # tilt the gripper to the pouring angle, then back
robot.rotate_gripper(angle = 1)
rospy.sleep(1.5)
robot.rotate_gripper(angle = 0)
elif(cmd == 'cb'): # change bottle
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
while(b_n not in [1,2,3,4,5,6]):
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
bottle = 'bottle_' + str(b_n)
            # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
elif(cmd == 'rb'): # reset bottle position
reset_model_position(bottle)
elif(cmd == 'ra'): # reset all models positions
reset_all()
elif(cmd == 'pgr'): # print gripper postiion
pos = robot.get_gripper_pose()
print("Current gripper coordinates: " + str(pos))
elif(cmd == 'parm'): # print arm postiion
pos = robot.get_arm_pose()
print("Current arm coordinates: " + str(pos))
elif(cmd == 'pj'): # print arm joints
current_joints = robot.get_arm_joints()
print("Current joints poistion: " + str(current_joints))
elif(cmd == 'setj'): # set robot joint angles
joints = robot.get_arm_joints()
# joints[0] = float(raw_input("Enter theta_0")) # We don't want to change the arm direction
t1 = raw_input("Enter theta_1: ")
t2 = raw_input("Enter theta_2: ")
t3 = raw_input("Enter theta_3: ")
if(t1 != ''):
joints[1] = float(t1)
if(t2 != ''):
joints[2] = float(t2)
if(t3 != ''):
joints[3] = float(t3)
joints[4] = 0
robot.set_joints(joints)
elif(cmd == 'att'): # attaches object to the gripper
robot.attach_object(bottle)
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'box'):
robot.add_box()
robot.attach_object('box')
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'del'):
delete_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was deleted")
elif(cmd == 'dela'):
delete_all()
print("All models were deleted")
elif(cmd == 'spawn'):
spawn_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was spawned")
elif(cmd == 'exit'): # exit control panel script
print('Finish performance')
return
else:
print('Wrong command')
if __name__ == '__main__':
control_panel() | [
"spawn_models.reset_model_position",
"spawn_models.spawn_model",
"spawn_models.reset_all",
"ik.MoveGroupPythonIntefaceTutorial",
"get_object_position.get_object_position",
"delete_models.delete_all",
"rospy.sleep",
"delete_models.delete_model"
]
| [((365, 401), 'ik.MoveGroupPythonIntefaceTutorial', 'ik.MoveGroupPythonIntefaceTutorial', ([], {}), '()\n', (399, 401), False, 'import ik\n'), ((533, 560), 'get_object_position.get_object_position', 'get_object_position', (['bottle'], {}), '(bottle)\n', (552, 560), False, 'from get_object_position import get_object_position\n'), ((1906, 1934), 'get_object_position.get_object_position', 'get_object_position', (['"""cup_1"""'], {}), "('cup_1')\n", (1925, 1934), False, 'from get_object_position import get_object_position\n'), ((2707, 2723), 'rospy.sleep', 'rospy.sleep', (['(1.5)'], {}), '(1.5)\n', (2718, 2723), False, 'import rospy\n'), ((3111, 3138), 'get_object_position.get_object_position', 'get_object_position', (['bottle'], {}), '(bottle)\n', (3130, 3138), False, 'from get_object_position import get_object_position\n'), ((3291, 3319), 'spawn_models.reset_model_position', 'reset_model_position', (['bottle'], {}), '(bottle)\n', (3311, 3319), False, 'from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models\n'), ((3393, 3404), 'spawn_models.reset_all', 'reset_all', ([], {}), '()\n', (3402, 3404), False, 'from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models\n'), ((4999, 5019), 'delete_models.delete_model', 'delete_model', (['bottle'], {}), '(bottle)\n', (5011, 5019), False, 'from delete_models import delete_all, delete_model\n'), ((5135, 5147), 'delete_models.delete_all', 'delete_all', ([], {}), '()\n', (5145, 5147), False, 'from delete_models import delete_all, delete_model\n'), ((5235, 5254), 'spawn_models.spawn_model', 'spawn_model', (['bottle'], {}), '(bottle)\n', (5246, 5254), False, 'from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models\n')] |
from Enigma.Rotor import Rotor
from Enigma.Reflector import Reflector
from Enigma.Plugboard import Plugboard
class Enigma:
def __init__(self , rotors = [ Rotor(0,"IC") , Rotor(0,"IIC") , Rotor(0,"IIIC") ] , plugboard = Plugboard() , reflector = Reflector("A")):
self.rotors = rotors
        for i in range(len(rotors)):
            if i + 1 < len(rotors):
                # Bind the next rotor at definition time; a late-binding closure over `i`
                # would make every callback step the same (last) rotor.
                rotors[i].on("Sidereal", lambda *args, nxt=rotors[i + 1]: nxt.step())
self.Plugboard = plugboard;
self.Reflector = reflector;
def encrypt(self,data):
data = data.upper().replace(" ","");
string = "";
for char in data:
string += self.each(char,True);
return string;
def decrypt(self,data):
data = data.upper();
string = "";
for char in data:
string += self.each(char,False);
return string;
def each(self,char,flag):
self.rotors[0].step()
output = self.Plugboard.get(char)
for rotor in self.rotors:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
output = self.Reflector.get(output)
for rotor in self.rotors[::-1]:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
return self.Plugboard.get(output);
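# Illustrative round trip (added sketch): two machines built with identical fresh settings.
# Assuming Rotor.unscramble inverts Rotor.scramble and the plugboard/reflector mappings are
# symmetric, decrypting the ciphertext reproduces the original text (spaces are stripped).
if __name__ == "__main__":
    def fresh_machine():
        return Enigma([Rotor(0, "IC"), Rotor(0, "IIC"), Rotor(0, "IIIC")], Plugboard(), Reflector("A"))
    cipher = fresh_machine().encrypt("HELLO WORLD")
    print(fresh_machine().decrypt(cipher))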
| [
"Enigma.Plugboard.Plugboard",
"Enigma.Rotor.Rotor",
"Enigma.Reflector.Reflector"
]
| [((229, 240), 'Enigma.Plugboard.Plugboard', 'Plugboard', ([], {}), '()\n', (238, 240), False, 'from Enigma.Plugboard import Plugboard\n'), ((255, 269), 'Enigma.Reflector.Reflector', 'Reflector', (['"""A"""'], {}), "('A')\n", (264, 269), False, 'from Enigma.Reflector import Reflector\n'), ((164, 178), 'Enigma.Rotor.Rotor', 'Rotor', (['(0)', '"""IC"""'], {}), "(0, 'IC')\n", (169, 178), False, 'from Enigma.Rotor import Rotor\n'), ((180, 195), 'Enigma.Rotor.Rotor', 'Rotor', (['(0)', '"""IIC"""'], {}), "(0, 'IIC')\n", (185, 195), False, 'from Enigma.Rotor import Rotor\n'), ((197, 213), 'Enigma.Rotor.Rotor', 'Rotor', (['(0)', '"""IIIC"""'], {}), "(0, 'IIIC')\n", (202, 213), False, 'from Enigma.Rotor import Rotor\n')] |
import math
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import loader
from magicauth import settings as magicauth_settings
from django.conf import settings as django_settings
from magicauth.models import MagicToken
import sendgrid
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY)
class SendTokenMixin(object):
"""
    Helper for sending an email that contains a login link carrying the MagicToken.
"""
def create_token(self, user):
token = MagicToken.objects.create(user=user)
return token
def get_user_from_email(self, user_email):
"""
Query the DB for the user corresponding to the email.
- We use get_user_model() instead of User (in case the Django app has customised the User
class)
- We use magicauth_settings.EMAIL_FIELD, which is the name of the field in the user
model. By default "username" but not always.
"""
user_class = get_user_model()
email_field = magicauth_settings.EMAIL_FIELD
field_lookup = {f"{email_field}__iexact": user_email}
user = user_class.objects.get(**field_lookup)
return user
def send_email(self, user, user_email, token, extra_context=None):
email_subject = magicauth_settings.EMAIL_SUBJECT
html_template = magicauth_settings.EMAIL_HTML_TEMPLATE
text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE
from_email = magicauth_settings.FROM_EMAIL
context = {
"token": token,
"user": user,
"site": get_current_site(self.request),
"TOKEN_DURATION_MINUTES": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60),
"TOKEN_DURATION_SECONDS": magicauth_settings.TOKEN_DURATION_SECONDS,
}
if extra_context:
context.update(extra_context)
text_message = loader.render_to_string(text_template, context)
html_message = loader.render_to_string(html_template, context)
mail = Mail(
from_email=(
django_settings.MAGICAUTH_FROM_EMAIL,
django_settings.MAGICAUTH_SENDER
),
to_emails=[user_email],
subject=email_subject,
html_content=html_message
)
sg.send(mail)
def send_token(self, user_email, extra_context=None):
user = self.get_user_from_email(user_email)
token = self.create_token(user)
self.send_email(user, user_email, token, extra_context)
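# Hypothetical usage sketch (added; the view and form field names below are assumptions, not
# part of this module): send_email() reads self.request, so SendTokenMixin is meant to be
# combined with a Django view, e.g. a FormView whose form collects the email address.
from django.views.generic import FormView
class _ExampleLoginView(SendTokenMixin, FormView):
    # A real view would also set form_class, template_name and success_url.
    def form_valid(self, form):
        self.send_token(form.cleaned_data["email"])
        return super().form_valid(form)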
| [
"django.contrib.auth.get_user_model",
"magicauth.models.MagicToken.objects.create",
"math.floor",
"sendgrid.helpers.mail.Mail",
"sendgrid.SendGridAPIClient",
"django.template.loader.render_to_string",
"django.contrib.sites.shortcuts.get_current_site"
]
| [((441, 501), 'sendgrid.SendGridAPIClient', 'sendgrid.SendGridAPIClient', (['django_settings.SENDGRID_API_KEY'], {}), '(django_settings.SENDGRID_API_KEY)\n', (467, 501), False, 'import sendgrid\n'), ((678, 714), 'magicauth.models.MagicToken.objects.create', 'MagicToken.objects.create', ([], {'user': 'user'}), '(user=user)\n', (703, 714), False, 'from magicauth.models import MagicToken\n'), ((1151, 1167), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1165, 1167), False, 'from django.contrib.auth import get_user_model\n'), ((2069, 2116), 'django.template.loader.render_to_string', 'loader.render_to_string', (['text_template', 'context'], {}), '(text_template, context)\n', (2092, 2116), False, 'from django.template import loader\n'), ((2140, 2187), 'django.template.loader.render_to_string', 'loader.render_to_string', (['html_template', 'context'], {}), '(html_template, context)\n', (2163, 2187), False, 'from django.template import loader\n'), ((2204, 2376), 'sendgrid.helpers.mail.Mail', 'Mail', ([], {'from_email': '(django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER)', 'to_emails': '[user_email]', 'subject': 'email_subject', 'html_content': 'html_message'}), '(from_email=(django_settings.MAGICAUTH_FROM_EMAIL, django_settings.\n MAGICAUTH_SENDER), to_emails=[user_email], subject=email_subject,\n html_content=html_message)\n', (2208, 2376), False, 'from sendgrid.helpers.mail import Mail\n'), ((1757, 1787), 'django.contrib.sites.shortcuts.get_current_site', 'get_current_site', (['self.request'], {}), '(self.request)\n', (1773, 1787), False, 'from django.contrib.sites.shortcuts import get_current_site\n'), ((1827, 1885), 'math.floor', 'math.floor', (['(magicauth_settings.TOKEN_DURATION_SECONDS / 60)'], {}), '(magicauth_settings.TOKEN_DURATION_SECONDS / 60)\n', (1837, 1885), False, 'import math\n')] |
import argparse
from deploy_tix.bugzilla_rest_client import BugzillaRESTClient
from deploy_tix.release_notes import ReleaseNotes
from output_helper import OutputHelper
def main(args=None):
parser = argparse.ArgumentParser(
description='Scripts for creating / updating deployment tickets in \
Bugzilla',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-a', '--application',
help='Example: loop-server',
required=True)
parser.add_argument(
'-B', '--bugzilla-mozilla',
help='Set this switch to post directly to bugzilla.mozilla.org \
(without switch posts to: bugzilla-dev.allizom.org)',
action='store_true',
default=False,
required=False)
subparsers = parser.add_subparsers(help='Ticket action')
# parser for ticket - {create} option
parser_create = \
subparsers.add_parser('NEW', help='Create a NEW deployment ticket.')
parser_create.add_argument(
'-o', '--repo-owner',
help='Example: mozilla-services',
default='mozilla-services',
required=False)
parser_create.add_argument(
'-e', '--environment',
help='Enter: STAGE, PROD',
default='STAGE',
required=False)
parser_create.add_argument(
'-m', '--cc-mail',
help='Example: <EMAIL> \
NOTE: must be a registered username!',
default='',
required=False)
    # parser for ticket - {update} option
parser_update = subparsers.add_parser(
'UPDATE',
help='UPDATE an existing deployment ticket'
)
parser_update.add_argument(
'-i', '--bug-id',
help='Example: 1234567',
required=False)
parser_update.add_argument(
'-c', '--comment',
help='Enter: <your bug comment>',
required=True)
args = vars(parser.parse_args())
application = args['application']
bugzilla_mozilla = args['bugzilla_mozilla']
ticket = BugzillaRESTClient(bugzilla_mozilla)
if all(key in args for key in ['bug_id', 'comment']):
bug_id = args['bug_id']
comment = args['comment']
ticket.bug_update(application, comment, bug_id)
if all(key in args for key in ['repo_owner', 'application', 'environment']): # noqa
repo_owner = args['repo_owner']
environment = args['environment'].lower()
if args['cc_mail']:
cc_mail = args['cc_mail']
else:
cc_mail = ''
status = 'NEW'
output = OutputHelper()
output.log('Create deployment ticket', True, True)
notes = ReleaseNotes(repo_owner, application, environment)
description = notes.get_release_notes()
release_num = notes.last_tag
output.log('Release Notes', True)
output.log(description)
ticket.bug_create(
release_num, application, environment, status, description, cc_mail
)
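# Assumed entry point for running the module directly (only main() is defined above).
# Example invocations for the parsers above (the script name is illustrative):
#   ticket -a loop-server NEW -o mozilla-services -e STAGE
#   ticket -a loop-server -B UPDATE -i 1234567 -c "Deployed to STAGE"
if __name__ == '__main__':
    main()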
| [
"deploy_tix.release_notes.ReleaseNotes",
"output_helper.OutputHelper",
"deploy_tix.bugzilla_rest_client.BugzillaRESTClient",
"argparse.ArgumentParser"
]
| [((205, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Scripts for creating / updating deployment tickets in Bugzilla"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Scripts for creating / updating deployment tickets in Bugzilla',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (228, 379), False, 'import argparse\n'), ((2029, 2065), 'deploy_tix.bugzilla_rest_client.BugzillaRESTClient', 'BugzillaRESTClient', (['bugzilla_mozilla'], {}), '(bugzilla_mozilla)\n', (2047, 2065), False, 'from deploy_tix.bugzilla_rest_client import BugzillaRESTClient\n'), ((2573, 2587), 'output_helper.OutputHelper', 'OutputHelper', ([], {}), '()\n', (2585, 2587), False, 'from output_helper import OutputHelper\n'), ((2663, 2713), 'deploy_tix.release_notes.ReleaseNotes', 'ReleaseNotes', (['repo_owner', 'application', 'environment'], {}), '(repo_owner, application, environment)\n', (2675, 2713), False, 'from deploy_tix.release_notes import ReleaseNotes\n')] |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC utility functions related to typing compilation phase
"""
import numpy
import numba
import sdc
from numba import types
from numba.core.errors import TypingError
from numba.np import numpy_support
from sdc.datatypes.indexes import *
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.categorical.types import Categorical
sdc_old_index_types = (types.Array, StringArrayType, )
sdc_pandas_index_types = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
Int64IndexType,
MultiIndexType,
) + sdc_old_index_types
sdc_indexes_range_like = (
PositionalIndexType,
RangeIndexType,
)
# TO-DO: support caching of data allocated for range indexes at request for .values
sdc_indexes_wo_values_cache = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
)
sdc_pandas_df_column_types = (
types.Array,
StringArrayType,
Categorical,
)
class TypeChecker:
"""
Validate object type and raise TypingError if the type is invalid, e.g.:
Method nsmallest(). The object n
given: bool
expected: int
"""
msg_template = '{} The object {}\n given: {}\n expected: {}'
def __init__(self, func_name):
"""
Parameters
----------
func_name: :obj:`str`
name of the function where types checking
"""
self.func_name = func_name
def raise_exc(self, data, expected_types, name=''):
"""
Raise exception with unified message
Parameters
----------
data: :obj:`any`
real type of the data
expected_types: :obj:`str`
            expected types, inserted directly into the exception message
name: :obj:`str`
name of the parameter
"""
msg = self.msg_template.format(self.func_name, name, data, expected_types)
raise TypingError(msg)
def check(self, data, accepted_type, name=''):
"""
Check data type belongs to specified type
Parameters
----------
data: :obj:`any`
real type of the data
accepted_type: :obj:`type`
accepted type
name: :obj:`str`
name of the parameter
"""
if not isinstance(data, accepted_type):
self.raise_exc(data, accepted_type.__name__, name=name)
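# Illustrative helper (added, not part of the original API): the typical pattern for using
# TypeChecker inside an overload, rejecting e.g. a non-integer `n` argument with a TypingError.
def _example_check_nsmallest_args(n_type):
    ty_checker = TypeChecker('Method nsmallest().')
    ty_checker.check(n_type, types.Integer, name='n')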
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def kwsparams2list(params):
"""Convert parameters dict to a list of string of a format 'key=value'"""
return ['{}={}'.format(k, v) for k, v in params.items()]
def sigparams2list(param_names, defaults):
"""Creates a list of strings of a format 'key=value' from parameter names and default values"""
return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names]
def has_literal_value(var, value):
"""Used during typing to check that variable var is a Numba literal value equal to value"""
if not isinstance(var, types.Literal):
return False
if value is None:
return isinstance(var, types.NoneType) or var.literal_value is value
elif isinstance(value, type(bool)):
return var.literal_value is value
else:
return var.literal_value == value
def has_python_value(var, value):
"""Used during typing to check that variable var was resolved as Python type and has specific value"""
if not isinstance(var, type(value)):
return False
if value is None or isinstance(value, type(bool)):
return var is value
else:
return var == value
def is_default(var, value):
return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)
def check_is_numeric_array(type_var):
"""Used during typing to check that type_var is a numeric numpy arrays"""
return check_is_array_of_dtype(type_var, types.Number)
def check_index_is_numeric(ty_series):
"""Used during typing to check that series has numeric index"""
return isinstance(ty_series.index.dtype, types.Number)
def check_types_comparable(ty_left, ty_right):
"""Used during typing to check that specified types can be compared"""
if hasattr(ty_left, 'dtype'):
ty_left = ty_left.dtype
if hasattr(ty_right, 'dtype'):
ty_right = ty_right.dtype
# add the rest of supported types here
if isinstance(ty_left, types.Number):
return isinstance(ty_right, types.Number)
if isinstance(ty_left, types.UnicodeType):
return isinstance(ty_right, types.UnicodeType)
if isinstance(ty_left, types.Boolean):
return isinstance(ty_right, types.Boolean)
if isinstance(ty_left, (types.Tuple, types.UniTuple)):
# FIXME: just for now to unblock compilation
return ty_left == ty_right
return False
def check_arrays_comparable(ty_left, ty_right):
"""Used during typing to check that underlying arrays of specified types can be compared"""
return ((ty_left == string_array_type and ty_right == string_array_type)
or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))
def check_is_array_of_dtype(type_var, dtype):
"""Used during typing to check that type_var is a numeric numpy array of specific dtype"""
return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
"""Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype"""
np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
numba_common_dtype = numpy_support.from_dtype(np_common_dtype)
return numba_common_dtype
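# Worked example (added): an int64 array dtype combined with a float32 scalar dtype promotes
# to float64, mirroring numpy's promotion rules.
def _example_common_dtype():
    return find_common_dtype_from_numpy_dtypes([types.int64], [types.float32])  # types.float64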
def find_index_common_dtype(left, right):
"""Used to find common dtype for indexes of two series and verify if index dtypes are equal"""
left_index_dtype = left.dtype
right_index_dtype = right.dtype
index_dtypes_match = left_index_dtype == right_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[left_index_dtype, right_index_dtype], [])
else:
numba_index_common_dtype = left_index_dtype
return index_dtypes_match, numba_index_common_dtype
def gen_impl_generator(codegen, impl_name):
"""Generate generator of an implementation"""
def _df_impl_generator(*args, **kwargs):
func_text, global_vars = codegen(*args, **kwargs)
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_impl = loc_vars[impl_name]
return _impl
return _df_impl_generator
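# Illustrative use (added sketch): a codegen function returns source text plus the globals it
# needs; gen_impl_generator wraps it into a factory that execs the text and returns the impl.
def _example_impl_codegen():
    return "def _example_impl(a, b):\n    return a + b\n", {}
def _example_gen_impl():
    example_impl = gen_impl_generator(_example_impl_codegen, '_example_impl')()
    return example_impl(2, 3)  # == 5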
def check_signed_integer(ty):
return isinstance(ty, types.Integer) and ty.signed
def _check_dtype_param_type(dtype):
""" Returns True is dtype is a valid type for dtype parameter and False otherwise.
Used in RangeIndex ctor and other methods that take dtype parameter. """
valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)
return isinstance(dtype, valid_dtype_types) or dtype is None
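# Illustrative sketch (hypothetical names, only showing the intended call pattern):
# a helper like _check_dtype_param_type is typically consumed inside a typing
# function to reject unsupported dtype arguments early.
#
#     def _range_index_ctor_typing(start, stop, step, dtype=None):
#         if not _check_dtype_param_type(dtype):
#             raise TypingError('dtype parameter must be NoneType, Omitted, '
#                               'UnicodeType or NumberClass')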
| [
"numba.np.numpy_support.from_dtype",
"numba.np.numpy_support.as_dtype",
"numba.core.errors.TypingError",
"numpy.find_common_type"
]
| [((7400, 7457), 'numpy.find_common_type', 'numpy.find_common_type', (['np_array_dtypes', 'np_scalar_dtypes'], {}), '(np_array_dtypes, np_scalar_dtypes)\n', (7422, 7457), False, 'import numpy\n'), ((7483, 7524), 'numba.np.numpy_support.from_dtype', 'numpy_support.from_dtype', (['np_common_dtype'], {}), '(np_common_dtype)\n', (7507, 7524), False, 'from numba.np import numpy_support\n'), ((3501, 3517), 'numba.core.errors.TypingError', 'TypingError', (['msg'], {}), '(msg)\n', (3512, 3517), False, 'from numba.core.errors import TypingError\n'), ((7241, 7270), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', (['dtype'], {}), '(dtype)\n', (7263, 7270), False, 'from numba.np import numpy_support\n'), ((7321, 7350), 'numba.np.numpy_support.as_dtype', 'numpy_support.as_dtype', (['dtype'], {}), '(dtype)\n', (7343, 7350), False, 'from numba.np import numpy_support\n')] |
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
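    # Each scaler normalises one saliency variant into [-1, 1]: positive values are
    # divided by that variant's maximum, negative values by the absolute value of its
    # minimum, and everything collapses to zero when the range is degenerate.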
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.ylabel",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"os.path.dirname",
"matplotlib.pyplot.xlim",
"numpy.copy",
"os.path.join",
"saliency.visualizer.smiles_visualizer.SmilesVisualizer",
"os.path.abspath",
"numpy.zeros_like"
]
| [((76, 97), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (90, 97), False, 'import matplotlib\n'), ((336, 361), 'os.path.dirname', 'os.path.dirname', (['dir_path'], {}), '(dir_path)\n', (351, 361), False, 'import os\n'), ((611, 629), 'saliency.visualizer.smiles_visualizer.SmilesVisualizer', 'SmilesVisualizer', ([], {}), '()\n', (627, 629), False, 'from saliency.visualizer.smiles_visualizer import SmilesVisualizer\n'), ((3296, 3339), 'matplotlib.pyplot.scatter', 'plt.scatter', (['prediction', 'answer'], {'marker': '"""."""'}), "(prediction, answer, marker='.')\n", (3307, 3339), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3385), 'matplotlib.pyplot.plot', 'plt.plot', (['[-100, 100]', '[-100, 100]'], {'c': '"""r"""'}), "([-100, 100], [-100, 100], c='r')\n", (3352, 3385), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3530), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[min_v - 0.1, max_v + 0.1]'], {}), '([min_v - 0.1, max_v + 0.1])\n', (3502, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3531, 3555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""prediction"""'], {}), "('prediction')\n", (3541, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3596), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[min_v - 0.1, max_v + 0.1]'], {}), '([min_v - 0.1, max_v + 0.1])\n', (3568, 3596), True, 'import matplotlib.pyplot as plt\n'), ((3597, 3623), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ground truth"""'], {}), "('ground truth')\n", (3607, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3654), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_filepath'], {}), '(save_filepath)\n', (3639, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3670), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3668, 3670), True, 'import matplotlib.pyplot as plt\n'), ((3698, 3765), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Regression with own dataset."""'}), "(description='Regression with own dataset.')\n", (3721, 3765), False, 'import argparse\n'), ((4198, 4220), 'numpy.concatenate', 'np.concatenate', (['output'], {}), '(output)\n', (4212, 4220), True, 'import numpy as np\n'), ((4234, 4256), 'numpy.concatenate', 'np.concatenate', (['answer'], {}), '(answer)\n', (4248, 4256), True, 'import numpy as np\n'), ((393, 439), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_vanilla.npy"""'], {}), "(dir_path, 'saliency_vanilla.npy')\n", (405, 439), False, 'import os\n'), ((471, 516), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_smooth.npy"""'], {}), "(dir_path, 'saliency_smooth.npy')\n", (483, 516), False, 'import os\n'), ((547, 591), 'os.path.join', 'os.path.join', (['dir_path', '"""saliency_bayes.npy"""'], {}), "(dir_path, 'saliency_bayes.npy')\n", (559, 591), False, 'import os\n'), ((646, 688), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_vanilla"""'], {}), "(parent_dir, 'result_vanilla')\n", (658, 688), False, 'import os\n'), ((721, 762), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_smooth"""'], {}), "(parent_dir, 'result_smooth')\n", (733, 762), False, 'import os\n'), ((795, 835), 'os.path.join', 'os.path.join', (['parent_dir', '"""result_bayes"""'], {}), "(parent_dir, 'result_bayes')\n", (807, 835), False, 'import os\n'), ((876, 914), 'os.path.join', 'os.path.join', (['dir_path', '"""test_idx.npy"""'], {}), "(dir_path, 'test_idx.npy')\n", (888, 914), False, 'import os\n'), ((937, 973), 'os.path.join', 'os.path.join', (['dir_path', '"""answer.npy"""'], {}), 
"(dir_path, 'answer.npy')\n", (949, 973), False, 'import os\n'), ((996, 1032), 'os.path.join', 'os.path.join', (['dir_path', '"""output.npy"""'], {}), "(dir_path, 'output.npy')\n", (1008, 1032), False, 'import os\n'), ((1060, 1098), 'os.path.join', 'os.path.join', (['parent_dir', '"""smiles.npy"""'], {}), "(parent_dir, 'smiles.npy')\n", (1072, 1098), False, 'import os\n'), ((3402, 3420), 'numpy.max', 'np.max', (['prediction'], {}), '(prediction)\n', (3408, 3420), True, 'import numpy as np\n'), ((3422, 3436), 'numpy.max', 'np.max', (['answer'], {}), '(answer)\n', (3428, 3436), True, 'import numpy as np\n'), ((3454, 3472), 'numpy.min', 'np.min', (['prediction'], {}), '(prediction)\n', (3460, 3472), True, 'import numpy as np\n'), ((3474, 3488), 'numpy.min', 'np.min', (['answer'], {}), '(answer)\n', (3480, 3488), True, 'import numpy as np\n'), ((1553, 1571), 'numpy.copy', 'np.copy', (['saliency_'], {}), '(saliency_)\n', (1560, 1571), True, 'import numpy as np\n'), ((4304, 4336), 'os.path.join', 'os.path.join', (['path', '"""result.png"""'], {}), "(path, 'result.png')\n", (4316, 4336), False, 'import os\n'), ((4452, 4478), 'os.path.join', 'os.path.join', (['path', 'suffix'], {}), '(path, suffix)\n', (4464, 4478), False, 'import os\n'), ((195, 220), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'import os\n'), ((1244, 1253), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (1250, 1253), True, 'import numpy as np\n'), ((1284, 1293), 'numpy.min', 'np.min', (['v'], {}), '(v)\n', (1290, 1293), True, 'import numpy as np\n'), ((1661, 1684), 'numpy.zeros_like', 'np.zeros_like', (['saliency'], {}), '(saliency)\n', (1674, 1684), True, 'import numpy as np\n'), ((4069, 4109), 'os.path.join', 'os.path.join', (['path', 'suffix', '"""output.npy"""'], {}), "(path, suffix, 'output.npy')\n", (4081, 4109), False, 'import os\n'), ((4142, 4182), 'os.path.join', 'os.path.join', (['path', 'suffix', '"""answer.npy"""'], {}), "(path, suffix, 'answer.npy')\n", (4154, 4182), False, 'import os\n'), ((1879, 1891), 'numpy.abs', 'np.abs', (['minv'], {}), '(minv)\n', (1885, 1891), True, 'import numpy as np\n')] |
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
data_modified = Signal()
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
# If this is not None, then only the elements present in the
# list (as (i, j) items) will be enabled.
self._enabled_elements = None
# If this is set, it will be called every time the data updates
# to apply equality constraints.
self._apply_constraints_func = None
# Whether or not the matrix is currently invalid
self.matrix_invalid = False
# Reason the matrix is currently invalid
self.matrix_invalid_reason = ''
self.setLayout(QGridLayout())
self.add_spin_boxes()
self.update_gui()
def add_spin_boxes(self):
layout = self.layout()
for i in range(self.rows):
for j in range(self.cols):
sb = self.create_spin_box()
layout.addWidget(sb, i, j)
def create_spin_box(self):
sb = ScientificDoubleSpinBox()
sb.setKeyboardTracking(False)
sb.valueChanged.connect(self.element_modified)
return sb
def element_modified(self):
self.update_data()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
if not np.array_equal(self._data, v):
if self._data.shape != v.shape:
msg = (f'Shape {v.shape} does not match original shape '
f'{self._data.shape}')
raise AttributeError(msg)
self._data = v
self.reset_disabled_values()
self.update_gui()
@property
def rows(self):
return self.data.shape[0]
@property
def cols(self):
return self.data.shape[1]
def update_data(self):
self.data[:] = self.gui_data
self.apply_constraints()
self.data_modified.emit()
def update_gui(self):
self.gui_data = self.data
@property
def gui_data(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [[self.gui_value(i, j) for j in col_range] for i in row_range]
@gui_data.setter
def gui_data(self, v):
blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
for i in range(self.rows):
for j in range(self.cols):
self.set_gui_value(i, j, v[i][j])
@property
def all_widgets(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [self.widget(i, j) for j in col_range for i in row_range]
@property
def enabled_widgets(self):
widgets = []
for i in range(self.rows):
for j in range(self.cols):
if (i, j) in self.enabled_elements:
widgets.append(self.widget(i, j))
return widgets
def widget(self, row, col):
return self.layout().itemAtPosition(row, col).widget()
def gui_value(self, row, col):
return self.widget(row, col).value()
def set_gui_value(self, row, col, val):
self.widget(row, col).setValue(val)
def set_matrix_invalid(self, s):
self.matrix_invalid = True
self.matrix_invalid_reason = s
self.update_tooltips()
self.update_enable_states()
def set_matrix_valid(self):
self.matrix_invalid = False
self.matrix_invalid_reason = ''
self.update_tooltips()
self.update_enable_states()
def update_tooltips(self):
if self.matrix_invalid:
tooltip = self.matrix_invalid_reason
else:
tooltip = ''
for w in self.enabled_widgets:
w.setToolTip(tooltip)
def update_enable_states(self):
enable_all = self.enabled_elements is None
for i in range(self.rows):
for j in range(self.cols):
w = self.widget(i, j)
enable = enable_all or (i, j) in self.enabled_elements
w.setEnabled(enable)
enabled_str = 'enabled' if enable else 'disabled'
style_sheet = getattr(self, f'{enabled_str}_style_sheet')
w.setStyleSheet(style_sheet)
def reset_disabled_values(self):
# Resets all disabled values to zero, then applies constraints
for i in range(self.rows):
for j in range(self.cols):
if not self.widget(i, j).isEnabled():
self.data[i, j] = 0.0
self.apply_constraints()
self.update_gui()
@property
def enabled_style_sheet(self):
if self.matrix_invalid:
return INVALID_MATRIX_STYLE_SHEET
return DEFAULT_ENABLED_STYLE_SHEET
@property
def disabled_style_sheet(self):
return DEFAULT_DISABLED_STYLE_SHEET
@property
def enabled_elements(self):
return self._enabled_elements
@enabled_elements.setter
def enabled_elements(self, v):
if self._enabled_elements != v:
self._enabled_elements = v
self.update_enable_states()
self.reset_disabled_values()
@property
def apply_constraints_func(self):
return self._apply_constraints_func
@apply_constraints_func.setter
def apply_constraints_func(self, v):
if self._apply_constraints_func != v:
self._apply_constraints_func = v
self.apply_constraints()
def apply_constraints(self):
if (func := self.apply_constraints_func) is None:
return
func(self.data)
self.update_gui()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
if len(sys.argv) < 2:
sys.exit('Usage: <script> <matrix_size>')
rows, cols = [int(x) for x in sys.argv[1].split('x')]
data = np.ones((rows, cols))
app = QApplication(sys.argv)
dialog = QDialog()
layout = QVBoxLayout()
dialog.setLayout(layout)
editor = MatrixEditor(data)
layout.addWidget(editor)
# def constraints(x):
# x[2][2] = x[1][1]
# editor.enabled_elements = [(1, 1), (3, 4)]
# editor.apply_constraints_func = constraints
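    # Uncommenting the four lines above limits editing to cells (1, 1) and (3, 4)
    # (all other cells are zeroed and greyed out) and copies element [1][1] into
    # [2][2] whenever the data changes; this needs a matrix of at least 4x5.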
def on_data_modified():
print(f'Data modified: {editor.data}')
editor.data_modified.connect(on_data_modified)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
| [
"hexrd.ui.scientificspinbox.ScientificDoubleSpinBox",
"PySide2.QtWidgets.QGridLayout",
"numpy.ones",
"PySide2.QtCore.QSignalBlocker",
"PySide2.QtCore.Signal",
"PySide2.QtWidgets.QApplication",
"numpy.array_equal",
"sys.exit",
"PySide2.QtWidgets.QVBoxLayout",
"PySide2.QtWidgets.QDialog"
]
| [((407, 415), 'PySide2.QtCore.Signal', 'Signal', ([], {}), '()\n', (413, 415), False, 'from PySide2.QtCore import QSignalBlocker, Signal\n'), ((6248, 6269), 'numpy.ones', 'np.ones', (['(rows, cols)'], {}), '((rows, cols))\n', (6255, 6269), True, 'import numpy as np\n'), ((6281, 6303), 'PySide2.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6293, 6303), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((6317, 6326), 'PySide2.QtWidgets.QDialog', 'QDialog', ([], {}), '()\n', (6324, 6326), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((6340, 6353), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (6351, 6353), False, 'from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout\n'), ((1383, 1408), 'hexrd.ui.scientificspinbox.ScientificDoubleSpinBox', 'ScientificDoubleSpinBox', ([], {}), '()\n', (1406, 1408), False, 'from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox\n'), ((6136, 6177), 'sys.exit', 'sys.exit', (['"""Usage: <script> <matrix_size>"""'], {}), "('Usage: <script> <matrix_size>')\n", (6144, 6177), False, 'import sys\n'), ((1044, 1057), 'PySide2.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1055, 1057), False, 'from PySide2.QtWidgets import QGridLayout, QWidget\n'), ((1697, 1726), 'numpy.array_equal', 'np.array_equal', (['self._data', 'v'], {}), '(self._data, v)\n', (1711, 1726), True, 'import numpy as np\n'), ((2623, 2640), 'PySide2.QtCore.QSignalBlocker', 'QSignalBlocker', (['w'], {}), '(w)\n', (2637, 2640), False, 'from PySide2.QtCore import QSignalBlocker, Signal\n')] |
from typing import Tuple, Union, Any, Sequence
from collections import deque, defaultdict, OrderedDict
from ...validators.one import JustLen
from ...functional.mixins import CompositionClassMixin
from ..one import Just
dict_keys = type({}.keys())
odict_keys = type(OrderedDict({}).keys())
dict_values = type({}.values())
odict_values = type(OrderedDict({}).values())
dict_items = type({}.items())
odict_items = type(OrderedDict({}).items())
NAMED_TYPES = (frozenset, slice, range,
deque, defaultdict, OrderedDict,
dict_keys, dict_values, dict_items,
odict_keys, odict_values, odict_items)
TypesT = Union[type, Sequence[type]]
class TypedTuple(CompositionClassMixin):
"""Checks for different type(s) of each element in a defined-length tuple.
Parameters
----------
value : tuple
The tuple to check the length and element types of.
name : str, optional
The name of the tuple to check the length and the element type(s) of.
Defaults to None.
types : tuple(type), tuple(tuple(type))
Tuple of the length to check for with either one type for each element
of `value` or a tuple of types for each element of `value`. Use the
ellipsis literal ... to skip type checking of the tuple element at
that position.
Returns
-------
tuple
The tuple passed in.
Methods
-------
o(callable) : CompositionOf
Daisy-chains the tuple length and type checker to another `callable`,
returning the functional composition of both. The argument `types` is
        passed through to the `TypedTuple` checker when calling the
composition.
Raises
------
WrongTypeError
If `value` is not a tuple or if any of its elements do not have (one
of) the permitted type(s).
LenError
If the tuple passed in does not have the same length as `types` or
if the type specification does not have a meaningful length.
TypeError
If `types` is not a tuple or any of its elements are not of type type.
See Also
--------
All, JustLen, CompositionOf
"""
def __new__(cls, value: tuple, name=None, *, types=(), **kwargs) -> tuple:
cls.__name = str(name) if name is not None else ''
cls.__string = cls.__name or str(value)
types, length = cls.__valid(types)
value = JustLen.JustTuple(value, name=name, length=length)
for index, element in enumerate(value):
if not cls.__is_or_contains_ellipsis(types[index]):
element_name = f'element {index} in tuple {cls.__string}'
_ = Just(types[index])(element, name=element_name)
return value
@classmethod
def __valid(cls, types: Sequence[TypesT]) -> Tuple[TypesT, int]:
if type(types) not in (tuple, list, deque):
message = cls.__wrong_type_message_for(types)
raise TypeError(message)
return types, len(types)
@staticmethod
def __wrong_type_message_for(types: Any) -> str:
type_name = type(types).__name__
if isinstance(types, NAMED_TYPES):
of_type = type_name
else:
of_type = f'{type_name} like {types}'
return f'Type of types argument must be tuple, not {of_type}!'
@staticmethod
def __is_or_contains_ellipsis(types: TypesT) -> bool:
is_ellipsis = types is ...
try:
contains_ellipsis = ... in types
except TypeError:
contains_ellipsis = False
return is_ellipsis or contains_ellipsis
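# Illustrative usage (hypothetical values), following the docstring above: check a
# length-2 tuple whose first element must be an int while the second element is
# skipped via the ellipsis literal.
#
#     pair = TypedTuple((1, 'anything'), 'pair', types=(int, ...))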
| [
"collections.OrderedDict"
]
| [((266, 281), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (277, 281), False, 'from collections import deque, defaultdict, OrderedDict\n'), ((342, 357), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (353, 357), False, 'from collections import deque, defaultdict, OrderedDict\n'), ((417, 432), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (428, 432), False, 'from collections import deque, defaultdict, OrderedDict\n')] |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OptionsConfig(AppConfig):
name = 'rdmo.options'
verbose_name = _('Options')
| [
"django.utils.translation.ugettext_lazy"
]
| [((169, 181), 'django.utils.translation.ugettext_lazy', '_', (['"""Options"""'], {}), "('Options')\n", (170, 181), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from main import models
class Admin(UserAdmin):
list_display = ("id", "username", "email", "date_joined", "last_login")
admin.site.register(models.User, Admin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ("id", "title")
admin.site.register(models.Document, DocumentAdmin)
| [
"django.contrib.admin.site.register"
]
| [((210, 249), 'django.contrib.admin.site.register', 'admin.site.register', (['models.User', 'Admin'], {}), '(models.User, Admin)\n', (229, 249), False, 'from django.contrib import admin\n'), ((328, 379), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Document', 'DocumentAdmin'], {}), '(models.Document, DocumentAdmin)\n', (347, 379), False, 'from django.contrib import admin\n')] |
import json
import logging
import joblib
import pandas as pd
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
app = Flask(__name__)
CORS(app)
@app.route("/api/machinePrediction", methods=['GET'])
def home():
incomingMachineId = request.args.get('machineId')
modelPath = request.args.get('modelPath')
column_names = request.args.get('columnNames')
data_points = request.args.get('dataPoints')
app.logger.info('Received machine id is %s', incomingMachineId)
app.logger.info('Model path is %s', modelPath)
json_object = json.loads(data_points)
pairs = json_object.items()
vitals_value = []
for key, value in pairs:
vitals_value.append(value)
modelObj = joblib.load(modelPath)
data = [vitals_value]
df = pd.DataFrame(data=data, columns = column_names)
modelPrediction = modelObj.predict(df)
app.logger.info('Model prediction is: %s', modelPrediction)
return jsonify(modelPrediction[0])
if __name__ == "__main__":
app.run(debug=True)
# To start the server
# python3 app.py
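# The endpoint expects four query parameters: machineId (only logged), modelPath
# (path of a joblib model on disk), columnNames (the feature column names) and
# dataPoints (a JSON object whose values are taken, in insertion order, as one row).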
| [
"flask.request.args.get",
"json.loads",
"flask_cors.CORS",
"flask.Flask",
"joblib.load",
"pandas.DataFrame",
"flask.jsonify"
]
| [((153, 168), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (158, 168), False, 'from flask import Flask, jsonify, request\n'), ((169, 178), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (173, 178), False, 'from flask_cors import CORS, cross_origin\n'), ((270, 299), 'flask.request.args.get', 'request.args.get', (['"""machineId"""'], {}), "('machineId')\n", (286, 299), False, 'from flask import Flask, jsonify, request\n'), ((316, 345), 'flask.request.args.get', 'request.args.get', (['"""modelPath"""'], {}), "('modelPath')\n", (332, 345), False, 'from flask import Flask, jsonify, request\n'), ((365, 396), 'flask.request.args.get', 'request.args.get', (['"""columnNames"""'], {}), "('columnNames')\n", (381, 396), False, 'from flask import Flask, jsonify, request\n'), ((415, 445), 'flask.request.args.get', 'request.args.get', (['"""dataPoints"""'], {}), "('dataPoints')\n", (431, 445), False, 'from flask import Flask, jsonify, request\n'), ((585, 608), 'json.loads', 'json.loads', (['data_points'], {}), '(data_points)\n', (595, 608), False, 'import json\n'), ((742, 764), 'joblib.load', 'joblib.load', (['modelPath'], {}), '(modelPath)\n', (753, 764), False, 'import joblib\n'), ((801, 846), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'column_names'}), '(data=data, columns=column_names)\n', (813, 846), True, 'import pandas as pd\n'), ((969, 996), 'flask.jsonify', 'jsonify', (['modelPrediction[0]'], {}), '(modelPrediction[0])\n', (976, 996), False, 'from flask import Flask, jsonify, request\n')] |
import pytest
from PySide2.QtGui import QVector3D
from nexus_constructor.model.component import Component
from nexus_constructor.model.dataset import Dataset
from nexus_constructor.model.instrument import Instrument
from nexus_constructor.model.value_type import ValueTypes
values = Dataset(
name="scalar_value",
type=ValueTypes.DOUBLE,
size=[1],
values=90.0,
parent_node=None,
)
@pytest.fixture
def instrument():
return Instrument(parent_node=None)
def test_remove_from_beginning_1(instrument):
component1 = Component("component1", instrument)
rot = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot
assert len(rot.dependents) == 1
rot.remove_from_dependee_chain()
assert component1.depends_on is None
def test_remove_from_beginning_2(instrument):
component1 = Component("component1", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component1.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
rot1.depends_on = rot2
assert len(rot2.dependents) == 1
rot1.remove_from_dependee_chain()
assert len(rot2.dependents) == 1
assert rot2.dependents[0] == component1
assert component1.depends_on == rot2
def test_remove_from_beginning_3(instrument):
component1 = Component("component1", instrument)
component2 = Component("component2", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component2.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
component2.depends_on = rot2
rot1.depends_on = rot2
assert len(rot2.dependents) == 2
rot1.remove_from_dependee_chain()
assert len(rot2.dependents) == 2
assert component2 in rot2.dependents
assert component1 in rot2.dependents
assert component1.depends_on == rot2
assert component1.transforms.link.linked_component == component2
def test_remove_from_middle(instrument):
component1 = Component("component1", instrument)
component2 = Component("component2", instrument)
component3 = Component("component3", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component2.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot3 = component3.add_rotation(
name="rotation3",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
component1.depends_on = rot1
component2.depends_on = rot2
component3.depends_on = rot3
component1.transforms.link.linked_component = component2
component2.transforms.link.linked_component = component3
rot2.remove_from_dependee_chain()
assert rot1.depends_on == rot3
assert component1.transforms.link.linked_component == component3
assert rot1 in rot3.dependents
assert component3 in rot3.dependents
def test_remove_from_end(instrument):
component1 = Component("component1", instrument)
rot1 = component1.add_rotation(
name="rotation1",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
)
rot2 = component1.add_rotation(
name="rotation2",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
depends_on=rot1,
)
rot3 = component1.add_rotation(
name="rotation3",
axis=QVector3D(1.0, 0.0, 0.0),
angle=values.values,
values=values,
depends_on=rot2,
)
component1.depends_on = rot3
rot1.remove_from_dependee_chain()
assert rot1.depends_on is None
assert not rot1.dependents
assert component1.depends_on == rot3
assert rot2.dependents[0] == rot3
assert len(component1.transforms) == 2
| [
"nexus_constructor.model.component.Component",
"nexus_constructor.model.dataset.Dataset",
"PySide2.QtGui.QVector3D",
"nexus_constructor.model.instrument.Instrument"
]
| [((285, 382), 'nexus_constructor.model.dataset.Dataset', 'Dataset', ([], {'name': '"""scalar_value"""', 'type': 'ValueTypes.DOUBLE', 'size': '[1]', 'values': '(90.0)', 'parent_node': 'None'}), "(name='scalar_value', type=ValueTypes.DOUBLE, size=[1], values=90.0,\n parent_node=None)\n", (292, 382), False, 'from nexus_constructor.model.dataset import Dataset\n'), ((449, 477), 'nexus_constructor.model.instrument.Instrument', 'Instrument', ([], {'parent_node': 'None'}), '(parent_node=None)\n', (459, 477), False, 'from nexus_constructor.model.instrument import Instrument\n'), ((543, 578), 'nexus_constructor.model.component.Component', 'Component', (['"""component1"""', 'instrument'], {}), "('component1', instrument)\n", (552, 578), False, 'from nexus_constructor.model.component import Component\n'), ((948, 983), 'nexus_constructor.model.component.Component', 'Component', (['"""component1"""', 'instrument'], {}), "('component1', instrument)\n", (957, 983), False, 'from nexus_constructor.model.component import Component\n'), ((1624, 1659), 'nexus_constructor.model.component.Component', 'Component', (['"""component1"""', 'instrument'], {}), "('component1', instrument)\n", (1633, 1659), False, 'from nexus_constructor.model.component import Component\n'), ((1677, 1712), 'nexus_constructor.model.component.Component', 'Component', (['"""component2"""', 'instrument'], {}), "('component2', instrument)\n", (1686, 1712), False, 'from nexus_constructor.model.component import Component\n'), ((2478, 2513), 'nexus_constructor.model.component.Component', 'Component', (['"""component1"""', 'instrument'], {}), "('component1', instrument)\n", (2487, 2513), False, 'from nexus_constructor.model.component import Component\n'), ((2531, 2566), 'nexus_constructor.model.component.Component', 'Component', (['"""component2"""', 'instrument'], {}), "('component2', instrument)\n", (2540, 2566), False, 'from nexus_constructor.model.component import Component\n'), ((2584, 2619), 'nexus_constructor.model.component.Component', 'Component', (['"""component3"""', 'instrument'], {}), "('component3', instrument)\n", (2593, 2619), False, 'from nexus_constructor.model.component import Component\n'), ((3584, 3619), 'nexus_constructor.model.component.Component', 'Component', (['"""component1"""', 'instrument'], {}), "('component1', instrument)\n", (3593, 3619), False, 'from nexus_constructor.model.component import Component\n'), ((653, 677), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (662, 677), False, 'from PySide2.QtGui import QVector3D\n'), ((1059, 1083), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1068, 1083), False, 'from PySide2.QtGui import QVector3D\n'), ((1218, 1242), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1227, 1242), False, 'from PySide2.QtGui import QVector3D\n'), ((1788, 1812), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1797, 1812), False, 'from PySide2.QtGui import QVector3D\n'), ((1947, 1971), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1956, 1971), False, 'from PySide2.QtGui import QVector3D\n'), ((2695, 2719), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (2704, 2719), False, 'from PySide2.QtGui import QVector3D\n'), ((2854, 2878), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], 
{}), '(1.0, 0.0, 0.0)\n', (2863, 2878), False, 'from PySide2.QtGui import QVector3D\n'), ((3013, 3037), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (3022, 3037), False, 'from PySide2.QtGui import QVector3D\n'), ((3695, 3719), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (3704, 3719), False, 'from PySide2.QtGui import QVector3D\n'), ((3854, 3878), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (3863, 3878), False, 'from PySide2.QtGui import QVector3D\n'), ((4038, 4062), 'PySide2.QtGui.QVector3D', 'QVector3D', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (4047, 4062), False, 'from PySide2.QtGui import QVector3D\n')] |
import pygame
from loguru import logger
from somegame.osd import OSD
class FpsOSD(OSD):
def __init__(self, game):
super().__init__(game)
logger.info('Loading font')
self.font = pygame.font.Font(pygame.font.get_default_font(), 32)
def draw(self, surface):
fps = self.game.get_average_fps()
fps_text = '<unknown>' if fps is None else '{:.1f}'.format(fps)
tmp_surf = self.font.render('{} FPS'.format(fps_text), True, (255, 255, 255))
surface.blit(tmp_surf, (0, 0))
| [
"pygame.font.get_default_font",
"loguru.logger.info"
]
| [((160, 187), 'loguru.logger.info', 'logger.info', (['"""Loading font"""'], {}), "('Loading font')\n", (171, 187), False, 'from loguru import logger\n'), ((225, 255), 'pygame.font.get_default_font', 'pygame.font.get_default_font', ([], {}), '()\n', (253, 255), False, 'import pygame\n')] |
import json
import tornado.gen
import traceback
from base64 import b64encode
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from settings import Settings
from mongo_db_controller import ZoomUserDB
@tornado.gen.coroutine
def zoomRefresh(zoom_user):
url = "https://zoom.us/oauth/token"
payload = "grant_type=refresh_token&"
payload += "refresh_token={0}".format(zoom_user.get('refresh_token'))
#we need to base 64 encode it
    #and then decode it to ascii as python 3 stores it as a byte string
userAndPass = b64encode("{0}:{1}".format(Settings.zoom_client_id, Settings.zoom_client_secret).encode()).decode("ascii")
headers = {
'authorization': 'Basic {0}'.format(userAndPass),
'content-type': "application/x-www-form-urlencoded"
}
request = HTTPRequest(url, method="POST", headers=headers, body=payload)
http_client = AsyncHTTPClient()
print(zoom_user)
print('making zoomRefresh')
print(payload)
try:
response = yield http_client.fetch(request)
resp = json.loads(response.body.decode("utf-8"))
print("zoomRefresh /access_token Response: {0}".format(resp))
zoom_user = ZoomUserDB.db.insert_user(zoom_user['person_id'], resp['access_token'], resp['expires_in'], resp['refresh_token'], "zoom")
print('new zoom_user:{0}'.format(zoom_user))
except HTTPError as he:
print('zoomRefresh HTTPError:')
print(he.code)
print(he.response.body)
if he.code == 401:
ZoomUserDB.db.delete_user(zoom_user['person_id'], "zoom")
zoom_user = None
raise tornado.gen.Return(zoom_user)
@tornado.gen.coroutine
def zoomGET(endpoint_url, zoom_user):
url = "https://api.zoom.us/v2{0}".format(endpoint_url)
headers = {"Authorization":"Bearer {0}".format(zoom_user.get('token'))}
request = HTTPRequest(url, method="GET", headers=headers)
http_client = AsyncHTTPClient()
response = None
try:
response = yield http_client.fetch(request)
body = response.body.decode('utf-8')
response = json.loads(body)
except HTTPError as he:
if he.code == 401:
print('token may be expired, attempting refresh')
zoom_user = yield zoomRefresh(zoom_user)
if zoom_user:
response, zoom_user = yield zoomGET(endpoint_url, zoom_user)
else:
try:
print(he.response.body)
except Exception as e:
pass
traceback.print_exc()
raise tornado.gen.Return((response, zoom_user))
| [
"json.loads",
"tornado.httpclient.HTTPRequest",
"mongo_db_controller.ZoomUserDB.db.delete_user",
"mongo_db_controller.ZoomUserDB.db.insert_user",
"tornado.httpclient.AsyncHTTPClient",
"traceback.print_exc"
]
| [((820, 882), 'tornado.httpclient.HTTPRequest', 'HTTPRequest', (['url'], {'method': '"""POST"""', 'headers': 'headers', 'body': 'payload'}), "(url, method='POST', headers=headers, body=payload)\n", (831, 882), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError\n'), ((901, 918), 'tornado.httpclient.AsyncHTTPClient', 'AsyncHTTPClient', ([], {}), '()\n', (916, 918), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError\n'), ((1871, 1918), 'tornado.httpclient.HTTPRequest', 'HTTPRequest', (['url'], {'method': '"""GET"""', 'headers': 'headers'}), "(url, method='GET', headers=headers)\n", (1882, 1918), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError\n'), ((1937, 1954), 'tornado.httpclient.AsyncHTTPClient', 'AsyncHTTPClient', ([], {}), '()\n', (1952, 1954), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError\n'), ((1199, 1325), 'mongo_db_controller.ZoomUserDB.db.insert_user', 'ZoomUserDB.db.insert_user', (["zoom_user['person_id']", "resp['access_token']", "resp['expires_in']", "resp['refresh_token']", '"""zoom"""'], {}), "(zoom_user['person_id'], resp['access_token'],\n resp['expires_in'], resp['refresh_token'], 'zoom')\n", (1224, 1325), False, 'from mongo_db_controller import ZoomUserDB\n'), ((2100, 2116), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (2110, 2116), False, 'import json\n'), ((1537, 1594), 'mongo_db_controller.ZoomUserDB.db.delete_user', 'ZoomUserDB.db.delete_user', (["zoom_user['person_id']", '"""zoom"""'], {}), "(zoom_user['person_id'], 'zoom')\n", (1562, 1594), False, 'from mongo_db_controller import ZoomUserDB\n'), ((2529, 2550), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2548, 2550), False, 'import traceback\n')] |
import sqlite3
from random import randint, choice
import numpy as np
conn = sqlite3.connect('ej.db')
c = conn.cursor()
#GETTING MAXIMUM, MINIMUM AND AVERAGE SIZES#
c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
altoMax = resultado[0]
c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
altoMin = resultado[0]
altoProm = abs((altoMax + altoMin) / 2)
#print altoMax , altoProm , altoMin
arrAlto = [altoMax , altoProm , altoMin]
c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
anchoMax = resultado[0]
c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
anchoMin = resultado[0]
anchoProm = abs((anchoMax + anchoMin) / 2)
anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)
arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]
#### CREATING NEGATIVE CLASSES
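# For each of the 15 (height, width) prototype combinations below, insert ten
# class-0 rows whose dimensions fall outside the 0.5x-1.5x band around the
# prototype (either below half of it or between 1.5x and 3x of it).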
for i in range(0,3):
for j in range(0,5):
for _ in range(10):
negAncho = arrAncho[j]
negAlto = arrAlto[i]
rand_alto_max = int(negAlto * 1.5)
rand_alto_min = int(negAlto * 0.5)
r3 = rand_alto_max * 2
rand_ancho_max = int(negAncho*1.5)
rand_ancho_min = int(negAncho*0.5)
r33 = rand_ancho_max * 2
f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])
c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)",
(f2, f1, f2*f1, 0))
conn.commit()
conn.close()
| [
"numpy.random.randint",
"sqlite3.connect"
]
| [((78, 102), 'sqlite3.connect', 'sqlite3.connect', (['"""ej.db"""'], {}), "('ej.db')\n", (93, 102), False, 'import sqlite3\n'), ((1413, 1448), 'numpy.random.randint', 'np.random.randint', (['(1)', 'rand_alto_min'], {}), '(1, rand_alto_min)\n', (1430, 1448), True, 'import numpy as np\n'), ((1450, 1486), 'numpy.random.randint', 'np.random.randint', (['rand_alto_max', 'r3'], {}), '(rand_alto_max, r3)\n', (1467, 1486), True, 'import numpy as np\n'), ((1514, 1550), 'numpy.random.randint', 'np.random.randint', (['(1)', 'rand_ancho_min'], {}), '(1, rand_ancho_min)\n', (1531, 1550), True, 'import numpy as np\n'), ((1552, 1590), 'numpy.random.randint', 'np.random.randint', (['rand_ancho_max', 'r33'], {}), '(rand_ancho_max, r33)\n', (1569, 1590), True, 'import numpy as np\n')] |
# nuScenes dev-kit.
# Code written by <NAME> & <NAME>, 2018.
# Licensed under the Creative Commons [see licence.txt]
import argparse
import json
import os
import random
import time
from typing import Tuple, Dict, Any
import numpy as np
from nuscenes import NuScenes
from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.constants import TP_METRICS
from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes
from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample
class NuScenesEval:
"""
This is the official nuScenes detection evaluation code.
Results are written to the provided output_dir.
nuScenes uses the following metrics:
- Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
- True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.
- nuScenes Detection Score (NDS): The weighted sum of the above.
Here is an overview of the functions in this method:
    - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
- run: Performs evaluation and dumps the metric data to disk.
- render: Renders various plots and dumps to disk.
We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.
Please see https://github.com/nutonomy/nuscenes-devkit for more details.
"""
def __init__(self,
nusc: NuScenes,
config: DetectionConfig,
result_path: str,
eval_set: str,
output_dir: str = None,
verbose: bool = True):
"""
Initialize a NuScenesEval object.
:param nusc: A NuScenes object.
:param config: A DetectionConfig object.
:param result_path: Path of the nuScenes JSON result file.
:param eval_set: The dataset split to evaluate on, e.g. train or val.
:param output_dir: Folder to save plots and results to.
:param verbose: Whether to print to stdout.
"""
self.nusc = nusc
self.result_path = result_path
self.eval_set = eval_set
self.output_dir = output_dir
self.verbose = verbose
self.cfg = config
# Make dirs.
self.plot_dir = os.path.join(self.output_dir, 'plots')
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.isdir(self.plot_dir):
os.makedirs(self.plot_dir)
# Load data.
self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose)
self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose)
assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
"Samples in split doesn't match samples in predictions."
# Add center distances.
self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
# Filter boxes (distance, points per box, etc.).
if verbose:
print('Filtering predictions')
self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
if verbose:
print('Filtering ground truth annotations')
self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)
self.sample_tokens = self.gt_boxes.sample_tokens
def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]:
"""
Performs the actual evaluation.
:return: A tuple of high-level and the raw metric data.
"""
start_time = time.time()
# -----------------------------------
# Step 1: Accumulate metric data for all classes and distance thresholds.
# -----------------------------------
if self.verbose:
print('Accumulating metric data')
metric_data_list = MetricDataList()
for class_name in self.cfg.class_names:
for dist_th in self.cfg.dist_ths:
md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th)
metric_data_list.set(class_name, dist_th, md)
# -----------------------------------
# Step 2: Calculate metrics from the data.
# -----------------------------------
if self.verbose:
print('Calculating metrics')
metrics = DetectionMetrics(self.cfg)
for class_name in self.cfg.class_names:
for dist_th in self.cfg.dist_ths:
metric_data = metric_data_list[(class_name, dist_th)]
ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
metrics.add_label_ap(class_name, dist_th, ap)
for metric_name in TP_METRICS:
metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
tp = np.nan
elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
tp = np.nan
else:
tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
metrics.add_label_tp(class_name, metric_name, tp)
metrics.add_runtime(time.time() - start_time)
return metrics, metric_data_list
def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None:
"""
Renders various PR and TP curves.
:param metrics: DetectionMetrics instance.
:param md_list: MetricDataList instance.
"""
def savepath(name):
return os.path.join(self.plot_dir, name + '.pdf')
summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall,
dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))
for detection_name in self.cfg.class_names:
class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall,
savepath=savepath(detection_name + '_pr'))
class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp,
savepath=savepath(detection_name + '_tp'))
for dist_th in self.cfg.dist_ths:
dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall,
savepath=savepath('dist_pr_' + str(dist_th)))
def main(self,
plot_examples: int = 0,
render_curves: bool = True) -> Dict[str, Any]:
"""
Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
:param plot_examples: How many example visualizations to write to disk.
:param render_curves: Whether to render PR and TP curves to disk.
:return: A dict that stores the high-level metrics and meta data.
"""
if plot_examples > 0:
# Select a random but fixed subset to plot.
random.seed(43)
sample_tokens = list(self.sample_tokens)
random.shuffle(sample_tokens)
sample_tokens = sample_tokens[:plot_examples]
# Visualize samples.
example_dir = os.path.join(self.output_dir, 'examples')
if not os.path.isdir(example_dir):
os.mkdir(example_dir)
for sample_token in sample_tokens:
visualize_sample(self.nusc,
sample_token,
self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
# Don't render test GT.
self.pred_boxes,
eval_range=max(self.cfg.class_range.values()),
savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))
# Run evaluation.
metrics, metric_data_list = self.evaluate()
# Render PR and TP curves.
if render_curves:
self.render(metrics, metric_data_list)
# Dump the metric data, meta and metrics to disk.
if self.verbose:
print('Saving metrics to: %s' % self.output_dir)
metrics_summary = metrics.serialize()
metrics_summary['meta'] = self.meta.copy()
with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
json.dump(metrics_summary, f, indent=2)
with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
json.dump(metric_data_list.serialize(), f, indent=2)
# Print high-level metrics.
print('mAP: %.4f' % (metrics_summary['mean_ap']))
err_name_mapping = {
'trans_err': 'mATE',
'scale_err': 'mASE',
'orient_err': 'mAOE',
'vel_err': 'mAVE',
'attr_err': 'mAAE'
}
for tp_name, tp_val in metrics_summary['tp_errors'].items():
print('%s: %.4f' % (err_name_mapping[tp_name], tp_val))
print('NDS: %.4f' % (metrics_summary['nd_score']))
print('Eval time: %.1fs' % metrics_summary['eval_time'])
return metrics_summary
if __name__ == "__main__":
# Settings.
parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('result_path', type=str, help='The submission as a JSON file.')
parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',
help='Folder to store result metrics, graphs and example visualizations.')
parser.add_argument('--eval_set', type=str, default='val',
help='Which dataset split to evaluate on, train, val or test.')
parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',
help='Default nuScenes data directory.')
parser.add_argument('--version', type=str, default='v1.0-trainval',
help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
parser.add_argument('--config_name', type=str, default='cvpr_2019',
help='Name of the configuration to use for evaluation, e.g. cvpr_2019.')
parser.add_argument('--plot_examples', type=int, default=10,
help='How many example visualizations to write to disk.')
parser.add_argument('--render_curves', type=int, default=1,
help='Whether to render PR and TP curves to disk.')
parser.add_argument('--verbose', type=int, default=1,
help='Whether to print to stdout.')
args = parser.parse_args()
result_path_ = os.path.expanduser(args.result_path)
output_dir_ = os.path.expanduser(args.output_dir)
eval_set_ = args.eval_set
dataroot_ = args.dataroot
version_ = args.version
config_name_ = args.config_name
plot_examples_ = args.plot_examples
render_curves_ = bool(args.render_curves)
verbose_ = bool(args.verbose)
cfg_ = config_factory(config_name_)
nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
output_dir=output_dir_, verbose=verbose_)
nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
| [
"nuscenes.eval.detection.config.config_factory",
"nuscenes.eval.detection.algo.calc_ap",
"argparse.ArgumentParser",
"nuscenes.eval.detection.data_classes.MetricDataList",
"nuscenes.NuScenes",
"nuscenes.eval.detection.loaders.filter_eval_boxes",
"os.path.isdir",
"os.mkdir",
"nuscenes.eval.detection.data_classes.EvalBoxes",
"os.path.expanduser",
"nuscenes.eval.detection.loaders.load_prediction",
"random.shuffle",
"nuscenes.eval.detection.loaders.load_gt",
"nuscenes.eval.detection.data_classes.DetectionMetrics",
"time.time",
"nuscenes.eval.detection.algo.accumulate",
"os.makedirs",
"nuscenes.eval.detection.loaders.add_center_dist",
"os.path.join",
"random.seed",
"nuscenes.eval.detection.algo.calc_tp",
"json.dump"
]
| [((9836, 9971), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate nuScenes result submission."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Evaluate nuScenes result submission.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (9859, 9971), False, 'import argparse\n'), ((11370, 11406), 'os.path.expanduser', 'os.path.expanduser', (['args.result_path'], {}), '(args.result_path)\n', (11388, 11406), False, 'import os\n'), ((11425, 11460), 'os.path.expanduser', 'os.path.expanduser', (['args.output_dir'], {}), '(args.output_dir)\n', (11443, 11460), False, 'import os\n'), ((11717, 11745), 'nuscenes.eval.detection.config.config_factory', 'config_factory', (['config_name_'], {}), '(config_name_)\n', (11731, 11745), False, 'from nuscenes.eval.detection.config import config_factory\n'), ((11758, 11822), 'nuscenes.NuScenes', 'NuScenes', ([], {'version': 'version_', 'verbose': 'verbose_', 'dataroot': 'dataroot_'}), '(version=version_, verbose=verbose_, dataroot=dataroot_)\n', (11766, 11822), False, 'from nuscenes import NuScenes\n'), ((2651, 2689), 'os.path.join', 'os.path.join', (['self.output_dir', '"""plots"""'], {}), "(self.output_dir, 'plots')\n", (2663, 2689), False, 'import os\n'), ((2921, 3007), 'nuscenes.eval.detection.loaders.load_prediction', 'load_prediction', (['self.result_path', 'self.cfg.max_boxes_per_sample'], {'verbose': 'verbose'}), '(self.result_path, self.cfg.max_boxes_per_sample, verbose=\n verbose)\n', (2936, 3007), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((3027, 3077), 'nuscenes.eval.detection.loaders.load_gt', 'load_gt', (['self.nusc', 'self.eval_set'], {'verbose': 'verbose'}), '(self.nusc, self.eval_set, verbose=verbose)\n', (3034, 3077), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((3296, 3334), 'nuscenes.eval.detection.loaders.add_center_dist', 'add_center_dist', (['nusc', 'self.pred_boxes'], {}), '(nusc, self.pred_boxes)\n', (3311, 3334), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((3359, 3395), 'nuscenes.eval.detection.loaders.add_center_dist', 'add_center_dist', (['nusc', 'self.gt_boxes'], {}), '(nusc, self.gt_boxes)\n', (3374, 3395), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((3543, 3622), 'nuscenes.eval.detection.loaders.filter_eval_boxes', 'filter_eval_boxes', (['nusc', 'self.pred_boxes', 'self.cfg.class_range'], {'verbose': 'verbose'}), '(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)\n', (3560, 3622), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((3723, 3800), 'nuscenes.eval.detection.loaders.filter_eval_boxes', 'filter_eval_boxes', (['nusc', 'self.gt_boxes', 'self.cfg.class_range'], {'verbose': 'verbose'}), '(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)\n', (3740, 3800), False, 'from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\n'), ((4077, 4088), 'time.time', 'time.time', ([], {}), '()\n', (4086, 4088), False, 'import time\n'), ((4362, 4378), 'nuscenes.eval.detection.data_classes.MetricDataList', 'MetricDataList', ([], {}), '()\n', (4376, 4378), False, 'from nuscenes.eval.detection.data_classes import DetectionConfig, 
MetricDataList, DetectionMetrics, EvalBoxes\n'), ((4867, 4893), 'nuscenes.eval.detection.data_classes.DetectionMetrics', 'DetectionMetrics', (['self.cfg'], {}), '(self.cfg)\n', (4883, 4893), False, 'from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes\n'), ((2705, 2735), 'os.path.isdir', 'os.path.isdir', (['self.output_dir'], {}), '(self.output_dir)\n', (2718, 2735), False, 'import os\n'), ((2749, 2777), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {}), '(self.output_dir)\n', (2760, 2777), False, 'import os\n'), ((2793, 2821), 'os.path.isdir', 'os.path.isdir', (['self.plot_dir'], {}), '(self.plot_dir)\n', (2806, 2821), False, 'import os\n'), ((2835, 2861), 'os.makedirs', 'os.makedirs', (['self.plot_dir'], {}), '(self.plot_dir)\n', (2846, 2861), False, 'import os\n'), ((6158, 6200), 'os.path.join', 'os.path.join', (['self.plot_dir', "(name + '.pdf')"], {}), "(self.plot_dir, name + '.pdf')\n", (6170, 6200), False, 'import os\n'), ((7596, 7611), 'random.seed', 'random.seed', (['(43)'], {}), '(43)\n', (7607, 7611), False, 'import random\n'), ((7677, 7706), 'random.shuffle', 'random.shuffle', (['sample_tokens'], {}), '(sample_tokens)\n', (7691, 7706), False, 'import random\n'), ((7825, 7866), 'os.path.join', 'os.path.join', (['self.output_dir', '"""examples"""'], {}), "(self.output_dir, 'examples')\n", (7837, 7866), False, 'import os\n'), ((8999, 9038), 'json.dump', 'json.dump', (['metrics_summary', 'f'], {'indent': '(2)'}), '(metrics_summary, f, indent=2)\n', (9008, 9038), False, 'import json\n'), ((4494, 4580), 'nuscenes.eval.detection.algo.accumulate', 'accumulate', (['self.gt_boxes', 'self.pred_boxes', 'class_name', 'self.cfg.dist_fcn', 'dist_th'], {}), '(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn,\n dist_th)\n', (4504, 4580), False, 'from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp\n'), ((5079, 5144), 'nuscenes.eval.detection.algo.calc_ap', 'calc_ap', (['metric_data', 'self.cfg.min_recall', 'self.cfg.min_precision'], {}), '(metric_data, self.cfg.min_recall, self.cfg.min_precision)\n', (5086, 5144), False, 'from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp\n'), ((5793, 5804), 'time.time', 'time.time', ([], {}), '()\n', (5802, 5804), False, 'import time\n'), ((7886, 7912), 'os.path.isdir', 'os.path.isdir', (['example_dir'], {}), '(example_dir)\n', (7899, 7912), False, 'import os\n'), ((7930, 7951), 'os.mkdir', 'os.mkdir', (['example_dir'], {}), '(example_dir)\n', (7938, 7951), False, 'import os\n'), ((8921, 8974), 'os.path.join', 'os.path.join', (['self.output_dir', '"""metrics_summary.json"""'], {}), "(self.output_dir, 'metrics_summary.json')\n", (8933, 8974), False, 'import os\n'), ((9057, 9110), 'os.path.join', 'os.path.join', (['self.output_dir', '"""metrics_details.json"""'], {}), "(self.output_dir, 'metrics_details.json')\n", (9069, 9110), False, 'import os\n'), ((5643, 5697), 'nuscenes.eval.detection.algo.calc_tp', 'calc_tp', (['metric_data', 'self.cfg.min_recall', 'metric_name'], {}), '(metric_data, self.cfg.min_recall, metric_name)\n', (5650, 5697), False, 'from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp\n'), ((8169, 8180), 'nuscenes.eval.detection.data_classes.EvalBoxes', 'EvalBoxes', ([], {}), '()\n', (8178, 8180), False, 'from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes\n')] |
# Copyright (c) 2014 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import TestCase
import spdx
from spdx.parsers.tagvalue import Parser
from spdx.parsers.lexers.tagvalue import Lexer
from spdx.parsers.tagvaluebuilders import Builder
from spdx.parsers.loggers import StandardLogger
from spdx.version import Version
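# Tests for the SPDX tag/value lexer and parser.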
class TestLexer(TestCase):
maxDiff = None
def setUp(self):
self.l = Lexer()
self.l.build()
def test_document(self):
data = '''
SPDXVersion: SPDX-2.1
# Comment.
DataLicense: CC0-1.0
DocumentName: Sample_Document-V2.1
SPDXID: SPDXRef-DOCUMENT
DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301
DocumentComment: <text>This is a sample spreadsheet</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 4)
self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 4)
self.token_assert_helper(self.l.token(), 'DOC_NAME', 'DocumentName', 5)
self.token_assert_helper(self.l.token(), 'LINE', 'Sample_Document-V2.1',
5)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 6)
self.token_assert_helper(self.l.token(), 'DOC_NAMESPACE',
'DocumentNamespace', 7)
self.token_assert_helper(self.l.token(), 'LINE',
'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301',
7)
self.token_assert_helper(self.l.token(), 'DOC_COMMENT', 'DocumentComment', 8)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>This is a sample spreadsheet</text>', 8)
def test_external_document_references(self):
data = '''
ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF',
'ExternalDocumentRef', 2)
self.token_assert_helper(self.l.token(), 'DOC_REF_ID',
'DocumentRef-spdx-tool-2.1', 2)
self.token_assert_helper(self.l.token(), 'DOC_URI',
'http://spdx.org/spdxdocs/spdx-tools-v2.1-3F25'
'04E0-4F89-41D3-9A0C-0305E82C3301', 2)
self.token_assert_helper(self.l.token(), 'EXT_DOC_REF_CHKSUM',
'SHA1: '
'd6a770ba38583ed4bb4525bd96e50461655d2759', 2)
def test_creation_info(self):
data = '''
## Creation Information
Creator: Person: <NAME>
Creator: Organization: Source Auditor Inc.
Creator: Tool: SourceAuditor-V1.2
Created: 2010-02-03T00:00:00Z
CreatorComment: <text>This is an example of an SPDX
spreadsheet format</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 3)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 3)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 4)
self.token_assert_helper(self.l.token(), 'ORG_VALUE', 'Organization: Source Auditor Inc.', 4)
self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 5)
self.token_assert_helper(self.l.token(), 'TOOL_VALUE', 'Tool: SourceAuditor-V1.2', 5)
self.token_assert_helper(self.l.token(), 'CREATED', 'Created', 6)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-03T00:00:00Z', 6)
def test_review_info(self):
data = '''
Reviewer: Person: Joe Reviewer
ReviewDate: 2010-02-10T00:00:00Z
ReviewComment: <text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'REVIEWER', 'Reviewer', 2)
self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: <NAME>", 2)
self.token_assert_helper(self.l.token(), 'REVIEW_DATE', 'ReviewDate', 3)
self.token_assert_helper(self.l.token(), 'DATE', '2010-02-10T00:00:00Z', 3)
self.token_assert_helper(self.l.token(), 'REVIEW_COMMENT', 'ReviewComment', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '''<text>This is just an example.
Some of the non-standard licenses look like they are actually
BSD 3 clause licenses</text>''', 4)
    def test_package(self):
data = '''
SPDXID: SPDXRef-Package
FilesAnalyzed: False
PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)
ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:
ExternalRefComment: <text>Some comment about the package.</text>
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2)
self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 3)
self.token_assert_helper(self.l.token(), 'LINE', 'False', 3)
self.token_assert_helper(self.l.token(), 'PKG_CHKSUM', 'PackageChecksum', 4)
self.token_assert_helper(self.l.token(), 'CHKSUM', 'SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 4)
self.token_assert_helper(self.l.token(), 'PKG_VERF_CODE', 'PackageVerificationCode', 5)
self.token_assert_helper(self.l.token(), 'LINE', '4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)', 5)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF', 'ExternalRef', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 6)
self.token_assert_helper(self.l.token(), 'PKG_EXT_REF_COMMENT', 'ExternalRefComment', 7)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some comment about the package.</text>', 7)
def test_unknown_tag(self):
data = '''
SomeUnknownTag: SomeUnknownValue
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'UNKNOWN_TAG', 'SomeUnknownTag', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SomeUnknownValue', 2)
def test_snippet(self):
data = '''
SnippetSPDXID: SPDXRef-Snippet
SnippetLicenseComments: <text>Some lic comment.</text>
SnippetCopyrightText: <text>Some cr text.</text>
SnippetComment: <text>Some snippet comment.</text>
SnippetName: from linux kernel
SnippetFromFileSPDXID: SPDXRef-DoapSource
SnippetLicenseConcluded: Apache-2.0
LicenseInfoInSnippet: Apache-2.0
'''
self.l.input(data)
self.token_assert_helper(self.l.token(), 'SNIPPET_SPDX_ID', 'SnippetSPDXID', 2)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Snippet', 2)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_COMMENT', 'SnippetLicenseComments', 3)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some lic comment.</text>', 3)
self.token_assert_helper(self.l.token(), 'SNIPPET_CR_TEXT', 'SnippetCopyrightText', 4)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some cr text.</text>', 4)
self.token_assert_helper(self.l.token(), 'SNIPPET_COMMENT', 'SnippetComment', 5)
self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some snippet comment.</text>', 5)
self.token_assert_helper(self.l.token(), 'SNIPPET_NAME', 'SnippetName', 6)
self.token_assert_helper(self.l.token(), 'LINE', 'from linux kernel', 6)
self.token_assert_helper(self.l.token(), 'SNIPPET_FILE_SPDXID',
'SnippetFromFileSPDXID', 7)
self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DoapSource', 7)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_CONC',
'SnippetLicenseConcluded', 8)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 8)
self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_INFO',
'LicenseInfoInSnippet', 9)
self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 9)
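    # Helper: assert a single token's type, value and source line number.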
def token_assert_helper(self, token, ttype, value, line):
assert token.type == ttype
assert token.value == value
assert token.lineno == line
class TestParser(TestCase):
maxDiff = None
document_str = '\n'.join([
'SPDXVersion: SPDX-2.1',
'DataLicense: CC0-1.0',
'DocumentName: Sample_Document-V2.1',
'SPDXID: SPDXRef-DOCUMENT',
'DocumentComment: <text>Sample Comment</text>',
'DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
])
creation_str = '\n'.join([
'Creator: Person: Bob (<EMAIL>)',
'Creator: Organization: Acme.',
'Created: 2010-02-03T00:00:00Z',
'CreatorComment: <text>Sample Comment</text>'
])
review_str = '\n'.join([
'Reviewer: Person: Bob the Reviewer',
'ReviewDate: 2010-02-10T00:00:00Z',
'ReviewComment: <text>Bob was Here.</text>',
'Reviewer: Person: Alice the Reviewer',
'ReviewDate: 2011-02-10T00:00:00Z',
'ReviewComment: <text>Alice was also here.</text>'
])
package_str = '\n'.join([
'PackageName: Test',
'SPDXID: SPDXRef-Package',
'PackageVersion: Version 0.9.2',
'PackageDownloadLocation: http://example.com/test',
'FilesAnalyzed: True',
'PackageSummary: <text>Test package</text>',
'PackageSourceInfo: <text>Version 1.0 of test</text>',
'PackageFileName: test-1.0.zip',
'PackageSupplier: Organization:ACME',
'PackageOriginator: Organization:ACME',
'PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (something.rdf, something.txt)',
'PackageDescription: <text>A package.</text>',
'PackageComment: <text>Comment on the package.</text>',
'PackageCopyrightText: <text> Copyright 2014 Acme Inc.</text>',
'PackageLicenseDeclared: Apache-2.0',
'PackageLicenseConcluded: (LicenseRef-2.0 and Apache-2.0)',
'PackageLicenseInfoFromFiles: Apache-1.0',
'PackageLicenseInfoFromFiles: Apache-2.0',
'PackageLicenseComments: <text>License Comments</text>',
'ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:',
'ExternalRefComment: <text>Some comment about the package.</text>'
])
file_str = '\n'.join([
'FileName: testfile.java',
'SPDXID: SPDXRef-File',
'FileType: SOURCE',
'FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12',
'LicenseConcluded: Apache-2.0',
'LicenseInfoInFile: Apache-2.0',
'FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>',
'ArtifactOfProjectName: AcmeTest',
'ArtifactOfProjectHomePage: http://www.acme.org/',
'ArtifactOfProjectURI: http://www.acme.org/',
'FileComment: <text>Very long file</text>'
])
unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue'
snippet_str = '\n'.join([
'SnippetSPDXID: SPDXRef-Snippet',
'SnippetLicenseComments: <text>Some lic comment.</text>',
'SnippetCopyrightText: <text> Copyright 2008-2010 <NAME> </text>',
'SnippetComment: <text>Some snippet comment.</text>',
'SnippetName: from linux kernel',
'SnippetFromFileSPDXID: SPDXRef-DoapSource',
'SnippetLicenseConcluded: Apache-2.0',
'LicenseInfoInSnippet: Apache-2.0',
])
complete_str = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(document_str, creation_str, review_str, package_str, file_str, snippet_str)
def setUp(self):
self.p = Parser(Builder(), StandardLogger())
self.p.build()
def test_doc(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.version == Version(major=2, minor=1)
assert document.data_license.identifier == 'CC0-1.0'
assert document.name == 'Sample_Document-V2.1'
assert document.spdx_id == 'SPDXRef-DOCUMENT'
assert document.comment == 'Sample Comment'
assert document.namespace == 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301'
def test_creation_info(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.creation_info.creators) == 2
assert document.creation_info.comment == 'Sample Comment'
assert (document.creation_info.created_iso_format == '2010-02-03T00:00:00Z')
def test_review(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.reviews) == 2
def test_package(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert document.package.name == 'Test'
assert document.package.spdx_id == 'SPDXRef-Package'
assert document.package.version == 'Version 0.9.2'
assert len(document.package.licenses_from_files) == 2
assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0')
assert document.package.files_analyzed == True
assert document.package.comment == 'Comment on the package.'
assert document.package.pkg_ext_refs[-1].category == 'SECURITY'
assert document.package.pkg_ext_refs[-1].pkg_ext_ref_type == 'cpe23Type'
assert document.package.pkg_ext_refs[-1].locator == 'cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:'
assert document.package.pkg_ext_refs[-1].comment == 'Some comment about the package.'
def test_file(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.package.files) == 1
spdx_file = document.package.files[0]
assert spdx_file.name == 'testfile.java'
assert spdx_file.spdx_id == 'SPDXRef-File'
assert spdx_file.type == spdx.file.FileType.SOURCE
assert len(spdx_file.artifact_of_project_name) == 1
assert len(spdx_file.artifact_of_project_home) == 1
assert len(spdx_file.artifact_of_project_uri) == 1
def test_unknown_tag(self):
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
saved_out = sys.stdout
sys.stdout = StringIO()
document, error = self.p.parse(self.unknown_tag_str)
self.assertEqual(sys.stdout.getvalue(), 'Found unknown tag : SomeUnknownTag at line: 1\n')
sys.stdout = saved_out
assert error
assert document is not None
def test_snippet(self):
document, error = self.p.parse(self.complete_str)
assert document is not None
assert not error
assert len(document.snippet) == 1
assert document.snippet[-1].spdx_id == 'SPDXRef-Snippet'
assert document.snippet[-1].name == 'from linux kernel'
assert document.snippet[-1].comment == 'Some snippet comment.'
assert document.snippet[-1].copyright == ' Copyright 2008-2010 <NAME> '
assert document.snippet[-1].license_comment == 'Some lic comment.'
assert document.snippet[-1].snip_from_file_spdxid == 'SPDXRef-DoapSource'
assert document.snippet[-1].conc_lics.identifier == 'Apache-2.0'
assert document.snippet[-1].licenses_in_snippet[-1].identifier == 'Apache-2.0'
| [
"spdx.parsers.tagvaluebuilders.Builder",
"spdx.parsers.loggers.StandardLogger",
"spdx.version.Version",
"spdx.parsers.lexers.tagvalue.Lexer",
"sys.stdout.getvalue",
"io.StringIO"
]
| [((929, 936), 'spdx.parsers.lexers.tagvalue.Lexer', 'Lexer', ([], {}), '()\n', (934, 936), False, 'from spdx.parsers.lexers.tagvalue import Lexer\n'), ((16093, 16103), 'io.StringIO', 'StringIO', ([], {}), '()\n', (16101, 16103), False, 'from io import StringIO\n'), ((13202, 13211), 'spdx.parsers.tagvaluebuilders.Builder', 'Builder', ([], {}), '()\n', (13209, 13211), False, 'from spdx.parsers.tagvaluebuilders import Builder\n'), ((13213, 13229), 'spdx.parsers.loggers.StandardLogger', 'StandardLogger', ([], {}), '()\n', (13227, 13229), False, 'from spdx.parsers.loggers import StandardLogger\n'), ((13433, 13458), 'spdx.version.Version', 'Version', ([], {'major': '(2)', 'minor': '(1)'}), '(major=2, minor=1)\n', (13440, 13458), False, 'from spdx.version import Version\n'), ((16190, 16211), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (16209, 16211), False, 'import sys\n')] |
# Standard library and local helper imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime
mount_try = 1
not_yet = True
done = False
start_time = datetime.now()
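# when_* track mount timing: 0 if the share was already mounted at start,
# -1 if it was missing; replaced below by elapsed seconds once it appears.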
if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
when_usba = 0
else:
when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
when_usbb = 0
else:
when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
when_mycloud = 0
else:
when_mycloud = -1
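# Retry "sudo mount -a" (up to 30 attempts) until all three shares are
# present, recording how many seconds each one took to appear.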
while (mount_try < 30) and not_yet:
try:
usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
if not(usba_mounted and usbb_mounted and mycloud_mounted):
print("Something Needs mounting this is try number: ", mount_try)
subprocess_call(["sudo", "mount", "-a"])
mount_try += 1
usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
if not(usba_mounted) and usba_mounted_after:
when_usba = round((datetime.now() - start_time).total_seconds(),2)
if not(usbb_mounted) and usbb_mounted_after:
when_usbb = round((datetime.now() - start_time).total_seconds(),2)
if not(mycloud_mounted) and mycloud_mounted_after:
when_mycloud = round((datetime.now() - start_time).total_seconds(),2)
if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
print("Success at :",when_usba,when_usbb,when_mycloud, " secs from start")
not_yet = False
done = True
except:
print("Count: ", count," error")
time_sleep(1)
if done:
print("Great!")
else:
print("Failed to do all or drive_present.txt file not present; Times :",when_usba,when_usbb,when_mycloud)
while True:
time_sleep(20000)
| [
"datetime.datetime.now",
"subprocess.call",
"time.sleep",
"utility.fileexists"
]
| [((237, 251), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (249, 251), False, 'from datetime import datetime\n'), ((256, 307), 'utility.fileexists', 'fileexists', (['"""/home/rpi4-sftp/usb/drive_present.txt"""'], {}), "('/home/rpi4-sftp/usb/drive_present.txt')\n", (266, 307), False, 'from utility import fileexists\n'), ((353, 404), 'utility.fileexists', 'fileexists', (['"""/home/duck-sftp/usb/drive_present.txt"""'], {}), "('/home/duck-sftp/usb/drive_present.txt')\n", (363, 404), False, 'from utility import fileexists\n'), ((450, 498), 'utility.fileexists', 'fileexists', (['"""/home/pi/mycloud/drive_present.txt"""'], {}), "('/home/pi/mycloud/drive_present.txt')\n", (460, 498), False, 'from utility import fileexists\n'), ((1831, 1844), 'time.sleep', 'time_sleep', (['(1)'], {}), '(1)\n', (1841, 1844), True, 'from time import sleep as time_sleep\n'), ((2005, 2022), 'time.sleep', 'time_sleep', (['(20000)'], {}), '(20000)\n', (2015, 2022), True, 'from time import sleep as time_sleep\n'), ((610, 661), 'utility.fileexists', 'fileexists', (['"""/home/rpi4-sftp/usb/drive_present.txt"""'], {}), "('/home/rpi4-sftp/usb/drive_present.txt')\n", (620, 661), False, 'from utility import fileexists\n'), ((680, 731), 'utility.fileexists', 'fileexists', (['"""/home/duck-sftp/usb/drive_present.txt"""'], {}), "('/home/duck-sftp/usb/drive_present.txt')\n", (690, 731), False, 'from utility import fileexists\n'), ((753, 801), 'utility.fileexists', 'fileexists', (['"""/home/pi/mycloud/drive_present.txt"""'], {}), "('/home/pi/mycloud/drive_present.txt')\n", (763, 801), False, 'from utility import fileexists\n'), ((1022, 1073), 'utility.fileexists', 'fileexists', (['"""/home/rpi4-sftp/usb/drive_present.txt"""'], {}), "('/home/rpi4-sftp/usb/drive_present.txt')\n", (1032, 1073), False, 'from utility import fileexists\n'), ((1098, 1149), 'utility.fileexists', 'fileexists', (['"""/home/duck-sftp/usb/drive_present.txt"""'], {}), "('/home/duck-sftp/usb/drive_present.txt')\n", (1108, 1149), False, 'from utility import fileexists\n'), ((1177, 1225), 'utility.fileexists', 'fileexists', (['"""/home/pi/mycloud/drive_present.txt"""'], {}), "('/home/pi/mycloud/drive_present.txt')\n", (1187, 1225), False, 'from utility import fileexists\n'), ((938, 978), 'subprocess.call', 'subprocess_call', (["['sudo', 'mount', '-a']"], {}), "(['sudo', 'mount', '-a'])\n", (953, 978), True, 'from subprocess import call as subprocess_call\n'), ((1297, 1311), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1309, 1311), False, 'from datetime import datetime\n'), ((1416, 1430), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1428, 1430), False, 'from datetime import datetime\n'), ((1544, 1558), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1556, 1558), False, 'from datetime import datetime\n')] |