prompt | completion | api
---|---|---
stringlengths 19-879k | stringlengths 3-53.8k | stringlengths 8-59
import random
import numpy as np
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=self.mosaic_border) # border to remove
return img4, labels4
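# Hedged worked example of the label conversion above (tile size and offsets are assumed
# values, not taken from the source): each normalized (class, xc, yc, w, h) row is scaled
# by the tile's width/height and shifted by its placement offset (padw, padh) to give
# pixel (x1, y1, x2, y2) coordinates inside the 2s x 2s mosaic canvas.
_w, _h, _padw, _padh = 640, 480, 320, 240               # assumed tile size and offsets
_x = np.array([[0, 0.5, 0.5, 0.25, 0.5]])               # one normalized xywh label
_labels = _x.copy()
_labels[:, 1] = _w * (_x[:, 1] - _x[:, 3] / 2) + _padw  # x1 = 640*0.375 + 320 = 560
_labels[:, 2] = _h * (_x[:, 2] - _x[:, 4] / 2) + _padh  # y1 = 480*0.25  + 240 = 360
_labels[:, 3] = _w * (_x[:, 1] + _x[:, 3] / 2) + _padw  # x2 = 640*0.625 + 320 = 720
_labels[:, 4] = _h * (_x[:, 2] + _x[:, 4] / 2) + _padh  # y2 = 480*0.75  + 240 = 600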
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
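# Hedged usage sketch, not from the original source: create_dataloader expects an `opt`
# object exposing `single_cls` and a `hyp` dict with the augmentation keys used by
# load_mosaic ('degrees', 'translate', 'scale', 'shear'). The path and values below are
# illustrative assumptions only.
# from types import SimpleNamespace
# _opt = SimpleNamespace(single_cls=False)
# _hyp = {'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0}
# _loader, _dataset = create_dataloader('data/train.txt', imgsz=640, batch_size=16,
#                                       stride=32, opt=_opt, hyp=_hyp, augment=True)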
class DetectionDataset(VisionDataset):
def __init__(self, path, img_size=640, batch_size=16,
augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
'''Dataset for detection with augmentation at batch level.
Args:
path(List[str], str): list of file(s) containing image paths or the parent directory of images
'''
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file containing image paths
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # image directory
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index per image
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.augment = augment
self.hyp = hyp
self.img_size = img_size
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
'''
Label paths mirror image paths: the 'images' directory component is replaced by
'labels' and the image extension is replaced by '.txt'.
'''
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
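# Hedged numeric illustration of the line above (values assumed): with img_size=640,
# stride=32, pad=0 and a batch whose largest aspect ratio (h/w) is maxi=0.75,
# shapes[i] = [0.75, 1] and np.ceil(np.array([0.75, 1]) * 640 / 32) * 32 -> [480, 640],
# i.e. that batch is letterboxed to 480x640 rather than the full 640x640 square.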
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
l = self.labels[i] # label
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
no_agents = 20
no_bandits = 100
mean = np.load("mean.npy")
mean = mean.tolist()
fig, ax = plt.subplots(1)
M = [[0 for i in range(10)]for j in range(10)]
for j in range(10):
for i in range(10):
M[j][i] = mean[j*10+i]
o = np.argmax(np.array(mean))
from __future__ import absolute_import
import pytest
try:
import rasterio
except ImportError:
rasterio = None
rasterio_available = pytest.mark.skipif(rasterio is None, reason="requires rasterio")
from os import path
from itertools import product
import datashader as ds
import xarray as xr
import numpy as np
import dask.array as da
from datashader.resampling import compute_chunksize
BASE_PATH = path.split(__file__)[0]
DATA_PATH = path.abspath(path.join(BASE_PATH, 'data'))
TEST_RASTER_PATH = path.join(DATA_PATH, 'world.rgb.tif')
@pytest.fixture
def cvs():
with xr.open_rasterio(TEST_RASTER_PATH) as src:
res = ds.utils.calc_res(src)
left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res)
return ds.Canvas(plot_width=2,
plot_height=2,
x_range=(left, right),
y_range=(bottom, top))
@rasterio_available
def test_raster_aggregate_default(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
agg = cvs.raster(src)
assert agg is not None
@rasterio_available
def test_raster_aggregate_nearest(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, upsample_method='nearest')
assert agg is not None
@pytest.mark.skip('use_overviews opt no longer supported; may be re-implemented in the future')
@rasterio_available
def test_raster_aggregate_with_overviews(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, use_overviews=True)
assert agg is not None
@pytest.mark.skip('use_overviews opt no longer supported; may be re-implemented in the future')
@rasterio_available
def test_raster_aggregate_without_overviews(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
agg = cvs.raster(src, use_overviews=False)
assert agg is not None
@rasterio_available
def test_out_of_bounds_return_correct_size(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
cvs = ds.Canvas(plot_width=2,
plot_height=2,
x_range=[1e10, 1e20],
y_range=[1e10, 1e20])
try:
cvs.raster(src)
except ValueError:
pass
else:
assert False
@rasterio_available
def test_partial_extent_returns_correct_size():
with xr.open_rasterio(TEST_RASTER_PATH) as src:
res = ds.utils.calc_res(src)
left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res)
half_width = (right - left) / 2
half_height = (top - bottom) / 2
cvs = ds.Canvas(plot_width=512,
plot_height=256,
x_range=[left-half_width, left+half_width],
y_range=[bottom-half_height, bottom+half_height])
agg = cvs.raster(src)
assert agg.shape == (3, 256, 512)
assert agg is not None
@rasterio_available
def test_partial_extent_with_layer_returns_correct_size(cvs):
with xr.open_rasterio(TEST_RASTER_PATH) as src:
res = ds.utils.calc_res(src)
left, bottom, right, top = ds.utils.calc_bbox(src.x.values, src.y.values, res)
half_width = (right - left) / 2
half_height = (top - bottom) / 2
cvs = ds.Canvas(plot_width=512,
plot_height=256,
x_range=[left-half_width, left+half_width],
y_range=[bottom-half_height, bottom+half_height])
agg = cvs.raster(src, layer=1)
assert agg.shape == (256, 512)
assert agg is not None
@rasterio_available
def test_calc_res():
"""Assert that resolution is calculated correctly when using the xarray
rasterio backend.
"""
with xr.open_rasterio(TEST_RASTER_PATH) as src:
xr_res = ds.utils.calc_res(src)
with rasterio.open(TEST_RASTER_PATH) as src:
rio_res = src.res
assert np.allclose(xr_res, rio_res)
@rasterio_available
def test_calc_bbox():
"""Assert that bounding boxes are calculated correctly when using the xarray
rasterio backend.
"""
with xr.open_rasterio(TEST_RASTER_PATH) as src:
xr_res = ds.utils.calc_res(src)
xr_bounds = ds.utils.calc_bbox(src.x.values, src.y.values, xr_res)
with rasterio.open(TEST_RASTER_PATH) as src:
rio_bounds = src.bounds
assert np.allclose(xr_bounds, rio_bounds, atol=1.0) # allow for absolute diff of 1.0
def test_raster_both_ascending():
"""
Assert raster with ascending x- and y-coordinates is aggregated correctly.
"""
xs = np.arange(10)
ys = np.arange(5)
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, arr)
assert np.allclose(agg.X.values, xs)
assert np.allclose(agg.Y.values, ys)
def test_raster_both_ascending_partial_range():
"""
Assert raster with ascending x- and y-coordinates and a partial canvas
range is aggregated correctly.
"""
xs = np.arange(10)
ys = np.arange(5)
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(1, 3)))
assert np.allclose(agg.X.values, xs[1:8])
assert np.allclose(agg.Y.values, ys[1:4])
def test_raster_both_descending():
"""
Assert raster with descending x- and y-coordinates is aggregated correctly.
"""
xs = np.arange(10)[::-1]
ys = np.arange(5)[::-1]
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, arr)
assert np.allclose(agg.X.values, xs)
assert np.allclose(agg.Y.values, ys)
def test_raster_both_descending_partial_range():
"""
Assert raster with descending x- and y-coordinates and a partial canvas range
is aggregated correctly.
"""
xs = np.arange(10)[::-1]
ys = np.arange(5)[::-1]
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(7, 3, x_range=(.5, 7.5), y_range=(.5, 3.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, xarr.sel(Y=slice(3,1), X=slice(7, 1)).data)
assert np.allclose(agg.X.values, xs[2:9])
assert np.allclose(agg.Y.values, ys[1:4])
def test_raster_x_ascending_y_descending():
"""
Assert raster with ascending x- and descending y-coordinates is aggregated correctly.
"""
xs = np.arange(10)
ys = np.arange(5)[::-1]
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, arr)
assert np.allclose(agg.X.values, xs)
assert np.allclose(agg.Y.values, ys)
def test_raster_x_ascending_y_descending_partial_range():
"""
Assert raster with ascending x- and descending y-coordinates and a partial canvas range is aggregated correctly.
"""
xs = np.arange(10)
ys = np.arange(5)[::-1]
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(7, 2, x_range=(0.5, 7.5), y_range=(1.5, 3.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, xarr.sel(X=slice(1, 7), Y=slice(3, 2)).data)
assert np.allclose(agg.X.values, xs[1:8])
assert np.allclose(agg.Y.values, ys[1:3])
def test_raster_x_descending_y_ascending():
"""
Assert raster with descending x- and ascending y-coordinates is aggregated correctly.
"""
xs = np.arange(10)[::-1]
ys = np.arange(5)
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(10, 5, x_range=(-.5, 9.5), y_range=(-.5, 4.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, arr)
assert np.allclose(agg.X.values, xs)
assert np.allclose(agg.Y.values, ys)
def test_raster_x_descending_y_ascending_partial_range():
"""
Assert raster with descending x- and ascending y-coordinates and a partial canvas range is aggregated correctly.
"""
xs = np.arange(10)[::-1]
ys = np.arange(5)
arr = xs*ys[np.newaxis].T
xarr = xr.DataArray(arr, coords={'X': xs, 'Y': ys}, dims=['Y', 'X'])
cvs = ds.Canvas(7, 2, x_range=(.5, 7.5), y_range=(1.5, 3.5))
agg = cvs.raster(xarr)
assert np.allclose(agg.data, xarr.sel(X=slice(7, 1), Y=slice(2, 3)).data)
assert np.allclose(agg.X.values, xs[2:9])
assert np.allclose(agg.Y.values, ys[2:4])
def test_raster_integer_nan_value():
"""
Ensure custom nan_value is handled correctly for integer arrays.
"""
cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1))
array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]])
coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}
xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x'])
agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999)
expected = np.array([[4, 7], [9, 11]])
assert np.allclose(agg.data, expected)
assert agg.data.dtype.kind == 'i'
assert np.allclose(agg.x.values, np.array([0.25, 0.75]))
assert np.allclose(agg.y.values, np.array([0.25, 0.75]))
def test_raster_float_nan_value():
"""
Ensure default nan_value is handled correctly for float arrays
"""
cvs = ds.Canvas(plot_height=2, plot_width=2, x_range=(0, 1), y_range=(0,1))
array = np.array([[np.NaN, 1., 2., 3.], [4., np.NaN, 6., 7.], [8., 9., np.NaN, 11.]])
coords = {'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}
xr_array = xr.DataArray(array, coords=coords, dims=['y', 'x'])
agg = cvs.raster(xr_array, downsample_method='max')
expected = np.array([[4, 7], [9, 11]])
assert np.allclose(agg.data, expected)
assert agg.data.dtype.kind == 'f'
assert np.allclose(agg.x.values, np.array([0.25, 0.75]))
assert np.allclose(agg.y.values, np.array([0.25, 0.75]))
def test_raster_integer_nan_value_padding():
"""
Ensure that the padding values respect the supplied nan_value.
"""
cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 2), y_range=(0, 2))
array = np.array([[9999, 1, 2, 3], [4, 9999, 6, 7], [8, 9, 9999, 11]])
xr_array = xr.DataArray(array, coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}, dims=['y', 'x'])
agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999)
expected = np.array([[4, 7, 9999], [9, 11, 9999], [9999, 9999, 9999]])
assert np.allclose(agg.data, expected)
assert agg.data.dtype.kind == 'i'
assert np.allclose(agg.x.values, np.array([1/3., 1.0, 5/3.]))
assert np.allclose(agg.y.values, np.array([1/3., 1.0, 5/3.]))
def test_raster_float_nan_value_padding():
"""
Ensure that the padding values respect the supplied nan_value.
"""
cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 2), y_range=(0, 2))
array = np.array([[np.nan, 1., 2., 3.], [4., np.nan, 6., 7.], [8., 9., np.nan, 11.]])
xr_array = xr.DataArray(array, coords={'x': np.linspace(0, 1, 4), 'y': np.linspace(0, 1, 3)}, dims=['y', 'x'])
agg = cvs.raster(xr_array, downsample_method='max')
expected = np.array([[4., 7., np.nan], [9., 11., np.nan], [np.nan, np.nan, np.nan]])
assert np.allclose(agg.data, expected, equal_nan=True)
assert agg.data.dtype.kind == 'f'
assert np.allclose(agg.x.values, np.array([1/3., 1.0, 5/3.]))
assert np.allclose(agg.y.values, np.array([1/3., 1.0, 5/3.]))
def test_raster_single_pixel_range():
"""
Ensure that canvas range covering a single pixel are handled correctly.
"""
cvs = ds.Canvas(plot_height=3, plot_width=3, x_range=(0, 0.1), y_range=(0, 0.1))
array = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
xr_array = xr.DataArray(array, dims=['y', 'x'],
coords={'x': np.linspace(0, 1, 4),
'y': np.linspace(0, 1, 3)})
agg = cvs.raster(xr_array, downsample_method='max', nan_value=9999)
expected = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert np.allclose(agg.data, expected)
assert agg.data.dtype.kind == 'i'
assert np.allclose(agg.x.values, np.array([1/60., 1/20., 1/12.]))
assert np.allclose(agg.y.values, np.array([1/60., 1/20., 1/12.]))
def test_raster_single_pixel_range_with_padding():
"""
Ensure that canvas range covering a single pixel and small area
beyond the defined data ranges is handled correctly.
"""
# The .301 value ensures that one pixel covers the edge of the input extent
cvs = ds.Canvas(plot_height=4, plot_width=6, x_range=(-0.5, 0.25), y_range=(-.5, 0.301))
cvs2 = ds.Canvas(plot_height=4, plot_width=6, x_range=(-0.5, 0.25), y_range=(-.5, 0.3))
array = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype='f')
xr_array = xr.DataArray(array, dims=['y', 'x'],
coords={'x': np.linspace(0.125, .875, 4),
'y': np.linspace(0.125, 0.625, 3)})
agg = cvs.raster(xr_array, downsample_method='max', nan_value=np.NaN)
agg2 = cvs2.raster(xr_array, downsample_method='max', nan_value=np.NaN)
expected = np.array([
[np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN],
[np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN],
[np.NaN, np.NaN, np.NaN, np.NaN, 0, 0],
[np.NaN, np.NaN, np.NaN, np.NaN, 0, 0]
])
expected2 = np.array([
[np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN],
[np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN],
[np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN],
[np.NaN, np.NaN, np.NaN, np.NaN, 0, 0]
])
assert np.allclose(agg.data, expected, equal_nan=True)
assert np.allclose(agg2.data, expected2, equal_nan=True)
assert agg.data.dtype.kind == 'f'
assert np.allclose(agg.x.values, np.array([-0.4375, -0.3125, -0.1875, -0.0625, 0.0625, 0.1875]))
assert np.allclose(agg.y.values, np.array([-0.399875, -0.199625, 0.000625, 0.200875]))
@pytest.mark.parametrize('in_size, out_size, agg', product(range(5, 8), range(2, 5), ['mean', 'min', 'max', 'first', 'last', 'var', 'std', 'mode']))
def test_raster_distributed_downsample(in_size, out_size, agg):
"""
Ensure that distributed regrid is equivalent to regular regrid.
"""
cvs = ds.Canvas(plot_height=out_size, plot_width=out_size)
vs = np.linspace(-1, 1, in_size)
xs, ys = np.meshgrid(vs, vs)
arr = np.sin(xs*ys)
darr = da.from_array(arr, (2, 2))
coords = [('y', range(in_size)), ('x', range(in_size))]
xr_darr = xr.DataArray(darr, coords=coords, name='z')
xr_arr = xr.DataArray(arr, coords=coords, name='z')
agg_arr = cvs.raster(xr_arr, agg=agg)
agg_darr = cvs.raster(xr_darr, agg=agg)
assert np.allclose(agg_arr.data, agg_darr.data.compute())
assert np.allclose(agg_arr.x.values, agg_darr.x.values)
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import assert_index_equal
from statsmodels.compat.platform import PLATFORM_WIN
from statsmodels.compat.python import lrange
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas import DataFrame, Series, date_range
import pytest
from scipy.interpolate import interp1d
from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
# Remove imports when range unit root test gets an R implementation
from statsmodels.tools.validation import array_like, bool_like
from statsmodels.tsa.arima_process import arma_acovf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import (
acf,
acovf,
adfuller,
arma_order_select_ic,
breakvar_heteroskedasticity_test,
ccovf,
coint,
grangercausalitytests,
innovations_algo,
innovations_filter,
kpss,
levinson_durbin,
levinson_durbin_pacf,
pacf,
pacf_burg,
pacf_ols,
pacf_yw,
range_unit_root_test,
zivot_andrews,
)
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def acovf_data():
rnd = np.random.RandomState(12345)
return rnd.randn(250)
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
x = data.data["realgdp"].values
y = data.data["infl"].values
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4)
cls.teststat = 0.97505319
cls.pvalue = 0.99399563
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
""""""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4)
cls.teststat = -1.8566374
cls.pvalue = 0.67682968
cls.critvalues = [-4.007, -3.437, -3.137]
# FIXME: do not leave commented-out
# class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
# TODO: get test values from R?
class TestADFNoConstant(CheckADF):
""""""
@classmethod
def setup_class(cls):
with pytest.warns(FutureWarning):
adfuller(cls.x, regression="nc", autolag=None, maxlag=4)
cls.res1 = adfuller(cls.x, regression="n", autolag=None, maxlag=4)
cls.teststat = 3.5227498
cls.pvalue = 0.99999
# Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
cls.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1)
cls.teststat = -4.3346988
cls.pvalue = 0.00038661
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1)
cls.teststat = -4.425093
cls.pvalue = 0.00199633
cls.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="n", autolag=None, maxlag=1)
cls.teststat = -2.4511596
cls.pvalue = 0.013747
# Stata does not return a p-value for noconstant
# this value is just taken from our results
cls.critvalues = [-2.587, -1.950, -1.617]
_, _1, _2, cls.store = adfuller(
cls.y, regression="n", autolag=None, maxlag=1, store=True
)
def test_store_str(self):
assert_equal(
self.store.__str__(), "Augmented Dickey-Fuller Test Results"
)
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load_pandas()
x = data.data["realgdp"]
filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv")
results = pd.read_csv(filename, delimiter=",")
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvar"]
# cls.acf = np.concatenate(([1.], cls.acf))
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False)
cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:, None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# FIXME: enable/xfail/skip or delete
# def pvalue(self):
# pass
# NOTE: should not need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
# Test Autocorrelation Function using FFT
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvarfft"]
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestACFMissing(CheckCorrGram):
# Test Autocorrelation Function using Missing
@classmethod
def setup_class(cls):
cls.x = np.concatenate((np.array([np.nan]), cls.x))
cls.acf = cls.results["acvar"] # drop and conservative
cls.qstat = cls.results["Q1"]
cls.res_drop = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False
)
cls.res_conservative = acf(
cls.x,
nlags=40,
qstat=True,
alpha=0.05,
fft=False,
missing="conservative",
)
cls.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive
cls.qstat_none = np.empty(40) * np.nan
cls.res_none = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False
)
def test_raise(self):
with pytest.raises(MissingDataError):
acf(
self.x,
nlags=40,
qstat=True,
fft=False,
alpha=0.05,
missing="raise",
)
def test_acf_none(self):
assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8)
def test_acf_drop(self):
assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8)
def test_acf_conservative(self):
assert_almost_equal(
self.res_conservative[0][1:41], self.acf, DECIMAL_8
)
def test_qstat_none(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3)
# FIXME: enable/xfail/skip or delete
# how to do this test? the correct q_stat depends on whether nobs=len(x) is
# used when x contains NaNs or whether nobs<len(x) when x contains NaNs
# def test_qstat_drop(self):
# assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
@classmethod
def setup_class(cls):
cls.pacfols = cls.results["PACOLS"]
cls.pacfyw = cls.results["PACYW"]
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:, None]
# from edited Stata ado file
res = [[-0.1375625, 0.1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
# check lag 0
assert_equal(centered[0], [0.0, 0.0])
assert_equal(confint[0], [1, 1])
assert_equal(pacfols[0], 1)
def test_ols_inefficient(self):
lag_len = 5
pacfols = pacf_ols(self.x, nlags=lag_len, efficient=False)
x = self.x.copy()
x -= x.mean()
n = x.shape[0]
lags = np.zeros((n - 5, 5))
lead = x[5:]
direct = np.empty(lag_len + 1)
direct[0] = 1.0
for i in range(lag_len):
lags[:, i] = x[5 - (i + 1) : -(i + 1)]
direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1]
assert_allclose(pacfols, direct, atol=1e-8)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="lda")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class TestBreakvarHeteroskedasticityTest(object):
from scipy.stats import chi2, f
def test_1d_input(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
# ~ F(2, 2), two-sided test
expected_pvalue = 2 * min(
self.f.cdf(expected_statistic, 2, 2),
self.f.sf(expected_statistic, 2, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_2d_input_with_missing_values(self):
input_residuals = np.array(
[
[0.0, 0.0, np.nan],
[1.0, np.nan, 1.0],
[2.0, 2.0, np.nan],
[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0],
[5.0, 5.0, 5.0],
[6.0, 6.0, 6.0],
[7.0, 7.0, 7.0],
[8.0, 8.0, 8.0],
]
)
expected_statistic = np.array(
[
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2)
/ (0.0 ** 2 + 1.0 ** 2 + 2.0 ** 2),
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2) / (0.0 ** 2 + 2.0 ** 2),
np.nan,
]
)
expected_pvalue = np.array(
[
2
* min(
self.f.cdf(expected_statistic[0], 3, 3),
self.f.sf(expected_statistic[0], 3, 3),
),
2
* min(
self.f.cdf(expected_statistic[1], 3, 2),
self.f.sf(expected_statistic[1], 3, 2),
),
np.nan,
]
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert_equal(actual_statistic, expected_statistic)
assert_equal(actual_pvalue, expected_pvalue)
@pytest.mark.parametrize(
"subset_length,expected_statistic,expected_pvalue",
[
(2, 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
(0.5, 10, 2 * min(f.cdf(10, 3, 3), f.sf(10, 3, 3))),
],
)
def test_subset_length(
self, subset_length, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
subset_length=subset_length,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
@pytest.mark.parametrize(
"alternative,expected_statistic,expected_pvalue",
[
("two-sided", 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
("decreasing", 1 / 41, f.sf(1 / 41, 2, 2)),
("increasing", 41, f.sf(41, 2, 2)),
],
)
def test_alternative(
self, alternative, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
alternative=alternative,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_use_chi2(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
expected_pvalue = 2 * min(
self.chi2.cdf(2 * expected_statistic, 2),
self.chi2.sf(2 * expected_statistic, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
use_f=False,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
y1 = data.data["realcons"].values
y2 = data.data["realgdp"].values
def test_tstat(self):
assert_almost_equal(self.coint_t, self.teststat, DECIMAL_4)
# this does not produce the old results anymore
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
@classmethod
def setup_class(cls):
# cls.coint_t = coint(cls.y1, cls.y2, trend="c")[0]
cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[
0
]
cls.teststat = -1.8208817
cls.teststat = -1.830170986148
def test_coint():
nobs = 200
scale_e = 1
const = [1, 0, 0.5, 0]
np.random.seed(123)
unit = np.random.randn(nobs).cumsum()
y = scale_e * np.random.randn(nobs, 4)
y[:, :2] += unit[:, None]
y += const
y = np.round(y, 4)
# FIXME: enable/xfail/skip or delete
for trend in []: # ['c', 'ct', 'ctt', 'n']:
print("\n", trend)
print(coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None))
# results from Stata egranger
res_egranger = {}
# trend = 'ct'
res = res_egranger["ct"] = {}
res[0] = [
-5.615251442239,
-4.406102369132,
-3.82866685109,
-3.532082997903,
]
res[1] = [
-5.63591313706,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[2] = [
-2.892029275027,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[3] = [-5.626932544079, -5.08363327039, -4.502469783057, -4.2031051091]
# trend = 'c'
res = res_egranger["c"] = {}
# first critical value res[0][1] has a discrepancy starting at 4th decimal
res[0] = [
-5.760696844656,
-3.952043522638,
-3.367006313729,
-3.065831247948,
]
# manually adjusted to have higher precision as in other cases
res[0][1] = -3.952321293401682
res[1] = [
-5.781087068772,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[2] = [
-2.477444137366,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[3] = [
-5.778205811661,
-4.735249216434,
-4.152738973763,
-3.852480848968,
]
# trend = 'ctt'
res = res_egranger["ctt"] = {}
res[0] = [
-5.644431269946,
-4.796038299708,
-4.221469431008,
-3.926472577178,
]
res[1] = [-5.665691609506, -5.111158174219, -4.53317278104, -4.23601008516]
res[2] = [-3.161462374828, -5.111158174219, -4.53317278104, -4.23601008516]
res[3] = [
-5.657904558563,
-5.406880189412,
-4.826111619543,
-4.527090164875,
]
# The following for 'n' are only regression test numbers
# trend = 'n' not allowed in egranger
# trend = 'n'
res = res_egranger["n"] = {}
nan = np.nan # shortcut for table
res[0] = [-3.7146175989071137, nan, nan, nan]
res[1] = [-3.8199323012888384, nan, nan, nan]
res[2] = [-1.6865000791270679, nan, nan, nan]
res[3] = [-3.7991270451873675, nan, nan, nan]
with pytest.warns(FutureWarning):
# Ensure warning raised for nc rather than n
coint(y[:, 0], y[:, 1], trend="nc", maxlag=4, autolag=None)
for trend in ["c", "ct", "ctt", "n"]:
res1 = {}
res1[0] = coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None)
res1[1] = coint(
y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None
)
res1[2] = coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None)
res1[3] = coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None)
for i in range(4):
res = res_egranger[trend]
assert_allclose(res1[i][0], res[i][0], rtol=1e-11)
r2 = res[i][1:]
r1 = res1[i][2]
assert_allclose(r1, r2, rtol=0, atol=6e-7)
# use default autolag #4490
res1_0 = coint(y[:, 0], y[:, 1], trend="ct", maxlag=4)
assert_allclose(res1_0[2], res_egranger["ct"][0][1:], rtol=0, atol=6e-7)
# the following is just a regression test
assert_allclose(
res1_0[:2],
[-13.992946638547112, 2.270898990540678e-27],
rtol=1e-10,
atol=1e-27,
)
def test_coint_identical_series():
nobs = 200
scale_e = 1
np.random.seed(123)
y = scale_e * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with pytest.warns(CollinearityWarning):
c = coint(y, y, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
def test_coint_perfect_collinearity():
# test uses nearly perfect collinearity
nobs = 200
scale_e = 1
np.random.seed(123)
x = scale_e * np.random.randn(nobs, 2)
y = 1 + x.sum(axis=1) + 1e-7 * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with warnings.catch_warnings(record=True) as w:
c = coint(y, x, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
class TestGrangerCausality(object):
def test_grangercausality(self):
# some example data
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
# R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] # f_test
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]["ssr_ftest"], decimal=7)
assert_almost_equal(
gr[2][0]["params_ftest"], gr[2][0]["ssr_ftest"], decimal=7
)
def test_grangercausality_single(self):
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
gr2 = grangercausalitytests(data[:, 1::-1], [2], verbose=False)
assert 1 in gr
assert 1 not in gr2
assert_almost_equal(
gr[2][0]["ssr_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
assert_almost_equal(
gr[2][0]["params_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
def test_granger_fails_on_nobs_check(self, reset_randomstate):
# Test that if maxlag is too large, Granger Test raises a clear error.
x = np.random.rand(10, 2)
grangercausalitytests(x, 2, verbose=False) # This should pass.
with pytest.raises(ValueError):
grangercausalitytests(x, 3, verbose=False)
def test_granger_fails_on_finite_check(self, reset_randomstate):
x = np.random.rand(1000, 2)
x[500, 0] = np.nan
x[750, 1] = np.inf
with pytest.raises(ValueError, match="x contains NaN"):
grangercausalitytests(x, 2)
def test_granger_fails_on_zero_lag(self, reset_randomstate):
x = np.random.rand(1000, 2)
with pytest.raises(
ValueError,
match="maxlag must be a non-empty list containing only positive integers",
):
grangercausalitytests(x, [0, 1, 2])
class TestKPSS:
"""
R-code
------
library(tseries)
kpss.stat(x, "Level")
kpss.stat(x, "Trend")
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
@classmethod
def setup(cls):
cls.data = macrodata.load_pandas()
cls.x = cls.data.data["realgdp"].values
def test_fail_nonvector_input(self, reset_randomstate):
# should be fine
with pytest.warns(InterpolationWarning):
kpss(self.x, nlags="legacy")
x = np.random.rand(20, 2)
assert_raises(ValueError, kpss, x)
def test_fail_unclear_hypothesis(self):
# these should be fine,
with pytest.warns(InterpolationWarning):
kpss(self.x, "c", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "C", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "ct", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "CT", nlags="legacy")
assert_raises(
ValueError, kpss, self.x, "unclear hypothesis", nlags="legacy"
)
def test_teststat(self):
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "c", 3)
assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3)
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "ct", 3)
assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3)
import numpy as np
import pydicom as dicom
import os
from glob import glob
import scipy.ndimage
import re
import sys
from tqdm import tqdm
from itertools import chain
from skimage.io import imread, imshow, imread_collection, concatenate_images
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
import random
from sklearn.model_selection import train_test_split
from PIL import Image
def load_tif_scan(path):
slices = []
files = glob(path + '/*.tif')
files = natural_sort(files)
for file in files:
im = Image.open(file)
# Convert to Numpy Array
imarray = np.array(im)
# Normalize
#x = (x - 128.0) / 128.0
x = np.squeeze(imarray)
slices.append(x)
slices = np.array(slices)
#slices = np.flip(slices, 0) #masks were saved in reverse order
return slices
def get_aaron_data(TEST_ID,IMG_WIDTH,IMG_HEIGHT,NUM_SLICES,IMG_CHANNELS):
TEST_PATH = '../npy_data/aaron/'
# Get and resize test images
#print('Getting test images and masks ... ')
X_test = np.zeros((NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint16)
y_test = np.zeros((NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for ch in range(IMG_CHANNELS):
i = 0
path = TEST_PATH + 'imgs/' + str(ch) + '/' + TEST_ID
img = np.load(path)[:,:,:]
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
maskpath = TEST_PATH + 'labels/' + TEST_ID
mask_ = np.load(maskpath)[:,:,:,np.newaxis]
mask = np.maximum(mask, mask_)
for i in range(NUM_SLICES):
X_test[i,:,:,ch] = img[i]
y_test[i] = mask[i]
i+=1
print('Done!')
return (X_test, y_test)
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
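# Hedged usage sketch (file names are assumed): natural_sort orders embedded integers
# numerically rather than lexicographically, which matters for slice file names.
# sorted(['img10.tif', 'img2.tif'])       -> ['img10.tif', 'img2.tif']
# natural_sort(['img10.tif', 'img2.tif']) -> ['img2.tif', 'img10.tif']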
def load_scan(path):
#slices = [dicom.read_file((path + '/' + s) for s in os.listdir(path))]
slices = []
for file in glob(path + '/*.DCM'):
slices.append(dicom.read_file(file))
slices.sort(key = lambda x: int(x.InstanceNumber)) # sort by slice number
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def show_dcm_info(dataset, path):
print("Filename.........:", path)
print("Storage type.....:", dataset.SOPClassUID)
print()
pat_name = dataset.PatientName
display_name = pat_name.family_name + ", " + pat_name.given_name
print("Patient's name......:", display_name)
print("Patient id..........:", dataset.PatientID)
print("Patient's Age.......:", dataset.PatientAge)
print("Patient's Sex.......:", dataset.PatientSex)
print("Modality............:", dataset.Modality)
if 'BodyPartExamined' in dataset:
print("Body Part Examined..:", dataset.BodyPartExamined)
if 'ViewPosition' in dataset:
print("View Position.......:", dataset.ViewPosition)
if 'PixelData' in dataset:
rows = int(dataset.Rows)
cols = int(dataset.Columns)
print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format(
rows=rows, cols=cols, size=len(dataset.PixelData)))
if 'PixelSpacing' in dataset:
print("Pixel spacing....:", dataset.PixelSpacing)
def get_pixels(scans):
image = np.stack([s.pixel_array for s in scans])
# Convert to int16 (from sometimes int16),
# should be possible as values should always be low enough (<32k)
image = image.astype(np.int16)
return np.array(image, dtype=np.int16)
def sample_stack(stack, rows=4, cols=5, start_with=0, show_every=1):
fig,ax = plt.subplots(rows,cols,figsize=[12,12],dpi=300)
ind = start_with
for i in range(rows):
for j in range(cols):
ax[i,j].set_title('slice %d' % (ind+1))
ax[i,j].imshow(stack[ind],cmap='gray')
ax[i,j].axis('off')
ind = ind + show_every
plt.show()
def get_data(TRAIN_PATH,TEST_PATH,IMG_WIDTH,IMG_HEIGHT,NUM_SLICES,IMG_CHANNELS):
# Get train and test IDs
train_ids = next(os.walk(TRAIN_PATH+'imgs/0/'))[2]
test_ids = next(os.walk(TEST_PATH+'imgs/0/'))[2]
# Get and resize train images and masks
#print('Getting train images and masks ... ')
X_train = np.zeros((len(train_ids)*NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint16)
y_train = np.zeros((len(train_ids)*NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
for ch in range(IMG_CHANNELS):
i = 0
path = TRAIN_PATH + 'imgs/' + str(ch) + '/' + id_
img = np.load(path)[:,:,:]
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
maskpath = TRAIN_PATH + 'labels/' + id_
mask_ = np.load(maskpath)[:,:,:,np.newaxis]
mask = np.maximum(mask, mask_)
for i in range(NUM_SLICES):
X_train[n*NUM_SLICES + i,:,:,ch] = img[i]
y_train[n*NUM_SLICES + i] = mask[i]
i+=1
# Get and resize test images
#print('Getting test images and masks ... ')
X_test = np.zeros((len(test_ids)*NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint16)
y_test = np.zeros((len(test_ids)*NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
sizes_test = []
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
for ch in range(IMG_CHANNELS):
i = 0
path = TEST_PATH + 'imgs/' + str(ch) + '/' + id_
img = np.load(path)[:,:,:]
sizes_test.append([img.shape[0], img.shape[1]])
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
maskpath = TEST_PATH + 'labels/' + id_
mask_ = np.load(maskpath)[:,:,:,np.newaxis]
mask = np.maximum(mask, mask_)
for i in range(NUM_SLICES):
X_test[n*NUM_SLICES + i,:,:,ch] = img[i]
y_test[n*NUM_SLICES + i] = mask[i]
i+=1
print('Done!')
return (X_train, X_test, y_train, y_test)
def get_testing_data(TEST_ID,IMG_WIDTH,IMG_HEIGHT,NUM_SLICES,IMG_CHANNELS):
TEST_PATH = '../npy_data/test/'
# Get and resize test images
#print('Getting test images and masks ... ')
X_test = np.zeros((NUM_SLICES, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint16)
import numpy as np
from numpy.testing import assert_equal
from skcv.video.optical_flow.reliability import flow_reliability
from skcv.video.segmentation.tbpt import TBPT
def test_tbpt():
N = 99
M = 99
n_frames = 2
fflow = np.zeros((n_frames, N, M, 2))
bflow = np.zeros((n_frames, N, M, 2))
fflow[0, N // 3:2 * N // 3, M // 3:2 * M // 3, 0] = 1  # integer division keeps the slice indices valid under Python 3
fflow[0, N // 3:2 * N // 3, M // 3:2 * M // 3, 1] = 1
bflow[1, 1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 0] = -1
bflow[1, 1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 1] = -1
video = np.zeros((2, N, M, 3))
fcoords = np.where(fflow[0, ..., 0] == 1)
bcoords = np.where(bflow[1, ..., 0] == -1)
video[0, fcoords[0], fcoords[1], :] = 200
video[1, bcoords[0], bcoords[1], :] = 200
rel = np.zeros((n_frames, N, M))
for frame in range(n_frames-1):
rel[frame, ...] = flow_reliability(video[frame, ...],
fflow[frame, ...],
bflow[frame + 1, ...],
use_structure=False)
part = (video[..., 1] != 0).astype(np.int)
#define a distance for the TBPT
#arguments: video, flow, region1, region2
distance = lambda v, fflow, r1, r2: 1
tbpt = TBPT(video, part, distance, optical_flow=fflow)
#check regions
assert_equal(tbpt.nodes[0]["parent"], 2)
assert_equal(tbpt.nodes[1]["parent"], 2)
import numpy as np
import tensorflow as tf
from scipy.stats import multivariate_normal as normal
class Equation(object):
"""Base class for defining PDE related function."""
def __init__(self, dim, total_time, num_time_interval):
self._dim = dim
self._total_time = total_time
self._num_time_interval = num_time_interval
self._delta_t = (self._total_time + 0.0) / self._num_time_interval
self._sqrt_delta_t = np.sqrt(self._delta_t)
self._y_init = None
def sample(self, num_sample):
"""Sample forward SDE."""
raise NotImplementedError
def f_tf(self, t, x, y, z):
"""Generator function in the PDE."""
raise NotImplementedError
def g_tf(self, t, x):
"""Terminal condition of the PDE."""
raise NotImplementedError
@property
def y_init(self):
return self._y_init
@property
def dim(self):
return self._dim
@property
def num_time_interval(self):
return self._num_time_interval
@property
def total_time(self):
return self._total_time
@property
def delta_t(self):
return self._delta_t
def get_equation(name, dim, total_time, num_time_interval):
try:
return globals()[name](dim, total_time, num_time_interval)
except KeyError:
raise KeyError("Equation for the required problem not found.")
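# Hedged usage sketch (argument values are assumptions, not from the source): the factory
# looks the class up by name in globals() and forwards the constructor arguments.
# eq = get_equation('AllenCahn', dim=100, total_time=0.3, num_time_interval=20)
# dw, x = eq.sample(num_sample=64)  # dw: (64, 100, 20), x: (64, 100, 21)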
class AllenCahn(Equation):
def __init__(self, dim, total_time, num_time_interval):
super(AllenCahn, self).__init__(dim, total_time, num_time_interval)
self._x_init = np.zeros(self._dim)
self._sigma = np.sqrt(2.0)
def sample(self, num_sample):
dw_sample = normal.rvs(size=[num_sample,
self._dim,
self._num_time_interval]) * self._sqrt_delta_t
x_sample = np.zeros([num_sample, self._dim, self._num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self._dim]) * self._x_init
for i in range(self._num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self._sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return y - tf.pow(y, 3)
def g_tf(self, t, x):
return 0.5 / (1 + 0.2 * tf.reduce_sum(tf.square(x), 1, keep_dims=True))
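# Hedged reading of the class above (my interpretation, not a statement from the source):
# with zero drift and sigma = sqrt(2), the generator f_tf = y - y^3 and the terminal
# condition g_tf correspond to a semilinear PDE of the form
#   du/dt + Laplacian(u) + u - u^3 = 0,  u(T, x) = 0.5 / (1 + 0.2 * ||x||^2),
# solved backward from the terminal time along the sampled forward SDE paths.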
class HJB(Equation):
def __init__(self, dim, total_time, num_time_interval):
super(HJB, self).__init__(dim, total_time, num_time_interval)
self._x_init = np.zeros(self._dim)
self._sigma = np.sqrt(2.0)
self._lambda = 50
def sample(self, num_sample):
dw_sample = normal.rvs(size=[num_sample,
self._dim,
self._num_time_interval]) * self._sqrt_delta_t
x_sample = np.zeros([num_sample, self._dim, self._num_time_interval + 1])
import numpy as np
import tensorflow as tf
from gym import utils
from gym.envs.mujoco import mujoco_env
from meta_mb.meta_envs.base import MetaEnv
class InvertedPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle, MetaEnv):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, 'inverted_pendulum.xml', 2)
def step(self, a):
# reward = 1.0
reward = self._get_reward()
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
# notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2)
# done = not notdone
done = False
return ob, reward, done, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.01, high=0.01)
qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.01, high=0.01)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_reward(self):
old_ob = self._get_obs()
reward = -((old_ob[1]) ** 2)
return reward
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel])
import numpy as np
import re
from sys import argv
import sys
import csv
import glob
import os
import cv2
import operator
import matplotlib.pyplot as plt
"""
Convert a rgb image to grayscale.
"""
def rgb_to_gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
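# Hedged usage sketch (input values assumed): the weights are the usual Rec. 601 luma
# coefficients, so a pure-red pixel maps to 0.299 and a pure-green pixel to 0.587.
_rgb = np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # shape (1, 2, 3)
assert np.allclose(rgb_to_gray(_rgb), [[0.299, 0.587]])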
"""
Order strings in natural order.
"""
def stringSplitByNumbers(x):
r = re.compile('(\d+)')
l = r.split(x)
return [int(y) if y.isdigit() else y for y in l]
"""
Remove duplicated line from csv file saved by Biolapse interface.
"""
def remove_duplicate_csv(source):
# # Create the output file as input with _deduped before .csv extension
# source = argv[1]
destination = source.replace('.csv', '_deduped.csv')
data = open(source, 'r')
target = open(destination, 'w')
# Let the user know you are starting, in case you are de-dupping a huge file
print("\nRemoving duplicates from %r" % source)
# Initialize variables and counters
unique_lines = set()
source_lines = 0
duplicate_lines = 0
# Loop through data, write uniques to output file, skip duplicates.
for line in data:
source_lines += 1
# Strip out the junk for an easy set check, also saves memory
line_to_check = line.strip('\r\n')
if line_to_check in unique_lines: # Skip if line is already in set
duplicate_lines += 1
continue
else: # Write if new and append stripped line to list of seen lines
target.write(line)
unique_lines.add(line_to_check)
# Be nice and close out the files
target.close()
data.close()
"""
Extract information from csv file saved by Biolapse interface.
"""
def get_labels(path_to_crop):
# Load labels
all_img = glob.glob(os.path.join(path_to_crop, 'zoom','*', '*.png'))
all_img = sorted(all_img,key=stringSplitByNumbers)
labels = []
idx = []
csv_file_ = os.path.join(path_to_crop, 'cells.csv')
remove_duplicate_csv(csv_file_)
csv_file_ = os.path.join(path_to_crop, 'cells_deduped.csv')
with open(csv_file_) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
name_cell_ = ''
first_run = True
for row in csv_reader:
idx_matching = [k for k in range(
len(all_img)) if name_cell_ in all_img[k]]
name_cell = row[0]
if name_cell == name_cell_:
# add previous cell labels
if not(row[1] == ' '):
crt_time = int(row[1])
for k in range(prev_time, crt_time):
lab_cell.append(0)
prev_time = crt_time
elif not(row[3] == ' '):
crt_time = int(row[3])
for k in range(prev_time, crt_time):
lab_cell.append(0)
prev_time = crt_time
elif not(row[5] == ' '):
crt_time = int(row[5])
for k in range(prev_time, crt_time):
lab_cell.append(1)
prev_time = crt_time
elif not(row[7] == ' '):
# import ipdb; ipdb.set_trace()
crt_time = int(row[7])
for k in range(prev_time, crt_time):
lab_cell.append(2)
prev_time = crt_time
elif not(row[9] == ' '):
crt_time = int(row[9])
for k in range(prev_time, crt_time):
lab_cell.append(3)
prev_time = crt_time
elif not(row[11] == ' '):
crt_time = int(row[11])
for k in range(prev_time, crt_time+1):
lab_cell.append(4)
prev_time = crt_time
# lab_cell.append(-1)
if not(len(lab_cell) == int(row[11])-start_time+1):
print('Warning: inconsistency in time acquisition.')
if len(idx_matching) != len(lab_cell):
# import ipdb; ipdb.set_trace()
print(
'Error: number of cells does not match the number of labels found.')
else:
print('Warning: no phase mentionned.')
print(row)
name_cell_ = ''
else:
# add corresponding index in image sequence
if not(first_run):
if len(lab_cell) != 0:
labels.append(lab_cell)
idx.append(idx_matching)
# create new label list
lab_cell = []
if not(row[1] == ' '):
start_time = int(row[1])
prev_time = start_time
first_run = False
elif not(row[3] == ' '):
start_time = int(row[3])
prev_time = start_time
first_run = False
elif not(row[5] == ' '):
start_time = int(row[5])
prev_time = start_time
first_run = False
elif not(row[7] == ' '):
start_time = int(row[7])
prev_time = start_time
first_run = False
elif not(row[9] == ' '):
start_time = int(row[9])
prev_time = start_time
first_run = False
elif not(row[11] == ' '):
print('Warning: end before it starts.')
name_cell_ = ''
# start_time = int(row[1])
# lab_cell.append(-1)
else:
print('Warning: no phase mentionned.')
print(row)
name_cell_ = ''
name_cell_ = name_cell
idx_matching = [k for k in range(
len(all_img)) if name_cell_ in all_img[k]]
if len(lab_cell) != 0:
labels.append(lab_cell)
idx.append(idx_matching)
return labels, idx
## NN
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from torchvision import datasets, transforms
import imageio
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size = 5, stride=1, padding=2, dilation=1)
self.conv2 = nn.Conv2d(10, 20, kernel_size = 5, stride=1, padding=2, dilation=1)
self.conv3 = nn.Conv2d(20, 30, kernel_size = 5, stride=1, padding=2, dilation=1)
self.fc1 = nn.Linear(16*16*30, 500)
self.fc2 = nn.Linear(500, 4)
def forward(self, x):
x = torch.squeeze(x,0)
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
# x = nn.Dropout(p=0.5)(x)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
# x = nn.Dropout(p=0.5)(x)
x = F.relu(self.conv3(x))
x = F.max_pool2d(x, 2, 2)
# x = nn.Dropout(p=0.5)(x)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
# x = nn.Dropout(p=0.5)(x)
x = x.view(-1, self.num_flat_features(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
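# Hedged shape check for the network above (assuming the 128x128 single-channel crops
# produced by the Dataset class below): the 5x5 convolutions use padding=2 so they keep
# the spatial size, and three 2x2 max-pools give 128 -> 64 -> 32 -> 16, so the flattened
# feature entering fc1 has 16 * 16 * 30 = 7680 elements, matching nn.Linear(16*16*30, 500).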
"""
Apply blur operator onto points at position on the grid.
Operator is assumed to act separately on two parts of the image: im[:n/2,:] and im[n/2:,:].
Image makes operator always identifiable: minimum 2 sources id different part of the image.
"""
class CropGenertor(torch.utils.data.Dataset):
def __init__(self, im, feature):
self.L=feature.shape[0]
self.feature=feature
self.im=im
def __len__(self):
return self.L
def __getitem__(self, i):
return np.expand_dims(self.im[i],0), self.feature[i]
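# Hedged usage sketch (assumptions: `crops` is an array of 2D image crops and
# `features` the matching label array; batch size and shuffling are illustrative
# choices, not taken from the original script). This is how the train_loader /
# test_loader arguments of train() below are expected to be built.
def _make_loader(crops, features, batch_size=16):
    dataset = CropGenertor(crops, features)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)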
from torch.autograd import Variable
def train(model, device, train_loader, test_loader, optimizer, epoch, nbatch=20):
model.train()
use_cuda=torch.cuda.is_available()
# data, target = train_loader
# data_test, target_test = test_loader
# nbImages = data.shape[0]
cpt=0
cum_loss_train=0
for im, feat in train_loader:
if use_cuda:
im = Variable(im).type(torch.float32).cuda()
else:
im = Variable(im).type(torch.float32)
# for batch_idx in range(nit):
# randi = np.random.randint(0,nbImages,size=nbatch)
# dataloc, targetloc = data[randi,:,:,:] ,target[randi]
# dataloc.to(device)
# targetloc.to(device)
optimizer.zero_grad()
# output = model(dataloc)
output = model(im)
if use_cuda:
loss = nn.BCELoss()(output,Variable(feat.float()).cuda())
else:
loss = nn.BCELoss()(output,Variable(feat.float()))
# loss = nn.BCELoss()(output,targetloc)
# loss = nn.CrossEntropyLoss().cuda()(output,targetloc)
loss.backward()
optimizer.step()
cum_loss_train+=loss.item()
if cpt % 50 == 0:
print('Train Epoch: {} [{}]\tLoss: {:.6f}'.format(
epoch, cpt,loss.item()))
cpt+=1
loss_train = cum_loss_train/cpt
# # Test
# randi = np.random.randint(0,data_test.shape[0],size=nbatch)
# dataloc_test, targetloc_test = data_test[randi,:], target_test[randi,:]
# dataloc_test.to(device)
# targetloc_test.to(device)
# output = model(dataloc_test)
# loss_test = nn.BCELoss()(output,targetloc_test)
cpt=0
cum_loss_test=0
for image_test, label_test in test_loader:
if use_cuda:
image_test = Variable(image_test).type(torch.float32).cuda()
else:
image_test = Variable(image_test).type(torch.float32)
output = model(image_test)
if use_cuda:
loss = nn.BCELoss()(output, Variable(label_test.float()).cuda())
else:
loss = nn.BCELoss()(output, Variable(label_test.float()))
cum_loss_test += loss.item()
cpt+=1
loss_test=cum_loss_test/cpt
# loss_test = nn.CrossEntropyLoss().cuda()(output,targetloc_test)
print('Test loss: {:.6f}'.format(loss_test))
return loss_train, loss_test
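# Hedged end-to-end sketch (the optimizer, learning rate and epoch count are
# assumptions for illustration, not taken from the original script): wire the
# Net, an optimizer and the loaders built above into the train() loop.
def _run_training(train_loader, test_loader, epochs=10):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    history = []
    for epoch in range(epochs):
        history.append(train(model, device, train_loader, test_loader, optimizer, epoch))
    return history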
class Dataset():
"""Dataset class
Attributes:
dataPath (TYPE): Position of the folder containing all the images
lenDataset (TYPE): Number of images present in this folder
"""
def __init__(self, dataPath,lenDataset):
'Initialization'
self.dataPath = dataPath
self.lenDataset = lenDataset
def __len__(self):
'Denotes the total number of samples'
return self.lenDataset
def __getitem__(self, index):
'Generates one sample of data'
images = np.zeros((self.lenDataset,1,128,128))
labs = np.load(self.dataPath+os.sep+'feature.npy')
labs = labs[:self.lenDataset]
for i in range(self.lenDataset):
images[i,0,:,:] = np.array(imageio.imread(self.dataPath+os.sep+str(i).zfill(5)+'.png'),dtype=float)/255.
return torch.Tensor(images), torch.Tensor(labs)
def getALLDATA(self):
images = np.zeros((self.lenDataset,1,128,128))
labs = np.load(self.dataPath+os.sep+'feature.npy')
labs = labs[:self.lenDataset]
for i in range(self.lenDataset):
images[i,0,:,:] = np.array(imageio.imread(self.dataPath+os.sep+str(i).zfill(5)+'.png'),dtype=float)/255.
return torch.Tensor(images), torch.Tensor(labs)
import database_funcs as _db
import numpy as np
import math_funcs as _mth
from currency_codes import codes
def simpleShouldIInvest(logger,learning_df,last_candle_df,model,epoch,currency_pair,data_base_config = None):
IMPORTANT_FEATURES=model['features']
N = model['n']
AVG_TH = model['avg']
CHEBY_2K_TH = model['cheby2k']
WORSE_TH = model['worse']
BEN_FIELD = model['ben_field']
MODE = model['mode']
STEP_FIELD = "step_" + BEN_FIELD.split('_')[1]
RSI_MODE = model['rsi_mode']
MIN_ROC1 = model['min_roc1']
MAX_ROC1 = model['max_roc1']
STEPS_TH = model['steps']
OUTPUT_RSI_VALUE = int(BEN_FIELD.split("_")[1])/1000.0
INPUT_RSI_VALUE_TH = OUTPUT_RSI_VALUE- (OUTPUT_RSI_VALUE*0.05)
data_for_log = {}
#logger.debug("N:{} AVG_TH:{} CHEBY_2K_TH:{} WORSE_TH:{} BEN_FIELD:{} MODE:{}".format(N,AVG_TH,CHEBY_2K_TH,WORSE_TH,BEN_FIELD,MODE))
#logger.debug("STEP_FIELD:{} RSI_MODE:{} MIN_ROC1:{} MAX_ROC1:{} STEPS_TH:{} ".format(STEP_FIELD,RSI_MODE,MIN_ROC1,MAX_ROC1,STEPS_TH))
#logger.debug("OUTPUT_RSI_VALUE:{} INPUT_RSI_VALUE_TH:{}".format(OUTPUT_RSI_VALUE,INPUT_RSI_VALUE_TH))
data_for_log['epoch'] = epoch
data_for_log['currency_pair'] = currency_pair
data_for_log['currency_code'] = codes[currency_pair]
data_for_log['mode'] = MODE
data_for_log['roc1'] = last_candle_df.iloc[0]['roc1']
data_for_log['rsi'] = last_candle_df.iloc[0]['rsi']
data_for_log['close'] = last_candle_df.iloc[0]['close']
data_for_log['near_neighbors'] = 0
data_for_log['neighbors'] = 0
data_for_log['avg_benefit'] = 0.0
data_for_log['avg_benefit_weighted_space'] = 0.0
data_for_log['avg_benefit_weighted_time'] = 0.0
data_for_log['prob_win'] = 0.0
data_for_log['q5_benefit'] = 0.0
data_for_log['q95_benefit'] = 0.0
data_for_log['cheby2k'] = 0.0
data_for_log['cheby2k_weighted_space'] = 0.0
data_for_log['cheby2k_weighted_time'] = 0.0
data_for_log['worse_expected'] = 0.0
data_for_log['steps_mean'] = 0
data_for_log['buy'] = 0
data_for_log['avg_space_distance'] = 0.0
if abs(last_candle_df.iloc[0]['roc1']) < MAX_ROC1:
logger.debug("abs roc1:{} lower than:{}".format(last_candle_df.iloc[0]['roc1'],MAX_ROC1))
return False
else:
data_for_log['buy'] = 1
logger.debug("roc1:{} less than:{} you must buy!".format(last_candle_df.iloc[0]['roc1'],-MAX_ROC1))
if data_base_config != None:
_db.logInsertNeighbors(logger,data_base_config,data_for_log)
return True
def simpleDownShouldIInvest(logger,learning_df,last_candle_df,model,epoch,currency_pair,data_base_config = None):
IMPORTANT_FEATURES=model['features']
N = model['n']
AVG_TH = model['avg']
CHEBY_2K_TH = model['cheby2k']
WORSE_TH = model['worse']
BEN_FIELD = model['ben_field']
MODE = model['mode']
STEP_FIELD = "step_" + BEN_FIELD.split('_')[1]
RSI_MODE = model['rsi_mode']
MIN_ROC1 = model['min_roc1']
MAX_ROC1 = model['max_roc1']
STEPS_TH = model['steps']
OUTPUT_RSI_VALUE = int(BEN_FIELD.split("_")[1])/1000.0
INPUT_RSI_VALUE_TH = OUTPUT_RSI_VALUE- (OUTPUT_RSI_VALUE*0.05)
data_for_log = {}
logger.debug("N:{} AVG_TH:{} CHEBY_2K_TH:{} WORSE_TH:{} BEN_FIELD:{} MODE:{}".format(N,AVG_TH,CHEBY_2K_TH,WORSE_TH,BEN_FIELD,MODE))
logger.debug("STEP_FIELD:{} RSI_MODE:{} MIN_ROC1:{} MAX_ROC1:{} STEPS_TH:{} ".format(STEP_FIELD,RSI_MODE,MIN_ROC1,MAX_ROC1,STEPS_TH))
logger.debug("OUTPUT_RSI_VALUE:{} INPUT_RSI_VALUE_TH:{}".format(OUTPUT_RSI_VALUE,INPUT_RSI_VALUE_TH))
data_for_log['epoch'] = epoch
data_for_log['currency_pair'] = currency_pair
data_for_log['currency_code'] = codes[currency_pair]
data_for_log['mode'] = MODE
data_for_log['roc1'] = last_candle_df.iloc[0]['roc1']
data_for_log['rsi'] = last_candle_df.iloc[0]['rsi']
data_for_log['close'] = last_candle_df.iloc[0]['close']
data_for_log['near_neighbors'] = 0
data_for_log['neighbors'] = 0
data_for_log['avg_benefit'] = 0.0
data_for_log['avg_benefit_weighted_space'] = 0.0
data_for_log['avg_benefit_weighted_time'] = 0.0
data_for_log['prob_win'] = 0.0
data_for_log['q5_benefit'] = 0.0
data_for_log['q95_benefit'] = 0.0
data_for_log['cheby2k'] = 0.0
data_for_log['cheby2k_weighted_space'] = 0.0
data_for_log['cheby2k_weighted_time'] = 0.0
data_for_log['worse_expected'] = 0.0
data_for_log['steps_mean'] = 0
data_for_log['buy'] = 0
data_for_log['avg_space_distance'] = 0.0
if last_candle_df.iloc[0]['roc1'] > -MAX_ROC1:
logger.debug("roc1:{} bigger than:{}".format(last_candle_df.iloc[0]['roc1'],-MAX_ROC1))
return False
else:
data_for_log['buy'] = 1
logger.debug("roc1:{} less than:{} you must buy!".format(last_candle_df.iloc[0]['roc1'],-MAX_ROC1))
if data_base_config != None:
_db.logInsertNeighbors(logger,data_base_config,data_for_log)
return True
def shouldIInvest(logger,learning_df,last_candle_df,model,epoch,currency_pair,data_base_config = None):
IMPORTANT_FEATURES=model['features']
N = model['n']
AVG_TH = model['avg']
CHEBY_2K_TH = model['cheby2k']
WORSE_TH = model['worse']
BEN_FIELD = model['ben_field']
MODE = model['mode']
STEP_FIELD = "step_" + BEN_FIELD.split('_')[1]
RSI_MODE = model['rsi_mode']
MIN_ROC1 = model['min_roc1']
MAX_ROC1 = model['max_roc1']
STEPS_TH = model['steps']
OUTPUT_RSI_VALUE = int(BEN_FIELD.split("_")[1])/1000.0
INPUT_RSI_VALUE_TH = OUTPUT_RSI_VALUE- (OUTPUT_RSI_VALUE*0.05)
data_for_log = {}
#logger.debug("N:{} AVG_TH:{} CHEBY_2K_TH:{} WORSE_TH:{} BEN_FIELD:{} MODE:{}".format(N,AVG_TH,CHEBY_2K_TH,WORSE_TH,BEN_FIELD,MODE))
#logger.debug("STEP_FIELD:{} RSI_MODE:{} MIN_ROC1:{} MAX_ROC1:{} STEPS_TH:{} ".format(STEP_FIELD,RSI_MODE,MIN_ROC1,MAX_ROC1,STEPS_TH))
#logger.debug("OUTPUT_RSI_VALUE:{} INPUT_RSI_VALUE_TH:{}".format(OUTPUT_RSI_VALUE,INPUT_RSI_VALUE_TH))
data_for_log['epoch'] = epoch
data_for_log['currency_pair'] = currency_pair
data_for_log['currency_code'] = codes[currency_pair]
data_for_log['mode'] = MODE
data_for_log['roc1'] = last_candle_df.iloc[0]['roc1']
data_for_log['rsi'] = last_candle_df.iloc[0]['rsi']
data_for_log['close'] = last_candle_df.iloc[0]['close']
data_for_log['near_neighbors'] = 0
data_for_log['neighbors'] = 0
data_for_log['avg_benefit'] = 0.0
data_for_log['avg_benefit_weighted_space'] = 0.0
data_for_log['avg_benefit_weighted_time'] = 0.0
data_for_log['prob_win'] = 0.0
data_for_log['q5_benefit'] = 0.0
data_for_log['q95_benefit'] = 0.0
data_for_log['cheby2k'] = 0.0
data_for_log['cheby2k_weighted_space'] = 0.0
data_for_log['cheby2k_weighted_time'] = 0.0
data_for_log['worse_expected'] = 0.0
data_for_log['steps_mean'] = 0
data_for_log['buy'] = 0
data_for_log['avg_space_distance'] = 0.0
if np.abs(last_candle_df.iloc[0]['roc1'] ) < MIN_ROC1:
logger.debug("abs roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MIN_ROC1))
return False
if RSI_MODE == 1:
if last_candle_df.iloc[0]['rsi'] > INPUT_RSI_VALUE_TH:
logger.debug("rsi_mode 1")
logger.debug("rsi:{} bigger than:{}".format(last_candle_df.iloc[0]['rsi'],INPUT_RSI_VALUE_TH))
if data_base_config != None:
_db.logInsertNeighbors(logger,data_base_config,data_for_log)
return False
if MODE == 1:
if abs(last_candle_df.iloc[0]['roc1']) > MAX_ROC1:
logger.debug("abs roc1:{} greater than:{} you must buy!".format(last_candle_df.iloc[0]['roc1'],MAX_ROC1))
return True
if last_candle_df.iloc[0]['roc1'] < MIN_ROC1:
logger.debug("roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MIN_ROC1))
return False
if MODE == 2:
if last_candle_df.iloc[0]['roc1'] < MIN_ROC1:
logger.debug("roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MIN_ROC1))
return False
if MODE == 3:
if abs(last_candle_df.iloc[0]['roc1']) < MIN_ROC1:
logger.debug("abs roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MIN_ROC1))
return False
if abs(last_candle_df.iloc[0]['roc1']) > MAX_ROC1:
data_for_log['buy'] = 1
logger.debug("abs roc1:{} greater than:{} you must buy!".format(last_candle_df.iloc[0]['roc1'],MAX_ROC1))
if data_base_config != None:
_db.logInsertNeighbors(logger,data_base_config,data_for_log)
return True
if MODE == 4:
if last_candle_df.iloc[0]['roc1'] > MAX_ROC1:
logger.debug("roc1:{} greater than:{} you must buy!".format(last_candle_df.iloc[0]['roc1'],MAX_ROC1))
return True
if abs(last_candle_df.iloc[0]['roc1']) < MIN_ROC1:
logger.debug("abs roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MIN_ROC1))
return False
if last_candle_df.iloc[0]['roc1'] < MAX_ROC1: # zone D is not analysed
logger.debug("roc1:{} less than:{}".format(last_candle_df.iloc[0]['roc1'],MAX_ROC1))
return False
train = learning_df.copy(deep = True)
train['s'] = 0.0
for feature in IMPORTANT_FEATURES:
train['s'] += IMPORTANT_FEATURES[feature] * ((last_candle_df.iloc[0][feature] - train[feature] + 0.00001 ) **2)
train['space_distance'] = np.sqrt(train['s'])
train['w_space_distance'] = 100 - train['space_distance']
train['time_distance'] = last_candle_df.iloc[0]['date'] - train['date']
train = train[train['space_distance'].notnull()]
train.sort_values('space_distance',ascending=True,inplace=True)
logger.debug("{}".format(train.head(5).T))
if N == 0:
n = 0
for j, row in train.iterrows():
if row['space_distance'] > np.sqrt(len(IMPORTANT_FEATURES)):
break
n += 1
elif N == -1:
n = 0
for j, row in train.iterrows():
if row['space_distance'] > np.sqrt(len(IMPORTANT_FEATURES)/2):
break
n += 1
elif N == -2:
n = 0
for j, row in train.iterrows():
if row['space_distance'] > np.sqrt(2):
break
n += 1
#!/usr/bin/env python
# coding: utf-8
# # Importing Libraries
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Rectangle
# In[2]:
def abs(x):
if x < 0:
return -x
return x
# * 0 -- up
# * 1 -- right
# * 2 -- down
# * 3 -- left
# In[3]:
# Choose the action using the action model in this function
def get_motion_direction(x):
if np.random.random() < 0.8:
return x
return (x + np.random.randint(1, 4))%4
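# Hedged sanity check (not in the original notebook; the sample count is
# illustrative): the action model above should select the intended direction
# with probability ~0.8 and each of the other three directions with probability
# ~0.2/3, which is exactly the `probab` split assumed in get_val_for_action().
def _check_action_model(n=100000):
    counts = np.zeros(4)
    for _ in range(n):
        counts[get_motion_direction(0)] += 1
    return counts / n  # roughly [0.8, 0.067, 0.067, 0.067]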
# In[4]:
def is_wall(x, y):
if x == 0 or x == 49:
return True
if y == 0 or y == 24:
return True
if x == 25 or x == 26:
if y != 12:
return True
return False
# In[5]:
def is_goal(x, y):
if x == 48 and y == 12:
return True
return False
# In[6]:
def next_state(x, y, dir):
xf = x
yf = y
if dir == 0:
yf += 1
elif dir == 1:
xf += 1
elif dir == 2:
yf -= 1
else:
xf -= 1
if is_wall(xf, yf):
return (x, y, True)
return (xf, yf, False)
# In[7]:
def reward(x, y, hit_wall):
if is_goal(x, y):
return 100
if hit_wall:
return -1
return 0
# In[30]:
def initialise_val_grid():
val_grid = np.zeros((50, 25))
for x in range(50):
for y in range(25):
val_grid[x][y] = np.random.random() + 0.0000001
return val_grid
# In[9]:
def get_val_for_action(val_grid, x, y, discount_factor, action):
val = 0
for dir in range(4):
xf, yf, hit_wall = next_state(x, y, dir)
r = reward(xf, yf, hit_wall)
probab = 0.2/3
if dir == action:
probab = 0.8
val += probab*(r + discount_factor*val_grid[xf][yf])
return val
# In[10]:
def get_new_value(val_grid, x, y, discount_factor):
val = -np.inf
act = 0
for action in range(4):
temp = get_val_for_action(val_grid, x, y, discount_factor, action)
if temp > val:
val = temp
act = action
return val, act
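# Taken together, get_val_for_action() and get_new_value() implement the Bellman
# optimality backup used by value_iteration() below:
#   V(s) <- max_a sum_d P(d | a) * [ r(s'_d) + gamma * V(s'_d) ]
# where s'_d is the cell reached by moving in direction d (or s itself on a wall
# hit), P(d | a) = 0.8 if d == a and 0.2/3 otherwise, and gamma is the
# discount_factor argument.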
# In[11]:
def value_iteration(discount_factor, threshold, iterations):
val_grid = initialise_val_grid()
action_grid = np.zeros((50, 25))
for itr in range(iterations):
delta = 0
for x in range(50):
for y in range(25):
if is_wall(x, y):
action_grid[x][y] = -1
val_grid[x][y] = 0
continue
v = val_grid[x][y]
val_grid[x][y], action_grid[x][y] = get_new_value(val_grid, x, y, discount_factor)
delta = max(delta, abs(v - val_grid[x][y]))
if delta < threshold:
break
return val_grid, action_grid
# In[52]:
def show_heatmap(arr, annot, fsize = (20, 15)):
fig, ax = plt.subplots(figsize = fsize)
ax = sns.heatmap(arr.T, annot = annot, cmap = 'gray', linewidth=0.1)
ax.add_patch(Rectangle((0, 0), 1, 25, color='blue'))
ax.add_patch(Rectangle((0, 24), 50, 1, color='blue'))
ax.add_patch(Rectangle((49, 0), 1, 25, color='blue'))
ax.add_patch(Rectangle((0, 0), 50, 1, color='blue'))
ax.add_patch(Rectangle((25, 0), 2, 12, color='blue'))
ax.add_patch(Rectangle((25, 13), 2, 12,color='blue'))
ax.add_patch(Rectangle((48, 12), 1, 1, edgecolor='red', fill=False, lw=2))
ax.invert_yaxis()
plt.show()
# In[ ]:
# In[60]:
def action_indicator_diag(arr, fsize):
x = np.arange(0.5, 50.5, 1)
y = np.arange(0.5, 25.5, 1)
X, Y = np.meshgrid(x, y)
u = np.zeros((25, 50))
v = np.zeros((25, 50))
# print(X.shape)
# print(Y.shape)
# print(u.shape)
# print(v.shape)
for i in range(50):
for j in range(25):
if (arr[i][j] == 0):
v[j][i] = 1
elif (arr[i][j] == 1):
u[j][i] = 1
elif (arr[i][j] == 2):
v[j][i] = -1
else:
u[j][i] = -1
# creating plot
fig, ax = plt.subplots(figsize =fsize)
ax.quiver(X, Y, u, v, angles='xy', pivot='mid')
major_ticks_x = np.arange(0, 50, 1)
#minor_ticks_x = np.arange(0, 49, 1)
major_ticks_y = np.arange(0, 25, 1)
#minor_ticks_y = np.arange(0, 24, 1)
ax.axis([0, 50, 0, 25])
ax.set_xticks(major_ticks_x)
ax.set_yticks(major_ticks_y)
ax.add_patch(Rectangle((0, 0), 1, 25))
ax.add_patch(Rectangle((0, 24), 50, 1))
ax.add_patch(Rectangle((49, 0), 1, 25))
ax.add_patch(Rectangle((0, 0), 50, 1))
ax.add_patch(Rectangle((25, 0), 2, 12))
ax.add_patch(Rectangle((25, 13), 2, 12))
ax.add_patch(Rectangle((48, 12), 1, 1, color = 'red', alpha = 0.3))
ax.grid(True)
plt.minorticks_on()
ax.set_axisbelow(True)
plt.show()
# In[31]:
def part_a_simple():
np.random.seed(0)
val_grid, action_grid = value_iteration(0.1, 0.1, 100)
show_heatmap(val_grid, False)
show_heatmap(action_grid, False)
action_indicator_diag(action_grid, (20, 15))
print(action_grid.T)
part_a_simple()
# In[27]:
def part_a():
np.random.seed(0)
val_grid, action_grid = value_iteration(0.1, 0.1, 10)
print(val_grid[48])
x = np.ptp(val_grid)
y = np.min(val_grid) - 0.1
print(y)
u = val_grid - y
print(x)
show_heatmap(np.log((u)/x), False)
show_heatmap(action_grid, False)
part_a()
# In[29]:
def part_b():
np.random.seed(0)
""" The Number Coil """
# List a prime; List mod6 of prime; Compare with another set (offset of mod6)
# to see spin change, determine cell of each prime.
#
#
""" This code plots the positons in the central triangle of the prime hexagon.
See accompanying notes and images for what these mean.
"""
import sys, os, glob
import logging
import re
import itertools
from collections import namedtuple
import numpy as np
import primesieve
_HAS_PRIMESIEVE_NUMPY = False
try:
import primesieve.numpy
_HAS_PRIMESIEVE_NUMPY = True
except:
pass
_HAS_CYTHON_PRIMEHEX = False
try:
import primehexagon
_HAS_CYTHON_PRIMEHEX = True
except:
pass
# python 2/3 compatibility stuff
try:
l = long(1)
except:
long = int
try:
# python2 version of zip iterator is in itertools
zipper = itertools.izip
except:
# new python3 version is no longer in itertools. seriously?
zipper = zip
logger = None
log_formatter = None
def setup_loggers():
global logger, log_formatter
if logger is not None:
return
# create logger
logger = logging.getLogger('prime_hexagon')
logger.setLevel(logging.INFO)
# create formatter
log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
setup_loggers()
def list_generate_primes_array(a,b):
logger.info("starting generating primes from {} to {}".format(a,b))
logger.info("\tusing primesieve list prime generation")
a = primesieve.primes(a,b)
logger.info("\tprimes generated")
a = np.array(a,dtype=np.uint64)
logger.info("\toutput array created")
logger.info("done generating primes")
return a
def numpy_generate_primes_array(a,b):
logger.info("starting generating primes from {} to {}".format(a,b))
logger.info("\tusing numpy prime generator")
import primesieve.numpy
a = primesieve.numpy.primes(a,b)
a = a.astype(np.uint64)
logger.info("done generating primes")
return a
def get_primes_generator():
logger.info("selecting primes generation function")
func = None
if _HAS_PRIMESIEVE_NUMPY:
logger.info("\tusing numpy prime generation...")
func = numpy_generate_primes_array
else:
logger.info("\tfalling back to list primesieve prime generation")
func = list_generate_primes_array
logger.info("prime generator selected")
return func
def _compute_spins(primes, last_prime, last_spin):
"""Returns array of SPINS given an array of PRIMES (assumed to be 1d)
SPINS result array is same shape as PRIMES, with values stored in SPINS[1:];
SPINS[0] = 0, use that zero element to thread chunked computations together.
"""
logger.info("compute_spins: generating mod6Values")
m6val = primes % 6
m6_offset_sum = np.empty_like(primes, dtype=np.int32)
m6_offset_sum[0] = m6val[0] + (last_prime % 6) # seed value from current val + prev m6val
m6_offset_sum[1:] = m6val[1:] + m6val[:-1] # cur m6val + prev m6val
logger.info("compute_spins: done mod6Values")
logger.info("compute_spins: m6_off_sum={}".format(m6_offset_sum))
logger.info("compute_spins: starting to compute spins")
z = np.zeros_like(primes, dtype=np.int32)
z[ m6_offset_sum == 6] = 1
z[ m6_offset_sum == 10] = -1
z[ m6_offset_sum == 2] = -1
logger.info("compute_spins: z before last_spin={}".format(z))
z[0] *= last_spin
logger.info("compute_spins: z={}".format(z))
spin = np.cumprod(z)
logger.info("compute_spins: spin={}".format(spin))
logger.info("compute_spins: done computing spins")
return spin
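# Hedged usage sketch (not part of the original module; chunk size and seed
# values are illustrative): the last_prime / last_spin arguments exist so that
# long prime runs can be processed in chunks while keeping the running spin
# consistent across chunk boundaries.
def _spins_in_chunks(primes, chunk=1000000, seed_prime=1, seed_spin=1):
    out = []
    last_prime, last_spin = seed_prime, seed_spin
    for start in range(0, len(primes), chunk):
        part = primes[start:start + chunk]
        spins = _compute_spins(part, last_prime, last_spin)
        out.append(spins)
        last_prime, last_spin = part[-1], spins[-1]
    return np.concatenate(out)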
def _compute_positions(spin, seed_pos, seed_spin):
"""Given an array of SPINS and two SEED_POSITION and SEED_SPIN values, compute the positions along the prime hex
"""
logger.info("compute_positions: starting aux calculations")
delta = np.zeros_like(spin)
delta[0] = spin[0] - seed_spin # first delta is cur_spin - prev_spin from seed_spin
delta[1:] = spin[1:] - spin[:-1] # delta is cur_spin - prev_spin
logger.info("compute_positions: delta={}".format(delta))
#increments = np.copy(spin) # copy the spin array,
increments = np.abs(spin) # absolute value of the spin array
increments[ delta != 0 ] = 0 # set any non-zero delta to zero in the increment array
logger.info("compute_positions: increments={}".format(increments))
logger.info("compute_positions:\tdone with aux calculations")
logger.info("compute_positions: starting primary calculation")
# start at seed, cumulative add
positions = np.copy(increments)
#increments[0] = seed_pos
outpositions = (seed_pos + np.cumsum(increments))
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
# Silicon structure
a = 5.404
cell = [[a, 0, 0],
[0, a, 0],
[0, 0, a]]
symbols=['Si'] * 8
scaled_positions = [(0.875, 0.875, 0.875),
(0.875, 0.375, 0.375),
(0.375, 0.875, 0.375),
(0.375, 0.375, 0.875),
(0.125, 0.125, 0.125),
(0.125, 0.625, 0.625),
(0.625, 0.125, 0.625),
(0.625, 0.625, 0.125)]
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
#!/usr/bin/env python3
"""
minibatch logistic regression
"""
import numpy as np
# from loguru import logger
from sklearn.metrics import accuracy_score
# relative imports
from books import start_end_map
def softmax(x, multi=True):
"""
get the softmax
"""
ex = np.exp(x - np.max(x))
return ex / ex.sum(axis=1, keepdims=True) if multi else ex / ex.sum(axis=0)
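# Hedged sanity check (illustrative, not part of the original module): every row
# of the batched softmax should sum to 1.
def _check_softmax():
    probs = softmax(np.random.randn(5, 3))
    assert np.allclose(probs.sum(axis=1), 1.0)
    return probs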
def _process_labels(y_train):
"""
process labels to array
"""
res = []
for val in y_train:
res.append(np.zeros(3))
res[-1][val] = 1
return np.array(res)
class LogisticRegression:
"""
logistic regression
"""
def __init__(self, epochs=100, batch_size=32, lmbda=1e-4, plot_epoch_iter=1, **_args):
"""
logistic regression init
"""
self.epochs = epochs
self.weights = None
self.bias_matrix = None
self.batch_size = batch_size
self.lmbda = lmbda
self.bias_vector = None
self.plot_epoch_iter = plot_epoch_iter
self.training_scores = None
self.testing_scores = None
def _regularization(self, index, k):
return self.lmbda * (self.weights[index, k]**2)
def _regularization_gradient(self, k):
return 2 * self.lmbda * self.weights[:, k]
def _net(self, Xi, multi=True):
"""
Define out network and obtain a predicted output for a set of M inputs ( V > K )
"""
y_linear = np.add(np.dot(Xi, self.weights), self.bias_vector[0])
return softmax(y_linear, multi=multi)
def score(self, X, y):
"""
returns the accuracy score of the logistic regression function
"""
try:
return accuracy_score(y, self.predict(X))
except ValueError:
return 0.
def predict(self, X):
"""
get prediction for given array of inputs
"""
X = X.toarray()
res = []
for elem in X:
net_output = self._net(elem, multi=False)
output = np.argmax(net_output)
res.append(output)
return res
def fit(self, X_train_text, y_train, X_test_text, y_test):
"""
main training loop
"""
X_train = X_train_text
X_train_text = X_train_text.toarray()
y_train = _process_labels(y_train)
N = X_train_text.shape[0] # dataset length
V = X_train_text.shape[1] # vocabulary length
K = len(start_end_map.keys()) # num of classes
lr = 1e-2 # Learning rate
self.weights = np.random.rand(V, K) # weight vector
self.bias_vector = np.random.rand(1, K) # bias vector
self.bias_matrix = np.repeat(
self.bias_vector, self.batch_size, axis=0) # bias matrix
dataset_len = X_train_text.shape[0]
dataset_indexes = np.arange(dataset_len)
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2015 <NAME>
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A library of simple models of vision
Simple usage::
import glob
from psychopy_ext import models
ims = glob.glob('Example_set/*.jpg') # get all jpg images
hmax = models.HMAX()
# if you want to see how similar your images are to each other
hmax.compare(ims)
# or to simply get the output and use it further
out = hmax.run(ims)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import sys, os, glob, itertools, warnings, inspect, argparse, imp
import tempfile, shutil
import pickle
from collections import OrderedDict
import numpy as np
import scipy.ndimage
import pandas
import seaborn as sns
import matlab_wrapper
import sklearn.manifold
import sklearn.preprocessing, sklearn.metrics, sklearn.cluster
import skimage.feature, skimage.data
from psychopy_ext import stats, plot, report, utils
try:
imp.find_module('caffe')
HAS_CAFFE = True
except:
try:
os.environ['CAFFE']
# put Python bindings in the path
sys.path.insert(0, os.path.join(os.environ['CAFFE'], 'python'))
HAS_CAFFE = True
except:
HAS_CAFFE = False
if HAS_CAFFE:
# Suppress GLOG output for python bindings
GLOG_minloglevel = os.environ.pop('GLOG_minloglevel', None)
os.environ['GLOG_minloglevel'] = '5'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
HAS_CAFFE = True
# Turn GLOG output back on for subprocess calls
if GLOG_minloglevel is None:
del os.environ['GLOG_minloglevel']
else:
os.environ['GLOG_minloglevel'] = GLOG_minloglevel
class Model(object):
def __init__(self, model, labels=None, verbose=True, *args, **kwargs):
self.name = ALIASES[model]
self.nice_name = NICE_NAMES[model]
self.safename = self.name
self.labels = labels
self.args = args
self.kwargs = kwargs
self.verbose = verbose
def download_model(self, path=None):
"""Downloads and extracts a model
:Kwargs:
path (str, default: '')
Where model should be extracted
"""
self._setup()
if self.model.model_url is None:
print('Model {} is already available'.format(self.nice_name))
elif self.model.model_url == 'manual':
print('WARNING: Unfortunately, you need to download {} manually. '
'Follow the instructions in the documentation.'.format(self.nice_name))
else:
print('Downloading and extracting {}...'.format(self.nice_name))
if path is None:
path = os.getcwd()
text = raw_input('Where do you want the model to be extracted? '
'(default: {})\n'.format(path))
if text != '': path = text
outpath, _ = utils.extract_archive(self.model.model_url,
folder_name=self.safename, path=path)
if self.name == 'phog':
with open(os.path.join(outpath, 'anna_phog.m')) as f:
text = f.read()
with open(os.path.join(outpath, 'anna_phog.m'), 'wb') as f:
s = 'dlmwrite(s,p);'
f.write(text.replace(s, '% ' + s, 1))
print('Model {} is available here: {}'.format(self.nice_name, outpath))
print('If you want to use this model, either give this path when '
'calling the model or add it to your path '
'using {} as the environment variable.'.format(self.safename.upper()))
def _setup(self):
if not hasattr(self, 'model'):
if self.name in CAFFE_MODELS:
self.model = CAFFE_MODELS[self.name](model=self.name, *self.args, **self.kwargs)
else:
self.model = KNOWN_MODELS[self.name](*self.args, **self.kwargs)
self.model.labels = self.labels
self.isflat = self.model.isflat
self.model.verbose = self.verbose
def run(self, *args, **kwargs):
self._setup()
return self.model.run(*args, **kwargs)
def train(self, *args, **kwargs):
self._setup()
return self.model.train(*args, **kwargs)
def test(self, *args, **kwargs):
self._setup()
return self.model.test(*args, **kwargs)
def predict(self, *args, **kwargs):
self._setup()
return self.model.predict(*args, **kwargs)
def gen_report(self, *args, **kwargs):
self._setup()
return self.model.gen_report(*args, **kwargs)
class _Model(object):
def __init__(self, labels=None):
self.name = 'Model'
self.safename = 'model'
self.isflat = False
self.labels = labels
self.model_url = None
def gen_report(self, test_ims, train_ims=None, html=None):
print('input images:', test_ims)
print('processing:', end=' ')
if html is None:
html = report.Report(path=reppath)
html.open()
close_html = True
else:
close_html = False
resps = self.run(test_ims=test_ims, train_ims=train_ims)
html.writeh('Dissimilarity', h=1)
dis = dissimilarity(resps)
plot_data(dis, kind='dis')
html.writeimg('dis', caption='Dissimilarity across stimuli'
'(blue: similar, red: dissimilar)')
html.writeh('MDS', h=1)
mds_res = mds(dis)
plot_data(mds_res, kind='mds', icons=test_ims)
html.writeimg('mds', caption='Multidimensional scaling')
if self.labels is not None:
html.writeh('Linear separability', h=1)
lin = linear_clf(dis, self.labels)
plot_data(lin, kind='linear_clf', chance=1./len(np.unique(self.labels)))
html.writeimg('lin', caption='Linear separability')
if close_html:
html.close()
def run(self, test_ims, train_ims=None, layers='output', return_dict=True):
"""
This is the main function to run the model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- train_ims (str, list, tuple, np.ndarray)
Training images
- layers ('all'; 'output', 'top', None; str, int;
list of str or int; default: None)
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
if train_ims is not None:
self.train(train_ims)
output = self.test(test_ims, layers=layers, return_dict=return_dict)
return output
def train(self, train_ims):
"""
A placeholder for a function for training a model.
If the model is not trainable, then it will default to this function
here that does nothing.
"""
self.train_ims = im2iter(train_ims)
def test(self, test_ims, layers='output', return_dict=True):
"""
A placeholder for a function for testing a model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- layers ('all'; 'output', 'top', None; str, int;
list of str or int; default: 'output')
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
self.layers = layers
# self.test_ims = im2iter(test_ims)
def predict(self, ims, topn=5):
"""
A placeholder for a function for predicting a label.
"""
pass
def _setup_layers(self, layers, model_keys):
if self.safename in CAFFE_MODELS:
filt_layers = self._filter_layers()
else:
filt_layers = model_keys
if layers in [None, 'top', 'output']:
self.layers = [filt_layers[-1]]
elif layers == 'all':
self.layers = filt_layers
elif isinstance(layers, (str, unicode)):
self.layers = [layers]
elif isinstance(layers, int):
self.layers = [filt_layers[layers]]
elif isinstance(layers, (list, tuple, np.ndarray)):
if isinstance(layers[0], int):
self.layers = [filt_layers[layer] for layer in layers]
elif isinstance(layers[0], (str, unicode)):
self.layers = layers
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
def _fmt_output(self, output, layers, return_dict=True):
self._setup_layers(layers, output.keys())
outputs = [output[layer] for layer in self.layers]
if not return_dict:
output = output[self.layers[-1]]
return output
def _im2iter(self, ims):
"""
Converts input into in iterable.
This is used to take arbitrary input value for images and convert them to
an iterable. If a string is passed, a list is returned with a single string
in it. If a list or an array of anything is passed, nothing is done.
Otherwise, if the input object does not have `len`, an Exception is thrown.
"""
if isinstance(ims, (str, unicode)):
out = [ims]
else:
try:
len(ims)
except:
raise ValueError('input image data type not recognized')
else:
try:
ndim = ims.ndim
except:
out = ims
else:
if ndim == 1: out = ims.tolist()
elif self.isflat:
if ndim == 2: out = [ims]
elif ndim == 3: out = ims
else:
raise ValueError('images must be 2D or 3D, got %d '
'dimensions instead' % ndim)
else:
if ndim == 3: out = [ims]
elif ndim == 4: out = ims
else:
raise ValueError('images must be 3D or 4D, got %d '
'dimensions instead' % ndim)
return out
def load_image(self, *args, **kwargs):
return utils.load_image(*args, **kwargs)
def dissimilarity(self, resps, kind='mean_euclidean', **kwargs):
return dissimilarity(resps, kind=kind, **kwargs)
def mds(self, dis, ims=None, ax=None, seed=None, kind='metric'):
return mds(dis, ims=ims, ax=ax, seed=seed, kind=kind)
def cluster(self, *args, **kwargs):
return cluster(*args, **kwargs)
def linear_clf(self, resps, y, clf=None):
return linear_clf(resps, y, clf=clf)
def plot_data(data, kind=None, **kwargs):
if kind in ['dis', 'dissimilarity']:
if isinstance(data, dict): data = data.values()[0]
g = sns.heatmap(data, **kwargs)
elif kind == 'mds':
g = plot.mdsplot(data, **kwargs)
elif kind in ['clust', 'cluster']:
g = sns.factorplot('layer', 'dissimilarity', data=data, kind='point')
elif kind in ['lin', 'linear_clf']:
g = sns.factorplot('layer', 'accuracy', data=data, kind='point')
if 'chance' in kwargs:
g.ax.axhline(kwargs['chance'], ls='--', c='.2')
else:
try:
g = sns.factorplot(x='layers', y=data.columns[-1], data=data)
except:
raise ValueError('Plot kind "{}" not recognized.'.format(kind))
return g
def dissimilarity(resps, kind='mean_euclidean', **kwargs):
"""
Computes dissimilarity between all rows in a matrix.
:Args:
resps (numpy.array)
A NxM array of model responses. Each row contains an
output vector of length M from a model, and distances
are computed between each pair of rows.
:Kwargs:
- kind (str or callable, default: 'mean_euclidean')
Distance metric. Accepts string values or callables recognized
by :func:`~sklearn.metrics.pairwise.pairwise_distances`, and
also 'mean_euclidean' that normalizes
Euclidean distance by the number of features (that is,
divided by M), as used, e.g., by Grill-Spector et al.
(1999), Op de Beeck et al. (2001), Panis et al. (2011).
.. note:: Up to version 0.6, 'mean_euclidean' was called
'euclidean', and 'cosine' was called 'gaborjet'. Also note
that 'correlation' used to be called 'corr' and is now
returning dissimilarities in the range [0,2] per
scikit-learn convention.
- \*\*kwargs
Keyword arguments for
:func:`~sklearn.metric.pairwise.pairwise_distances`
:Returns:
A square NxN matrix, typically symmetric unless otherwise
defined by the metric, and with NaN's in the diagonal.
"""
if kind == 'mean_euclidean':
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric='euclidean', **kwargs) / np.sqrt(x.shape[1])
else:
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric=kind, **kwargs)
if isinstance(resps, (dict, OrderedDict)):
dis = OrderedDict()
for layer, resp in resps.items():
dis[layer] = dis_func(resp)
diag = np.diag_indices(dis[layer].shape[0])
dis[layer][diag] = np.nan
else:
dis = dis_func(resps)
dis[np.diag_indices(dis.shape[0])] = np.nan
return dis
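# Hedged usage sketch (random data for illustration only): with the default
# 'mean_euclidean' metric, the pairwise Euclidean distances are divided by
# sqrt(n_features), so the result equals kind='euclidean' scaled by 1/sqrt(M).
def _dissimilarity_example():
    resps = np.random.rand(4, 100)  # 4 stimuli x 100 model features
    dis_mean = dissimilarity(resps)                    # default: mean_euclidean
    dis_raw = dissimilarity(resps, kind='euclidean')   # plain euclidean
    assert np.allclose(dis_mean * np.sqrt(resps.shape[1]), dis_raw, equal_nan=True)
    return dis_mean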
def mds(dis, ims=None, kind='metric', seed=None):
"""
Multidimensional scaling
:Args:
dis
Dissimilarity matrix
:Kwargs:
- ims
Image paths
- seed
A seed if you need to reproduce MDS results
- kind ({'classical', 'metric'}, default: 'metric')
'Classical' is based on MATLAB's cmdscale, 'metric' uses
:func:`~sklearn.manifold.MDS`.
"""
df = []
if ims is None:
if isinstance(dis, dict):
ims = map(str, range(len(dis.values()[0])))
else:
ims = map(str, range(len(dis)))
for layer_name, this_dis in dis.items():
if kind == 'classical':
vals = stats.classical_mds(this_dis)
else:
mds_model = sklearn.manifold.MDS(n_components=2,
dissimilarity='precomputed', random_state=seed)
this_dis[np.isnan(this_dis)] = 0
vals = mds_model.fit_transform(this_dis)
for im, (x,y) in zip(ims, vals):
imname = os.path.splitext(os.path.basename(im))[0]
df.append([layer_name, imname, x, y])
df = pandas.DataFrame(df, columns=['layer', 'im', 'x', 'y'])
# df = stats.factorize(df)
# if self.layers != 'all':
# if not isinstance(self.layers, (tuple, list)):
# self.layers = [self.layers]
# df = df[df.layer.isin(self.layers)]
# plot.mdsplot(df, ax=ax, icons=icons, zoom=zoom)
return df
def cluster(resps, labels, metric=None, clust=None,
bootstrap=True, stratified=False, niter=1000, ci=95, *func_args, **func_kwargs):
if metric is None:
metric = sklearn.metrics.adjusted_rand_score
struct = labels if stratified else None
n_clust = len(np.unique(labels))
if clust is None:
clust = sklearn.cluster.AgglomerativeClustering(n_clusters=n_clust, linkage='ward')
df = []
def mt(data, labels):
labels_pred = clust.fit_predict(data)
qual = metric(labels, labels_pred)
return qual
print('clustering...', end=' ')
for layer, data in resps.items():
labels_pred = clust.fit_predict(data)
qualo = metric(labels, labels_pred)
if bootstrap:
pct = stats.bootstrap_resample(data1=data, data2=labels,
niter=niter, func=mt, struct=struct, ci=None,
*func_args, **func_kwargs)
for i, p in enumerate(pct):
df.append([layer, qualo, i, p])
else:
pct = [np.nan, np.nan]
df.append([layer, qualo, 0, np.nan])
df = pandas.DataFrame(df, columns=['layer', 'iter', 'bootstrap',
'dissimilarity'])
# df = stats.factorize(df)
return df
def linear_clf(resps, y, clf=None):
if clf is None: clf = sklearn.svm.LinearSVC
df = []
n_folds = len(y) / len(np.unique(y))
for layer, resp in resps.items():
# normalize to 0 mean and variance 1 for each feature (column-wise)
resp = sklearn.preprocessing.StandardScaler().fit_transform(resp)
cv = sklearn.cross_validation.StratifiedKFold(y,
n_folds=n_folds, shuffle=True)
# from scikit-learn docs:
# need not match cross_val_scores precisely!!!
preds = sklearn.cross_validation.cross_val_predict(clf(),
resp, y, cv=cv)
for yi, pred in zip(y, preds):
df.append([layer, yi, pred, yi==pred])
df = pandas.DataFrame(df, columns=['layer', 'actual', 'predicted', 'accuracy'])
# df = stats.factorize(df)
return df
class Pixelwise(_Model):
def __init__(self):
"""
Pixelwise model
The most simple model of them all. Uses pixel values only.
"""
super(Pixelwise, self).__init__()
self.name = 'Pixelwise'
self.safename = 'px'
def test(self, test_ims, layers='output', return_dict=False):
self.layers = [self.safename]
ims = self._im2iter(test_ims)
resps = np.vstack([self.load_image(im).ravel() for im in ims])
resps = self._fmt_output(OrderedDict([(self.safename, resps)]), layers,
return_dict=return_dict)
return resps
class Retinex(_Model):
def __init__(self):
"""
Retinex algorithm
Based on A. Torralba's implementation presented at PAVIS 2014.
.. warning:: Experimental
"""
super(Retinex, self).__init__()
self.name = 'Retinex'
self.safename = 'retinex'
def gen(self, im, thres=20./256, plot=True, save=False):
im = self.load_image(im)
# 2D derivative
der = np.array([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
im_paint = np.zeros(im.shape)
im_illum = np.zeros(im.shape)
for chno in range(3):
ch = im[:,:,chno]
outv = scipy.ndimage.convolve(ch, der)
outh = scipy.ndimage.convolve(ch, der.T)
out = np.dstack([outv, outh])
# threshold
paint = np.copy(out)
paint[np.abs(paint) < thres] = 0
illum = np.copy(out)
illum[np.abs(illum) >= thres] = 0
# plt.imshow(paint[:,:,0]); plt.show()
# plt.imshow(paint[:,:,1]); plt.show()
# plt.imshow(illum[:,:,0]); plt.show()
# plt.imshow(illum[:,:,1]); plt.show()
# Pseudo-inverse (using the trick from Weiss, ICCV 2001; equations 5-7)
im_paint[:,:,chno] = self._deconvolve(paint, der)
im_illum[:,:,chno] = self._deconvolve(illum, der)
im_paint = (im_paint - np.min(im_paint)) / (np.max(im_paint) - np.min(im_paint))
im_illum = (im_illum - np.min(im_illum)) / (np.max(im_illum) - np.min(im_illum))
# paintm = scipy.misc.imread('paint2.jpg')
# illumm = scipy.misc.imread('illum2.jpg')
# print np.sum((im_paint-paintm)**2)
# print np.sum((im_illum-illumm)**2)
if plot:
sns.plt.subplot(131)
sns.plt.imshow(im)
sns.plt.subplot(132)
sns.plt.imshow(im_paint)
sns.plt.subplot(133)
sns.plt.imshow(im_illum)
sns.plt.show()
if save:
name, ext = imname.splitext()
scipy.misc.imsave('%s_paint.%s' %(name, ext), im_paint)
scipy.misc.imsave('%s_illum.%s' %(name, ext), im_illum)
def _deconvolve(self, out, der):
# der = np.dstack([der, der.T])
d = []
gi = []
for i, deri in enumerate([der, der.T]):
d.append(scipy.ndimage.convolve(out[...,i], np.flipud(np.fliplr(deri))))
gi.append(scipy.ndimage.convolve(deri, np.flipud(np.fliplr(deri)), mode='constant'))
d = np.sum(d, axis=0)
gi = np.sum(gi, axis=0)
gi = np.pad(gi, (der.shape[0]/2, der.shape[1]/2), mode='constant')
gi = scipy.ndimage.convolve(gi, np.array([[1,0,0], [0,0,0], [0,0,0]]))
mxsize = np.max(out.shape[:2])
g = np.fft.fft2(gi, s=(mxsize*2, mxsize*2))
g[g==0] = 1
h = 1/g
h[g==0] = 0
tr = h * np.fft.fft2(d, s=(mxsize*2,mxsize*2))
ii = np.fft.fftshift(np.real(np.fft.ifft2(tr)))
n = (gi.shape[0] - 5) / 2
im = ii[mxsize - n : mxsize + out.shape[0] - n,
mxsize - n : mxsize + out.shape[1] - n]
return im
class Zoccolan(_Model):
"""
Based on 10.1073/pnas.0811583106
.. warning:: Not implemented fully
"""
def __init__(self):
super(Zoccolan, self).__init__()
self.name = 'Zoccolan'
self.safename = 'zoccolan'
# receptive field sizes in degrees
#self.rfs = np.array([.6,.8,1.])
#self.rfs = np.array([.2,.35,.5])
self.rfs = [10, 20, 30] # deg visual angle
self.oris = np.linspace(0, np.pi, 12)
self.phases = [0, np.pi]
self.sfs = range(1, 11) # cycles per RF size
self.winsize = [5, 5] # size of each patch on the grid
# window size will be fixed in pixels and we'll adjust degrees accordingly
# self.win_size_px = 300
def get_gabors(self, rf):
lams = float(rf[0])/self.sfs # lambda = 1./sf #1./np.array([.1,.25,.4])
sigma = rf[0]/2./np.pi
# rf = [100,100]
gabors = np.zeros(( len(self.oris),len(self.phases),len(lams), rf[0], rf[1] ))
i = np.arange(-rf[0]/2+1,rf[0]/2+1)
#print i
j = np.arange(-rf[1]/2+1,rf[1]/2+1)
ii,jj = np.meshgrid(i,j)
for o, theta in enumerate(self.oris):
x = ii*np.cos(theta) + jj*np.sin(theta)
y = -ii*np.sin(theta) + jj*np.cos(theta)
for p, phase in enumerate(self.phases):
for s, lam in enumerate(lams):
fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
fxx -= np.mean(fxx)
fxx /= np.linalg.norm(fxx)
#if p==0:
#plt.subplot(len(oris),len(lams),count+1)
#plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
#count+=1
gabors[o,p,s,:,:] = fxx
plt.show()
return gabors
def run(self, ims):
ims = self.input2array(ims)
output = [self.test(im) for im in ims]
def test(self, im):
field = im.shape
num_tiles = (15,15)#[field[0]/10.,field[0]/10.]
size = (field[0]/num_tiles[0], field[0]/num_tiles[0])
V1 = []#np.zeros( gabors.shape + num_tiles )
# tiled_im = im.reshape((num_tiles[0],size[0],num_tiles[1],size[1]))
# tiled_im = np.rollaxis(tiled_im, 1, start=3)
# flat_im = im.reshape((num_tiles[0],num_tiles[1],-1))
for r, rf in enumerate(self.rfs):
def apply_filter(window, this_filter):
this_resp = np.dot(this_filter,window)/np.linalg.norm(this_filter)
# import pdb; pdb.set_trace()
return np.max((0,this_resp)) # returns at least zero
def filter_bank(this_filter,rf):
#print 'done0'
resp = scipy.ndimage.filters.generic_filter(
im, apply_filter, size=rf,mode='nearest',
extra_arguments = (this_filter,))
# import pdb; pdb.set_trace()
#print 'done1'
ii,jj = np.meshgrid(np.arange(0,field[0],size[0]),
np.arange(0,field[1],size[1]) )
selresp = resp[jj,ii]
# maxresp = scipy.ndimage.filters.maximum_filter(
# resp,
# size = size,
# mode = 'nearest'
# )
return np.ravel(selresp)
gabors = self.get_gabors(rf)
#import pdb; pdb.set_trace()
gabors = gabors.reshape(gabors.shape[:3]+(-1,))
# gabors_norms = np.apply_along_axis(np.linalg.norm, -1, gabors)
# import pdb; pdb.set_trace()
# V1.append( np.apply_along_axis(filter_bank, -1, gabors,rf) )
V1resp = np.zeros(gabors.shape[:-1]+num_tiles)
# import pdb; pdb.set_trace()
for i,wi in enumerate(np.arange(0,field[0]-rf[0],size[0])):
for j,wj in enumerate(np.arange(0,field[1]-rf[1],size[1])):
window = im[wi:wi+rf[0],wj:wj+rf[1]]
resp = np.inner(gabors, np.ravel(window))
import os, time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Ellipse, RegularPolygon, Circle
from matplotlib.collections import PatchCollection
import gym
from gym import error, spaces, utils
from gym.utils import seeding
class TwoCarrierEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
self.seed()
self.viewer = None
self.prev_reward = None
self.max_episode_steps = 500
self.observation_space = spaces.Box(low=-10., high=10., shape=(3,), dtype=np.float32)
self.action_space = spaces.Tuple((spaces.Discrete(4), spaces.Discrete(4))) # ^v<>
self.action_codebook = np.array([
[0., .02],
[0., -.02],
[-.02, 0.],
[.02, 0.]
])
# vars
self.rod_pose = np.zeros(3)
self.c0_position = np.array([
self.rod_pose[0]+.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]+.5*np.sin(self.rod_pose[-1])
])
self.c1_position = np.array([
self.rod_pose[0]-.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]-.5*np.sin(self.rod_pose[-1])
])
self.c0_traj = []
self.c1_traj = []
# prepare renderer
self.fig = plt.figure(figsize=(12,8))
self.ax = self.fig.add_subplot(111)
nwwpat = Rectangle(xy=(-5.5,5), width=5.1, height=.5, fc='gray')
newpat = Rectangle(xy=(.4,5), width=5.1, height=.5, fc='gray')
wwpat = Rectangle(xy=(-5.5,-.5), width=.5, height=6, fc='gray')
ewpat = Rectangle(xy=(5,-.5), width=.5, height=6, fc='gray')
swpat = Rectangle(xy=(-5.5,-.5), width=11, height=.5, fc='gray')
self.fixed_patches = [nwwpat, newpat, wwpat, ewpat, swpat]
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
# init rod coordinations
x = np.random.uniform(-3.9, 3.9)
y = .2
theta = 0.
self.rod_pose = np.array([x, y, theta])
self.c0_position = np.array([
self.rod_pose[0]+.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]+.5*np.sin(self.rod_pose[-1])
])
self.c1_position = np.array([
self.rod_pose[0]-.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]-.5*np.sin(self.rod_pose[-1])
])
self.c0_traj = [self.c0_position.copy()]
self.c1_traj = [self.c1_position.copy()]
return self.rod_pose
def step(self, action):
done = False
info = ''
reward = 0
prev_rod = self.rod_pose.copy()
prev_c0 = self.c0_position.copy()
prev_c1 = self.c1_position.copy()
# compute rod's displacement and rotation
disp = self.action_codebook[action[0]] + self.action_codebook[action[1]]
rot = 0.
rot += -np.arctan2(self.action_codebook[action[0]][0]*np.sin(self.rod_pose[-1]), .5) + \
np.arctan2(self.action_codebook[action[0]][1]*np.cos(self.rod_pose[-1]), .5) + \
np.arctan2(self.action_codebook[action[1]][0]*np.sin(self.rod_pose[-1]), .5) - \
np.arctan2(self.action_codebook[action[1]][1]*np.cos(self.rod_pose[-1]), .5)
deltas = np.append(disp, rot)
self.rod_pose += deltas
self.c0_position = np.array([
self.rod_pose[0]+.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]+.5*np.sin(self.rod_pose[-1])
])
self.c1_position = np.array([
self.rod_pose[0]-.5*np.cos(self.rod_pose[-1]),
self.rod_pose[1]-.5*np.sin(self.rod_pose[-1])
])
self.c0_traj.append(self.c0_position.copy())
self.c1_traj.append(self.c1_position.copy())
# restrict angle in (-pi,pi)
if np.pi<self.rod_pose[-1]<=2*np.pi:
self.rod_pose[-1] -= 2*np.pi
elif -np.pi>self.rod_pose[-1]>=-2*np.pi:
self.rod_pose[-1] += 2*np.pi
# compute reward
# uvec_vert = np.array([0., 1.]) # unit vertical vector
# uvec_prod = (prev_c0-prev_c1)/np.linalg.norm(prev_c0-prev_c1) # unit vector of previous rod
# uvec_rod = (self.c0_position-self.c1_position)/np.linalg.norm(self.c0_position-self.c1_position) # unit vector of current rod
# prev_vertang = np.arccos(np.dot(uvec_vert, uvec_prod)) # angle between previous rod and vertical vector
# if prev_vertang>np.pi/2:
# prev_vertang = np.pi-prev_vertang # restrict angle to (0, pi/2)
# vertang = np.arccos(np.dot(uvec_vert, uvec_rod)) # angle between current rod and vertical vector
# if vertang>np.pi/2:
# vertang = np.pi-vertang
# reward = np.abs(prev_rod[0])-np.abs(self.rod_pose[0]) + \
# self.rod_pose[1]-prev_rod[1] + \
# prev_vertang-vertang
reward = np.abs(prev_c0[0])-np.abs(self.c0_position[0]) + np.abs(prev_c1[0])-np.abs(self.c1_position[0]) + \
(self.c0_position[1]-prev_c0[1] + self.c1_position[1]-prev_c1[1])
# check crash
rod_points = np.linspace(self.c0_position, self.c1_position, 50)
for p in self.fixed_patches:
if np.sum(p.contains_points(rod_points, radius=.001)):
done = True
info = 'crash wall'
break
# check escape
if self.c0_position[1]>5.5 and self.c1_position[1]>5.5:
reward = 100.
done = True
info = 'escaped'
return self.rod_pose, reward, done, info
def render(self, mode='human'):
self.ax = self.fig.get_axes()[0]
self.ax.cla()
patch_list = []
patch_list += self.fixed_patches
# add wall patches
c0pat = Circle(
xy=(self.c0_position[0], self.c0_position[-1]),
radius=.05,
ec='black',
fc='white'
)
patch_list.append(c0pat)
c1pat = Circle(
xy=(self.c1_position[0], self.c1_position[-1]),
radius=.05,
fc='black'
)
patch_list.append(c1pat)
pc = PatchCollection(patch_list, match_original=True) # match_origin prevent PatchCollection mess up original color
# plot patches
self.ax.add_collection(pc)
# plot rod
self.ax.plot(
[self.c0_position[0], self.c1_position[0]],
[self.c0_position[1], self.c1_position[1]],
color='darkorange'
)
# plot trajectory
if self.c0_traj and self.c1_traj:
traj_c0 = np.array(self.c0_traj)
traj_c1 = np.array(self.c1_traj)
self.ax.plot(traj_c0[-100:,0], traj_c0[-100:,1], linestyle=':', linewidth=0.5, color='black')
self.ax.plot(traj_c1[-100:,0], traj_c1[-100:,1], linestyle=':', linewidth=0.5, color='black')
# Set ax
self.ax.axis(np.array([-6, 6, -1, 7]))
self.ax.set_xticks(np.arange(-6, 7))
self.ax.set_yticks(np.arange(-1, 8))
import abc
import copy
import csv
import os
import typing
import warnings
from numbers import Number
from collections import defaultdict
from collections.abc import Sequence
import numpy as np
import scipy.interpolate
import scipy.signal
from slippy.core import _MaterialABC, _SurfaceABC
from .ACF_class import ACF
from .roughness_funcs import get_height_of_mat_vr, low_pass_filter
from .roughness_funcs import get_mat_vr, get_summit_curvatures
from .roughness_funcs import roughness, subtract_polynomial, find_summits
__all__ = ['Surface', 'assurface', 'read_surface', '_Surface', '_AnalyticalSurface', 'RollingSurface']
def assurface(profile, grid_spacing=None):
""" make a surface from a profile
Parameters
----------
profile : array-like
The surface profile
grid_spacing : float optional (None)
The spacing between grid points on the surface
Returns
-------
surface : Surface object
A surface object with the specified profile and grid size
See Also
--------
Surface
read_surface
Notes
-----
Examples
--------
>>> profile=np.random.normal(size=[10,10])
>>> my_surface=assurface(profile, 0.1)
>>> my_surface.extent
[1,1]
"""
return Surface(profile=profile, grid_spacing=grid_spacing)
def read_surface(file_name, **kwargs):
""" Read a surface from a file
Parameters
----------
file_name : str
The full path to the data file
Other Parameters
----------------
delim : str optional (',')
The delimiter used in the data file, only needed for csv or txt files
p_name : str optional ('profile')
The name of the variable containing the profile data, needed if a .mat
file is given
gs_name : str optional ('grid_spacing')
The name of the variable containing the grid_spacing, needed if a .mat
file is given
Returns
-------
A surface object generated from the file
See Also
--------
Surface
alicona_read
scipy.io.loadmat
Notes
-----
This function directly invokes the surface class, any other keywords that
can be passed to that class can be passed to this function
Examples
--------
>>> # Read a csv file with tab delimiters
>>> my_surface=read_surface('data.csv', delim='\t')
>>> # Read a .al3d file
>>> my_surface=read_surface('data.al3d')
>>> # Read a .mat file with variables called prof and gs
>>> my_surface=read_surface('data.mat', p_name='prof', gs_name='gs')
"""
return Surface(file_name=file_name, **kwargs)
class _Surface(_SurfaceABC):
"""
An abstract base class for surface types, this class should be extended to given new types of surface. To create an
analytical surface please subclass _AnalyticalSurface
"""
# The surface class for discrete surfaces (typically experimental)
is_discrete: bool = False
""" A bool flag, True if there is a profile present """
acf: typing.Optional[ACF] = None
""" The auto correlation function of the surface profile """
psd: typing.Optional[np.ndarray] = None
""" The power spectral density of the surface """
fft: typing.Optional[np.ndarray] = None
""" The fast fourier transform of the surface """
surface_type: str = "Generic"
""" A description of the surface type """
dimensions: typing.Optional[int] = 2
""" The number of spatial dimensions that """
is_analytic: bool = False
_material: typing.Optional[_MaterialABC] = None
unworn_profile: typing.Optional[np.ndarray] = None
_profile: typing.Optional[np.ndarray] = None
_grid_spacing: typing.Optional[float] = None
_shape: typing.Optional[tuple] = None
_extent: typing.Optional[tuple] = None
_inter_func = None
_allowed_keys = {}
_mask: typing.Optional[np.ndarray] = None
_size: typing.Optional[int] = None
_subclass_registry = []
_original_extent = None
wear_volumes: typing.Optional[defaultdict] = None
def __init__(self, grid_spacing: typing.Optional[float] = None, extent: typing.Optional[tuple] = None,
shape: typing.Optional[tuple] = None, is_discrete: bool = False):
if grid_spacing is not None and extent is not None and shape is not None:
raise ValueError("Up to two of grid_spacing, extent and size should be set, all three were set")
self.is_discrete = is_discrete
if grid_spacing is not None:
self.grid_spacing = grid_spacing
if extent is not None:
self.extent = extent
if shape is not None:
self.shape = shape
@classmethod
def __init_subclass__(cls, is_abstract=False, **kwargs):
super().__init_subclass__(**kwargs)
if not is_abstract:
_Surface._subclass_registry.append(cls)
@property
def size(self):
"""The total number of points in the surface"""
return self._size
@property
def mask(self):
"""A mask used to exclude some values from analysis, a single float or an array of bool the same size as profile
Either a boolean array of size self.size or a float of the value to be excluded
"""
return self._mask
@mask.setter
def mask(self, value: typing.Union[float, np.ndarray]):
if type(value) is float:
if np.isnan(value):
mask = np.isnan(self.profile)
else:
mask = self.profile == value
elif isinstance(value, np.ndarray):
mask = np.asarray(value, dtype=bool)
if not mask.shape == self.shape:
msg = ("profile and mask shapes do not match: profile is"
"{profile.shape}, mask is {mask.shape}".format(**locals()))
raise TypeError(msg)
elif isinstance(value, str):
raise TypeError('Mask cannot be a string')
elif isinstance(value, Sequence):
mask = np.zeros_like(self.profile, dtype=bool)
for item in value:
self.mask = item
mask = np.logical_and(self._mask, mask)
else:
raise TypeError("Mask type is not recognised")
self._mask = mask
@mask.deleter
def mask(self):
self._mask = None
@property
def extent(self):
""" The overall dimensions of the surface in the same units as grid spacing
"""
return self._extent
@extent.setter
def extent(self, value: typing.Sequence[float]):
if not isinstance(value, Sequence):
msg = "Extent must be a Sequence, got {}".format(type(value))
raise TypeError(msg)
if len(value) > 2:
raise ValueError("Too many elements in extent, must be a maximum of two dimensions")
if self.profile is not None:
p_aspect = (self.shape[0]) / (self.shape[1])
e_aspect = value[0] / value[1]
if abs(e_aspect - p_aspect) > 0.0001:
msg = "Extent aspect ratio doesn't match profile aspect ratio"
raise ValueError(msg)
else:
self._extent = tuple(value)
self._grid_spacing = value[0] / (self.shape[0])
else:
self._extent = tuple(value)
self.dimensions = len(value)
if self.grid_spacing is not None:
self._shape = tuple([int(v / self.grid_spacing) for v in value])
self._size = np.product(self._shape)
if self._shape is not None:
self._grid_spacing = self._extent[0] / self._shape[0]
self._extent = tuple([sz * self._grid_spacing for sz in self._shape])
return
@extent.deleter
def extent(self):
self._extent = None
self._grid_spacing = None
if self.profile is None:
self._shape = None
self._size = None
@property
def shape(self):
"""The shape of the surface profile array, the number of points in each direction
"""
return self._shape
@shape.setter
def shape(self, value: typing.Sequence[int]):
if not isinstance(value, Sequence):
raise ValueError(f"Shape should be a Sequence type, got: {type(value)}")
if self._profile is not None:
raise ValueError("Cannot set shape when profile is present")
self._shape = tuple([int(x) for x in value])
self._size = np.product(self._shape)
if self.grid_spacing is not None:
self._extent = tuple([v * self.grid_spacing for v in value])
elif self.extent is not None:
self._grid_spacing = self._extent[0] / self._shape[0]
self._extent = tuple([sz * self.grid_spacing for sz in self.shape])
@shape.deleter
def shape(self):
if self.profile is None:
self._shape = None
self._size = None
self._extent = None
self._grid_spacing = None
else:
msg = "Cannot delete shape with a surface profile set"
raise ValueError(msg)
@property
def profile(self):
"""The height data for the surface profile
"""
return self._profile
@profile.setter
def profile(self, value: np.ndarray):
"""Sets the profile property
"""
if value is None:
return
try:
self.unworn_profile = np.asarray(value, dtype=float).copy()
# this has to be before _profile is set (rewritten for rolling surface)
self.wear_volumes = defaultdict(lambda: np.zeros_like(self.unworn_profile))
self._profile = np.asarray(value, dtype=float).copy()
except ValueError:
msg = "Could not convert profile to array of floats, profile contains invalid values"
raise ValueError(msg)
self._shape = self._profile.shape
self._size = self._profile.size
self.dimensions = len(self._profile.shape)
if self.grid_spacing is not None:
self._extent = tuple([self.grid_spacing * p for p in self.shape])
elif self.extent is not None:
if self.dimensions == 1:
self._grid_spacing = (self.extent[0] / self.shape[0])
if self.dimensions == 2:
e_aspect = self.extent[0] / self.extent[1]
p_aspect = self.shape[0] / self.shape[1]
if abs(e_aspect - p_aspect) < 0.0001:
self._grid_spacing = (self.extent[0] / self.shape[0])
else:
warnings.warn("Global size does not match profile size,"
" global size has been deleted")
self._extent = None
@profile.deleter
def profile(self):
self.unworn_profile = None
self._profile = None
del self.shape
del self.extent
del self.mask
self.wear_volumes = None
self.is_discrete = False
@property
def grid_spacing(self):
"""The distance between grid points in the x and y directions
"""
return self._grid_spacing
@grid_spacing.setter
def grid_spacing(self, grid_spacing: float):
if grid_spacing is None:
return
if not isinstance(grid_spacing, float):
try:
# noinspection PyTypeChecker
grid_spacing = float(grid_spacing)
except ValueError:
msg = ("Invalid type, grid spacing of type {} could not be "
"converted into float".format(type(grid_spacing)))
raise ValueError(msg)
if np.isinf(grid_spacing):
msg = "Grid spacing must be finite"
raise ValueError(msg)
self._grid_spacing = grid_spacing
if self.profile is None:
if self.extent is not None:
self._shape = tuple([int(sz / grid_spacing) for sz in self.extent])
                self._size = np.prod(self._shape)
self._extent = tuple([sz * grid_spacing for sz in self._shape])
elif self.shape is not None:
self._extent = tuple([grid_spacing * pt for pt in self.shape])
else:
self._extent = tuple([s * grid_spacing for s in self.shape])
@grid_spacing.deleter
def grid_spacing(self):
self._extent = None
self._grid_spacing = None
if self.profile is None:
del self.shape
@property
def material(self):
""" A material object describing the properties of the surface """
return self._material
@material.setter
def material(self, value):
if isinstance(value, _MaterialABC):
self._material = value
else:
raise ValueError("Unable to set material, expected material object"
" received %s" % str(type(value)))
@material.deleter
def material(self):
self._material = None
def wear(self, name: str, x_pts: np.ndarray, y_pts: np.ndarray, depth: np.ndarray):
"""
Add wear / geometry changes to the surface profile
Parameters
----------
name: str
Name of the source of wear
x_pts: np.ndarray
The x locations of the worn points in length units
y_pts: np.ndarray
The y locations of the worn points in length units
depth: np.ndarray
The depth to wear each point, negative values will add height
"""
if not x_pts.size == y_pts.size == depth.size:
raise ValueError(f"X, Y locations and wear depths are not the same size for wear '{name}':\n"
f"x:{x_pts.size}\n"
f"y:{y_pts.size}\n"
f"depth:{depth.size}")
if np.any(np.isnan(depth)):
raise ValueError(f"Some wear depth values are nan for wear {name}")
        # round to the nearest grid node before applying wear
        x_ind = np.array(x_pts / self.grid_spacing + 0.5, dtype=np.uint16)
        y_ind = np.array(y_pts / self.grid_spacing + 0.5, dtype=np.uint16)
self.wear_volumes[name][y_ind, x_ind] += depth
self._profile[y_ind, x_ind] -= depth
self._inter_func = None # force remaking the interpolator if the surface has been worn
def get_fft(self, profile_in=None):
""" Find the fourier transform of the surface
Finds the fft of the surface and stores it in your_instance.fft
Parameters
----------
        profile_in : array-like, optional (None)
            If set, the fft of profile_in is found and returned, otherwise
            the instance's profile attribute is used
Returns
-------
transform : array
The fft of the instance's profile or the profile_in if one is
supplied
See Also
--------
get_psd
get_acf
show
Notes
-----
Uses numpy fft.fft or fft.fft2 depending on the shape of the profile
Examples
--------
        >>> # Set the fft property of the surface
        >>> import slippy.surface as s
        >>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
        >>> my_surface.get_fft()
        >>> # Return the fft of a provided profile
        >>> fft_of_profile_2 = my_surface.get_fft(np.array([[1,2],[3,4]]))
"""
if profile_in is None:
profile = self.profile
else:
profile = profile_in
try:
            if len(profile.shape) == 1:
                transform = np.fft.fft(profile)
            else:
                transform = np.fft.fft2(profile)
except AttributeError:
raise AttributeError('Surface must have a defined profile for fft'
' to be used')
if profile_in is None:
self.fft = transform
else:
return transform
def get_acf(self, profile_in=None):
""" Find the auto correlation function of the surface
Finds the ACF of the surface and stores it in your_instance.acf
Parameters
----------
profile_in : array-like optional (None)
Returns
-------
output : ACF object
An acf object with the acf data stored, the values can be extracted
by numpy.array(output)
See Also
--------
get_psd
get_fft
show
slippy.surface.ACF
Notes
-----
ACF data is kept in ACF objects, these can then be interpolated or
evaluated at specific points with a call:
Examples
--------
>>> import slippy.surface as s
>>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
>>> # Sets the acf property of the surface with an ACF object
>>> my_surface.get_acf()
>>> # The acf values are then given by the following
>>> np.array(my_surface.acf)
>>> # The acf can be shown using the show function:
>>> my_surface.show('acf', 'image')
>>> # Finding the ACF of a provided profile:
>>> ACF_object_for_profile_2=my_surface.get_acf(np.array([[4, 3], [2, 1]]))
>>> # equivalent to ACF(profile_2)
"""
if profile_in is None:
# noinspection PyTypeChecker
self.acf = ACF(self)
else:
profile = np.asarray(profile_in)
# noinspection PyTypeChecker
output = np.array(ACF(profile))
return output
def get_psd(self):
""" Find the power spectral density of the surface
Finds the PSD of the surface and stores it in your_instance.psd
Parameters
----------
(None)
Returns
-------
(None), sets the psd attribute of the instance
See Also
--------
get_fft
get_acf
show
Notes
-----
Finds the psd by fourier transforming the ACF, in doing so looks for
the instance's acf property. If this is not found the acf is calculated
and set.
Examples
--------
>>> # sets the psd attribute of my_surface
>>> import slippy.surface as s
>>> my_surface = s.assurface([[1,2],[3,4]], grid_spacing=1)
>>> my_surface.get_psd()
"""
# PSD is the fft of the ACF (https://en.wikipedia.org/wiki/Spectral_density#Power_spectral_density)
if self.acf is None:
self.get_acf()
# noinspection PyTypeChecker
self.psd = self.get_fft(np.asarray(self.acf))
def subtract_polynomial(self, order, mask=None):
""" Flatten the surface by subtracting a polynomial
Alias for :func:`~slippy.surface.subtract_polynomial` function
"""
if mask is None:
mask = self.mask
new_profile, coefs = subtract_polynomial(self.profile, order, mask)
self.profile = new_profile
return coefs
def roughness(self, parameter_name, mask=None, curved_surface=False,
no_flattening=False, filter_cut_off=None,
four_nearest=False):
""" Find areal roughness parameters
Alias for :func:`~slippy.surface.roughness` function
"""
if mask is None:
mask = self.mask
out = roughness(self, parameter_name, mask=mask,
curved_surface=curved_surface,
no_flattening=no_flattening,
filter_cut_off=filter_cut_off,
four_nearest=four_nearest)
return out
def get_mat_vr(self, height, void=False, mask=None, ratio=True):
""" Find the material or void volume ratio for a given height
Alias for :func:`~slippy.surface.get_mat_vr` function
"""
if mask is None:
mask = self.mask
return get_mat_vr(height, profile=self.profile, void=void, mask=mask,
ratio=ratio)
def get_height_of_mat_vr(self, ratio, void=False, mask=None,
accuracy=0.001):
""" Find the height of a given material or void volume ratio
Alias for :func:`~slippy.surface.get_height_of_mat_vr` function
"""
if mask is None:
mask = self.mask
return get_height_of_mat_vr(ratio, self.profile, void=void, mask=mask,
accuracy=accuracy)
def get_summit_curvature(self, summits=None, mask=None,
filter_cut_off=None, four_nearest=False):
""" Get summit curvatures
Alias for :func:`~slippy.surface.get_summit_curvature` function
"""
if mask is None:
mask = self.mask
return get_summit_curvatures(self.profile, summits=summits, mask=mask,
filter_cut_off=filter_cut_off,
four_nearest=four_nearest, grid_spacing=self.grid_spacing)
def find_summits(self, mask=None, four_nearest=False, filter_cut_off=None,
invert=False):
""" Find summits after low pass filtering
Alias for :func:`~slippy.surface.find_summits` function
"""
if mask is None:
mask = self.mask
if invert:
return find_summits(self.profile * -1,
grid_spacing=self.grid_spacing, mask=mask,
four_nearest=four_nearest,
filter_cut_off=filter_cut_off)
else:
return find_summits(self, mask=mask, four_nearest=four_nearest,
filter_cut_off=filter_cut_off)
def low_pass_filter(self, cut_off_freq, return_copy=False):
""" Low pass FIR filter the surface profile
Alias for :func:`~slippy.surface.low_pass_filter` function
"""
if return_copy:
return low_pass_filter(self, cut_off_freq)
else:
self.profile = low_pass_filter(self, cut_off_freq)
def resample(self, new_grid_spacing=None, return_profile=False, remake_interpolator=False):
""" Resample or crop the profile by interpolation
Parameters
----------
new_grid_spacing : float, optional (None)
The grid spacing on the new surface, if the grid_spacing is not set on the current surface it is assumed to
be 1
return_profile : bool, optional (False)
If true the interpolated profile is returned otherwise it is set as the profile of the instance
remake_interpolator : bool, optional (False)
If true any memoized interpolator will be deleted and remade based on the current profile before
interpolation, see notes.
Returns
-------
new_profile : array
If return_profile is True the interpolated profile is returned
See Also
--------
rotate
fill_holes
surface_like
Notes
-----
On the first call this function will make an interpolator object which
is used to interpolate, on subsequent calls this object is found and
used resulting in no loss of quality. If the remake_interpolator key
word is set to true this interpolator is remade. This will result in a
loss of quality for subsequent calls but is necessary if the profile
property has changed.
This method does not support masking.
The profile should have nan or inf values removed by the fill_holes
method before running this
Examples
--------
>>> import numpy as np
>>> import slippy.surface as s
>>> profile=np.random.normal(size=(101,101))
>>> my_surface=s.assurface(profile, grid_spacing=1)
>>> # interpolate on a coarse grid:
>>> my_surface.resample(10)
>>> # check shape:
>>> my_surface.shape
(11,11)
>>> # restore original profile:
>>> my_surface.resample(1)
>>> my_surface.shape
(101,101)
"""
gs_changed = False
if self.grid_spacing is None:
gs_changed = True
self.grid_spacing = 1
if remake_interpolator or self._inter_func is None:
self._original_extent = self.extent
x0 = np.arange(0, self.extent[0], self.grid_spacing)
y0 = np.arange(0, self.extent[1], self.grid_spacing)
self._inter_func = scipy.interpolate.RectBivariateSpline(x0, y0, self.profile)
x1 = np.arange(0, self._original_extent[0], new_grid_spacing)
y1 = np.arange(0, self._original_extent[1], new_grid_spacing)
new_profile = self._inter_func(x1, y1)
if gs_changed:
del self.grid_spacing
if return_profile:
return new_profile
else:
self.profile = new_profile
if not gs_changed:
self.grid_spacing = new_grid_spacing
def __add__(self, other):
if not isinstance(other, _Surface):
return Surface(profile=self.profile + other, grid_spacing=self.grid_spacing)
if self.grid_spacing is not None and other.grid_spacing is not None and self.grid_spacing != other.grid_spacing:
if self.grid_spacing < other.grid_spacing:
prof_2 = other.resample(self.grid_spacing, return_profile=True)
prof_1 = self.profile
new_gs = self.grid_spacing
else:
prof_1 = self.resample(other.grid_spacing, return_profile=True)
prof_2 = other.profile
new_gs = other.grid_spacing
else:
prof_1 = self.profile
prof_2 = other.profile
if self.grid_spacing is not None:
new_gs = self.grid_spacing
else:
new_gs = other.grid_spacing
new_shape = [min(p1s, p2s) for p1s, p2s in zip(prof_1.shape, prof_2.shape)]
new_profile = prof_1[0:new_shape[0], 0:new_shape[1]] + prof_2[0:new_shape[0], 0:new_shape[1]]
return Surface(profile=new_profile, grid_spacing=new_gs)
def __mul__(self, other):
if isinstance(other, Number):
return Surface(profile=self.profile*other, grid_spacing=self.grid_spacing)
else:
raise NotImplementedError("Multiplication not implement for Surfaces unless other parameter is number")
def __div__(self, other):
if isinstance(other, Number):
return Surface(profile=self.profile/other, grid_spacing=self.grid_spacing)
else:
raise NotImplementedError("Division not implement for Surfaces unless other parameter is number")
def __sub__(self, other):
if not isinstance(other, _Surface):
return Surface(profile=self.profile - other, grid_spacing=self.grid_spacing)
if self.grid_spacing is not None and other.grid_spacing is not None and self.grid_spacing != other.grid_spacing:
if self.grid_spacing < other.grid_spacing:
prof_2 = other.resample(self.grid_spacing, return_profile=True)
prof_1 = self.profile
new_gs = self.grid_spacing
else:
prof_1 = self.resample(other.grid_spacing, return_profile=True)
prof_2 = other.profile
new_gs = other.grid_spacing
else:
prof_1 = self.profile
prof_2 = other.profile
if self.grid_spacing is not None:
new_gs = self.grid_spacing
else:
new_gs = other.grid_spacing
new_shape = [min(p1s, p2s) for p1s, p2s in zip(prof_1.shape, prof_2.shape)]
new_profile = prof_1[0:new_shape[0], 0:new_shape[1]] - prof_2[0:new_shape[0], 0:new_shape[1]]
return Surface(profile=new_profile, grid_spacing=new_gs)
def __eq__(self, other):
if not isinstance(other, _Surface) or self.is_discrete != other.is_discrete:
return False
if self.is_discrete:
return np.array_equal(self.profile, other.profile) and self.grid_spacing == other.grid_spacing
else:
return repr(self) == repr(other)
def show(self, property_to_plot: typing.Union[str, typing.Sequence[str]] = 'profile',
plot_type: typing.Union[str, typing.Sequence[str]] = 'default', ax=False, *, dist=None, stride=None,
**figure_kwargs):
""" Plot surface properties
Parameters
----------
property_to_plot : str or list of str length N optional ('profile')
The property to be plotted see notes for supported names
plot_type : str or list of str length N optional ('default')
The type of plot to be produced, see notes for supported types
ax : matplotlib axes or False optional (False)
If supplied the plot will be added to the axis
dist : a scipy probability distribution, optional (None)
Only used if probplot is requested, the probability distribution
to plot against
stride : float, optional (None)
Only used if a wire frame plot is requested, the stride between
wires
figure_kwargs : optional (None)
Keyword arguments sent to the figure function in matplotlib
Returns
-------
ax : matplotlib axes or list of matplotlib axes length N
The axis with the plot
See Also
--------
get_fft
get_psd
get_acf
ACF
Notes
-----
If fft, psd or acf are requested the field of the surface is filled
by the relevant get_ method before plotting.
The grid spacing attribute should be set before plotting
2D and 1D plots can be produced. 2D properties are:
- profile - surface profile
- unworn_profile - the surface profile with no wear applied
- fft2d - fft of the surface profile
- psd - power spectral density of the surface profile
- acf - auto correlation function of the surface
- apsd - angular power spectral density of the profile
Plot types allowed for 2D plots are:
- surface (default)
- image
- mesh
If a mesh plot is requested the distance between lines in the mesh can
be specified with the stride keyword
1D properties are:
- histogram - histogram of the profile heights
        - fft1d - 1 dimensional fft of the surface
        - qq - quantile-quantile plot of the surface heights
        If a qq plot or distribution histogram is requested the distribution to
        be plotted against the height values can be specified by the dist keyword
        Each of the 1D properties can only be plotted on its default plot type
Examples
--------
>>> # show the surface profile as an image:
>>> import slippy.surface as s
>>> import numpy as np
>>> my_surface=s.assurface(np.random.rand(10,10))
>>> my_surface.show('profile', 'image')
>>> # show the 2D fft of the surface profile with a range of plot types
>>> my_surface.show(['fft2D','fft2D','fft2D'], ['mesh', 'image', 'default'])
"""
import matplotlib.pyplot as plt
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
from scipy.stats import probplot
if self.profile is None:
raise AttributeError('The profile of the surface must be set before it can be shown')
if self.grid_spacing is None:
raise AttributeError("The grid spacing of the surface must be set before it can be shown")
types2d = ['profile', 'fft2d', 'psd', 'acf', 'apsd', 'unworn_profile']
types1d = ['histogram', 'fft1d', 'qq', 'hist']
# using a recursive call to deal with multiple plots on the same fig
if isinstance(property_to_plot, Sequence) and not isinstance(property_to_plot, str):
number_of_subplots = len(property_to_plot)
if not type(ax) is bool:
msg = ("Can't plot multiple plots on single axis, "
'making new figure')
warnings.warn(msg)
if isinstance(plot_type, Sequence) and not isinstance(plot_type, str):
plot_type = list(plot_type)
if len(plot_type) < number_of_subplots:
plot_type.extend(['default'] * (number_of_subplots - len(plot_type)))
else:
plot_type = [plot_type, ] * number_of_subplots
# 11, 12, 13, 22, then filling up rows of 3 (unlikely to be used)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
if len(property_to_plot) < 5:
n_cols = [1, 2, 3, 2][number_of_subplots - 1]
else:
n_cols = 3
n_rows = int(np.ceil(number_of_subplots / 3))
fig = plt.figure(**figure_kwargs)
ax = []
sub_plot_number = 100 * n_rows + 10 * n_cols + 1
for i in range(number_of_subplots):
if property_to_plot[i].lower() in types2d and not plot_type[i] in ('image', 'default'):
ax.append(fig.add_subplot(sub_plot_number + i, projection='3d'))
else:
ax.append(fig.add_subplot(sub_plot_number + i))
self.show(property_to_plot[i], plot_type[i], ax[i])
return fig, ax
#######################################################################
# main method
#######################################################################
# 2D plots
try:
property_to_plot = property_to_plot.lower()
except AttributeError:
msg = "Property to plot must be a string or a list of strings"
raise ValueError(msg)
if not (property_to_plot in types2d or property_to_plot in types1d):
msg = ('Unsupported property to plot see documentation for details'
', type given: \n' + str(property_to_plot) + ' \nsupported ty'
'pes: \n' + ' '.join(types2d + types1d))
raise ValueError(msg)
if not ax:
fig = plt.figure(**figure_kwargs)
if property_to_plot in types2d:
if not ax and (plot_type == 'image' or plot_type == 'default'):
# noinspection PyUnboundLocalVariable
ax = fig.add_subplot(111)
elif not ax:
# noinspection PyUnboundLocalVariable
ax = fig.add_subplot(111, projection='3d')
if property_to_plot == 'profile':
labels = ['Surface profile', 'x', 'y', 'Height']
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
z = self.profile
elif property_to_plot == 'unworn_profile':
labels = ['Surface profile (unworn)', 'x', 'y', 'Height']
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
z = self.unworn_profile
elif property_to_plot == 'fft2d':
labels = ['Fourier transform of surface', 'u', 'v', '|F(x)|']
if self.fft is None:
self.get_fft()
z = np.abs(np.fft.fftshift(self.fft))
x = np.fft.fftfreq(self.shape[0], self.grid_spacing)
y = np.fft.fftfreq(self.shape[1], self.grid_spacing)
elif property_to_plot == 'psd':
labels = ['Power spectral density', 'u', 'v', 'Power/ frequency']
if self.psd is None:
self.get_psd()
# noinspection PyTypeChecker
z = np.log(np.abs(np.fft.fftshift(self.psd)))
x = np.fft.fftfreq(self.shape[0], self.grid_spacing)
y = np.fft.fftfreq(self.shape[1], self.grid_spacing)
elif property_to_plot == 'acf':
labels = ['Auto correlation function', 'x', 'y',
'Surface auto correlation']
if self.acf is None:
self.get_acf()
# noinspection PyTypeChecker
z = np.abs(np.asarray(self.acf))
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
x = x - max(x) / 2
y = y - max(y) / 2
elif property_to_plot == 'apsd':
labels = ['Angular power spectral density', 'x', 'y']
if self.fft is None:
self.get_fft()
p_area = (self.shape[0] - 1) * (self.shape[1] - 1) * self.grid_spacing ** 2
z = self.fft * np.conj(self.fft) / p_area
x = self.grid_spacing * np.arange(self.shape[0])
y = self.grid_spacing * np.arange(self.shape[1])
x = x - max(x) / 2
y = y - max(y) / 2
else:
raise ValueError("Property not recognised")
mesh_x, mesh_y = np.meshgrid(x, y)
if plot_type == 'surface':
ax.plot_surface(mesh_x, mesh_y, np.transpose(z))
# plt.axis('equal')
ax.set_zlabel(labels[3])
elif plot_type == 'mesh':
if property_to_plot == 'psd' or property_to_plot == 'fft2d':
mesh_x, mesh_y = np.fft.fftshift(mesh_x), np.fft.fftshift(mesh_y)
if stride:
ax.plot_wireframe(mesh_x, mesh_y, np.transpose(z), rstride=stride,
cstride=stride)
else:
ax.plot_wireframe(mesh_x, mesh_y, np.transpose(z), rstride=25,
cstride=25)
ax.set_zlabel(labels[3])
elif plot_type == 'default' or plot_type == 'image':
ax.imshow(z, extent=[min(y), max(y), min(x), max(x)], aspect=1)
else:
                raise ValueError('Unrecognised plot type')
ax.set_title(labels[0])
ax.set_xlabel(labels[1])
ax.set_ylabel(labels[2])
return ax
#######################################################################
# 1D plots
#######################################################################
elif property_to_plot in types1d:
if not ax:
# noinspection PyUnboundLocalVariable
ax = fig.add_subplot(111)
if property_to_plot == 'histogram' or property_to_plot == 'hist':
# do all plotting in this loop for 1D plots
labels = ['Histogram of surface heights', 'height', 'counts']
ax.hist(self.profile.flatten(), 100)
elif property_to_plot == 'fft1d':
if self.dimensions == 1:
                    labels = ['FFT of surface', 'frequency', '|F(x)|']
                    if self.fft is None:
                        self.get_fft()
                    x = np.fft.fftfreq(self.shape[0], self.grid_spacing)
                    y = np.abs(self.fft / self.shape[0])
                    # line plot for 1d surfaces
                    ax.plot(x, y)
                    ax.set_xlim(0, max(x))
else:
labels = ['Scatter of frequency magnitudes',
'frequency', '|F(x)|']
u = np.fft.fftfreq(self.shape[0], self.grid_spacing)
v = np.fft.fftfreq(self.shape[1], self.grid_spacing)
u_mesh, v_mesh = np.meshgrid(u, v)
frequencies = u_mesh + v_mesh
                    if self.fft is None:
                        self.get_fft()
                    mags = np.abs(self.fft)
import numpy as np;
import matplotlib.pyplot as plt;
import scipy;
from scipy.ndimage import gaussian_filter, uniform_filter, median_filter;
from scipy.special import gammainc, gamma;
from scipy.interpolate import interp1d
from . import log, files, headers, setup, oifits;
def airy (x):
''' Airy function, with its zero at x = 1.22'''
return 2.*scipy.special.jn (1,np.pi*x) / (np.pi*x);
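# Illustrative sanity check (not part of the pipeline; the _example name is hypothetical):
# the airy function above is normalised so that its first zero falls at x = 1.22,
# which the bracketing search below should recover.
def _example_airy_first_zero ():
    x = np.linspace (1.0, 1.5, 501);
    y = airy (x);
    # locate the sign change that brackets the first zero
    i = np.argmax (np.diff (np.sign (y)) != 0);
    return 0.5 * (x[i] + x[i+1]);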
def gaussian_filter_cpx (input,sigma,**kwargs):
''' Gaussian filter of a complex array '''
return gaussian_filter (input.real,sigma,**kwargs) + \
gaussian_filter (input.imag,sigma,**kwargs) * 1.j;
def uniform_filter_cpx (input,sigma,**kwargs):
''' Uniform filter of a complex array '''
return uniform_filter (input.real,sigma,**kwargs) + \
uniform_filter (input.imag,sigma,**kwargs) * 1.j;
def getwidth (curve, threshold=None):
'''
Compute the width of curve around its maximum,
given a threshold. Return the tuple (center,fhwm)
'''
if threshold is None:
threshold = 0.5*np.max (curve);
# Find rising point
f = np.argmax (curve > threshold) - 1;
if f == -1:
log.warning ('Width detected outside the spectrum');
first = 0;
else:
first = f + (threshold - curve[f]) / (curve[f+1] - curve[f]);
# Find lowering point
l = len(curve) - np.argmax (curve[::-1] > threshold) - 1;
if l == len(curve)-1:
log.warning ('Width detected outside the spectrum');
last = l;
else:
last = l + (threshold - curve[l]) / (curve[l+1] - curve[l]);
return 0.5*(last+first), 0.5*(last-first);
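# Usage sketch for getwidth (illustrative; the _example name is hypothetical):
# for a sampled Gaussian the returned half-width at half maximum should be close
# to sqrt(2*ln(2))*sigma in pixel units, so the FWHM is twice the second return value.
def _example_getwidth_gaussian ():
    x = np.arange (200);
    sigma = 10.0;
    curve = np.exp (-0.5 * ((x - 100.0) / sigma)**2);
    center, half_width = getwidth (curve);
    # expect center ~ 100 and half_width ~ 11.77 pixels
    return center, half_width;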
def bootstrap_matrix (snr, gd):
'''
Compute the best SNR and GD of each baseline when considering
    also the bootstrapping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with linear matrix');
    # Use a power to implement a type of min/max of SNR
power = 4.0;
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
log.info ('Compute OPD_TO_OPD');
# The OPL_TO_OPD matrix
OPL_TO_OPD = setup.beam_to_base;
# OPD_TO_OPL = (OPL_TO_OPD^T.snr.OPL_TO_OPD)^-1 . OPL_TO_OPD^T.W_OPD
# o is output OPL
JtW = np.einsum ('tb,sb->stb',OPL_TO_OPD.T,snr**power);
JtWJ = np.einsum ('stb,bo->sto',JtW,OPL_TO_OPD);
JtWJ_inv = np.array([ np.linalg.pinv (JtWJ[s]) for s in range(ns)]);# 'sot'
OPD_TO_OPL = np.einsum ('sot,stb->sob', JtWJ_inv, JtW);
# OPD_TO_OPD = OPL_TO_OPD.OPD_TO_OPL (m is output OPD)
OPD_TO_OPD = np.einsum ('mo,sob->smb', OPL_TO_OPD, OPD_TO_OPL);
log.info ('Compute gd_b and snr_b');
# GDm = OPD_TO_OPD . GD
gd_b = np.einsum ('smb,sb->sm',OPD_TO_OPD,gd);
# Cm = OPD_TO_OPD . C_OPD . OPD_TO_OPD^T
OPD_TO_OPD_W = np.einsum ('smb,sb->smb',OPD_TO_OPD,snr**-power);
cov_b = np.einsum ('smb,snb->smn',OPD_TO_OPD_W, OPD_TO_OPD);
# Reform SNR from covariance
snr_b = np.diagonal (cov_b, axis1=1, axis2=2)**-(1./power);
snr_b[snr_b < 1e-2] = 0.0;
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
return (snr_b,gd_b);
def bootstrap_triangles (snr,gd):
'''
Compute the best SNR and GD of each baseline when considering
    also the bootstrapping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with triangles');
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
# Create output
gd_b = gd.copy ();
snr_b = snr.copy ();
# Sign of baseline in triangles
sign = np.array ([1.0,1.0,-1.0]);
    # Loop several times over the triplets to also
    # get the baselines tracked by quadruplets.
for i in range (7):
for tri in setup.triplet_base ():
for s in range (ns):
i0,i1,i2 = np.argsort (snr_b[s,tri]);
# Set SNR as the worst of the two best
snr_b[s,tri[i0]] = snr_b[s,tri[i1]];
# Set the GD as the sum of the two best
mgd = gd_b[s,tri[i1]] * sign[i1] + gd_b[s,tri[i2]] * sign[i2];
gd_b[s,tri[i0]] = - mgd * sign[i0];
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
return (snr_b,gd_b);
def bootstrap_triangles_jdm (snr,gd):
'''
MIRC/JDM Method: Compute the best SNR and GD of each baseline when considering
    also the bootstrapping capability of the array.
snr and gd shall be of shape (...,nb)
Return (snr_b, gd_b) of same size, but including bootstrap.
'''
log.info ('Bootstrap baselines with triangles using MIRC/JDM method');
w=snr.copy()
opd0=gd.copy()
ns,nf,ny,nb=snr.shape
a=np.zeros((ns,nf,ny,5,5))
b=np.zeros((ns,nf,ny,5))
gd_jdm = np.zeros((ns,nf,ny,15))
# Reshape
shape = snr.shape;
snr = snr.reshape ((-1,shape[-1]));
gd = gd.reshape ((-1,shape[-1]));
ns,nb = gd.shape;
# Ensure no zero and no nan
snr[~np.isfinite (snr)] = 0.0;
snr = np.maximum (snr,1e-1);
snr = np.minimum (snr,1e3);
# Create output
gd_b = gd.copy ();
snr_b = snr.copy ();
# Sign of baseline in triangles
sign = np.array ([1.0,1.0,-1.0]);
    # Loop several times over the triplets to also
    # get the baselines tracked by quadruplets.
for i in range (7):
for tri in setup.triplet_base ():
for s in range (ns):
i0,i1,i2 = np.argsort (snr_b[s,tri]);
# Set SNR as the worst of the two best
snr_b[s,tri[i0]] = snr_b[s,tri[i1]];
# Set the GD as the sum of the two best
mgd = gd_b[s,tri[i1]] * sign[i1] + gd_b[s,tri[i2]] * sign[i2];
gd_b[s,tri[i0]] = - mgd * sign[i0];
# Reshape
snr = snr.reshape (shape);
gd = gd.reshape (shape);
snr_b = snr_b.reshape (shape);
gd_b = gd_b.reshape (shape);
OPD=opd0.copy()
OPD=np.where(w <=1., 0.0, OPD)
w=np.where(w <=1., .01, w)
#inzero=np.argwhere(w <= 100.)
#OPD[inzero]=0.0
#w[inzero]=.01
opd12=OPD[:,:,:,0];
opd13=OPD[:,:,:,1];
opd14=OPD[:,:,:,2];
opd15=OPD[:,:,:,3];
opd16=OPD[:,:,:,4];
opd23=OPD[:,:,:,5];
opd24=OPD[:,:,:,6];
opd25=OPD[:,:,:,7];
opd26=OPD[:,:,:,8];
opd34=OPD[:,:,:,9];
opd35=OPD[:,:,:,10];
opd36=OPD[:,:,:,11];
opd45=OPD[:,:,:,12];
opd46=OPD[:,:,:,13];
opd56=OPD[:,:,:,14];
w12=w[:,:,:,0]+0.001;
w13=w[:,:,:,1]+0.002;
w14=w[:,:,:,2]+0.005;
w15=w[:,:,:,3]+0.007;
w16=w[:,:,:,4]+0.003;
w23=w[:,:,:,5]+0.004;
w24=w[:,:,:,6]+0.008;
w25=w[:,:,:,7]+0.009;
w26=w[:,:,:,8]+0.002;
w34=w[:,:,:,9]+0.003;
w35=w[:,:,:,10]+0.006;
w36=w[:,:,:,11]+0.008;
w45=w[:,:,:,12]+0.009;
w46=w[:,:,:,13]+0.004;
w56=w[:,:,:,14]+0.005;
a[:,:,:,0,0] = w12+w23+w24+w25+w26;
a[:,:,:,1,1] = w13+w23+w34+w35+w36;
a[:,:,:,2,2] = w14+w24+w34+w45+w46;
a[:,:,:,3,3] = w15+w25+w35+w45+w56;
a[:,:,:,4,4] = w16+w26+w36+w46+w56;
a[:,:,:,0,1] = -w23;
a[:,:,:,0,2] = -w24;
a[:,:,:,0,3] = -w25;
a[:,:,:,0,4] = -w26;
a[:,:,:,1,0] = -w23;
a[:,:,:,1,2] = -w34;
a[:,:,:,1,3] = -w35;
a[:,:,:,1,4] = -w36;
a[:,:,:,2,0] = -w24;
a[:,:,:,2,1] = -w34;
a[:,:,:,2,3] = -w45;
a[:,:,:,2,4] = -w46;
a[:,:,:,3,0] = -w25;
a[:,:,:,3,1] = -w35;
a[:,:,:,3,2] = -w45;
a[:,:,:,3,4] = -w56;
a[:,:,:,4,0] = -w26;
a[:,:,:,4,1] = -w36;
a[:,:,:,4,2] = -w46;
a[:,:,:,4,3] = -w56;
b[:,:,:,0] = w12*opd12 - w23*opd23 - w24*opd24 - w25*opd25 - w26*opd26;
b[:,:,:,1] = w13*opd13 + w23*opd23 - w34*opd34 - w35*opd35 - w36*opd36;
b[:,:,:,2] = w14*opd14 + w24*opd24 + w34*opd34 - w45*opd45 - w46*opd46;
b[:,:,:,3] = w15*opd15 + w25*opd25 + w35*opd35 + w45*opd45 - w56*opd56;
b[:,:,:,4] = w16*opd16 + w26*opd26 + w36*opd36 + w46*opd46 + w56*opd56;
#invert!
result=np.linalg.solve(a, b)
gd_jdm[:,:,:,0]=result[:,:,:,0]
gd_jdm[:,:,:,1]=result[:,:,:,1]
gd_jdm[:,:,:,2]=result[:,:,:,2]
gd_jdm[:,:,:,3]=result[:,:,:,3]
gd_jdm[:,:,:,4]=result[:,:,:,4]
gd_jdm[:,:,:,5]=result[:,:,:,1]-result[:,:,:,0]
gd_jdm[:,:,:,6]=result[:,:,:,2]-result[:,:,:,0]
gd_jdm[:,:,:,7]=result[:,:,:,3]-result[:,:,:,0]
gd_jdm[:,:,:,8]=result[:,:,:,4]-result[:,:,:,0]
gd_jdm[:,:,:,9]=result[:,:,:,2]-result[:,:,:,1]
gd_jdm[:,:,:,10]=result[:,:,:,3]-result[:,:,:,1]
gd_jdm[:,:,:,11]=result[:,:,:,4]-result[:,:,:,1]
gd_jdm[:,:,:,12]=result[:,:,:,3]-result[:,:,:,2]
gd_jdm[:,:,:,13]=result[:,:,:,4]-result[:,:,:,2]
gd_jdm[:,:,:,14]=result[:,:,:,4]-result[:,:,:,3]
return (snr_b,gd_jdm,result);
def gd_tracker(opds_trial,input_snr,gd_key):
'''
    Used for fitting a self-consistent set of opds. Input 5 telescope delays
    and compare to the snr vectors in opd space.
    Return a global metric based on logs of the snrs with thresholds.
'''
#log.info ('Bootstrap baselines with triangles using MIRC/JDM method');
# probably replace as matrix in future for vectorizing.
gd_jdm,snr_jdm = get_gds(opds_trial,input_snr,gd_key)
#fit_metric = np.sum(np.log10(snr_jdm))
fit_metric = np.sum(snr_jdm)
return (-fit_metric);
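# Sketch of how gd_tracker could be driven by a scalar optimiser (an assumption for
# illustration; the production code may search the opd space differently). input_snr
# is the (nscan, 15) SNR map and gd_key the opd grid of length nscan; the _example
# name and the choice of Nelder-Mead are hypothetical.
def _example_fit_telescope_delays (input_snr, gd_key):
    from scipy.optimize import minimize;
    topds0 = np.zeros (5);
    res = minimize (gd_tracker, topds0, args=(input_snr, gd_key), method='Nelder-Mead');
    gd_jdm, snr_jdm = get_gds (res.x, input_snr, gd_key);
    return res.x, gd_jdm, snr_jdm;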
def get_gds(topds,input_snr,gd_key):
'''
    Used for fitting a self-consistent set of opds. Input 5 telescope delays
    and compare to the snr vectors in opd space.
    Return gds and snrs for a self-consistent set of delays.
'''
nscan,nb=input_snr.shape
gd_jdm=np.zeros(nb)
snr_jdm=np.zeros(nb)
gd_jdm[0]=topds[0]
gd_jdm[1]=topds[1]
gd_jdm[2]=topds[2]
gd_jdm[3]=topds[3]
gd_jdm[4]=topds[4]
gd_jdm[5]=topds[1]-topds[0]
gd_jdm[6]=topds[2]-topds[0]
gd_jdm[7]=topds[3]-topds[0]
gd_jdm[8]=topds[4]-topds[0]
gd_jdm[9]=topds[2]-topds[1]
gd_jdm[10]=topds[3]-topds[1]
gd_jdm[11]=topds[4]-topds[1]
gd_jdm[12]=topds[3]-topds[2]
gd_jdm[13]=topds[4]-topds[2]
gd_jdm[14]=topds[4]-topds[3]
# interpolate into the snr.
for i in range(nb):
#snr_func=interp1d(gd_key,input_snr[:,i],kind='cubic',bounds_error=False,fill_value=(input_snr[:,i]).min(),assume_sorted=True)
snr_func=interp1d(gd_key,input_snr[:,i],kind='cubic',bounds_error=False,fill_value=1.,assume_sorted=True)
snr_jdm[i]=snr_func(gd_jdm[i])
return(gd_jdm,snr_jdm)
def get_gd_gravity(topds, bestsnr_snrs,bestsnr_indices,softlength=2.,nscan=None):
'''
    Used for fitting a self-consistent set of opds. Input 5 telescope delays
    and compare to the snr vectors in opd space.
    Return gds and snrs for a self-consistent set of delays.
topds = (nramps,nframes, ntels=5)
bestsnr_snrs = (nramps, nframes, npeaks, nbaselines )
bestsnr_indices = (nramps, nframes, npeaks, nbaselines ) ; integers
'''
nr,nf,npeak,nt=topds.shape
nr,nf,npeak,nb=bestsnr_snrs.shape
OPL_TO_OPD = setup.beam_to_base;
temp = setup.base_beam ()
#photo_power = photo[:,:,:,setup.base_beam ()];
#totflux = np.nansum(photo,axis=(1,3))
#bp=np.nanmean(bias_power,axis=2)
topds1= topds[:,:,:,setup.base_beam ()]
gd_jdm= topds1[:,:,:,:,1] - topds1[:,:,:,:,0]
    # if gd_jdm > nscan/2 then wrap around; but does the sign work in the force equation? will have to check.
##if nscan != None:
# gd_jdm= np.where( gd_jdm >nscan/2, gd_jdm-nscan ,gd_jdm)
# gd_jdm= np.where( gd_jdm < -nscan/2, nscan + gd_jdm, gd_jdm)
    # alternatively, instead of adding in a discontinuity, we could copy the force centers +/- nscan and apply a
    # global down-weight.
    if nscan is not None:
bestsnr_snrs=np.concatenate((bestsnr_snrs,bestsnr_snrs,bestsnr_snrs),axis=2)
        bestsnr_indices = np.concatenate((bestsnr_indices, bestsnr_indices+nscan, bestsnr_indices-nscan), axis=2)
from threading import Thread
import gym
import numpy as np
import torch
import ctypes
from typing import Tuple, List
from torch import nn
from torch.nn import functional as F
from torch_spread import NetworkClient, NetworkManager, SpreadModule, Buffer, mp_ctx
from torch_spread.buffer_queue import BufferRing
from torch_spread.buffer import raw_buffer_and_size
from argparse import ArgumentParser, Namespace
from scipy import signal
process_type = mp_ctx.Process
Value = mp_ctx.Value
JoinableQueue = mp_ctx.JoinableQueue
# process_type = Thread
class DuelingNetwork(SpreadModule):
""" A simple feed forward neural network for training a q-value on cartpole. """
def __init__(self, worker: bool, state_shape: Tuple[int], num_actions: int):
super(DuelingNetwork, self).__init__(worker)
self.input_shape = int(np.prod(state_shape))
self.encoder = nn.Sequential(
nn.Linear(self.input_shape, 16),
nn.PReLU(16),
nn.Linear(16, 32),
nn.PReLU(32),
)
self.value_output = nn.Linear(32, 1)
self.advantage_output = nn.Linear(32, num_actions)
def forward(self, input_buffer):
x = self.encoder(input_buffer.view(-1, self.input_shape))
value = self.value_output(x)
advantage = self.advantage_output(x)
return value + advantage - advantage.mean(dim=-1, keepdim=True)
def q_values(self, states, actions):
return self.forward(states).gather(1, actions.unsqueeze(1)).squeeze()
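# Minimal shape check for the dueling head (an illustrative sketch; the real training
# loop feeds batches through torch_spread workers). It assumes that with worker=False
# the module behaves like a plain nn.Module, and uses CartPole-like dimensions:
# 4 state floats and 2 actions. The _example name is hypothetical.
def _example_dueling_q_values():
    net = DuelingNetwork(worker=False, state_shape=(4,), num_actions=2)
    states = torch.randn(8, 4)
    actions = torch.randint(0, 2, (8,))
    q_all = net(states)                      # (8, 2) state-action values
    q_taken = net.q_values(states, actions)  # (8,) values of the chosen actions
    return q_all.shape, q_taken.shape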
class Episode:
def __init__(self, n_step: int = 1, discount: float = 0.99):
self.states: List[np.ndarray] = []
self.actions: List[int] = []
self.rewards: List[float] = []
self.action_probabilities: List[float] = []
self.length: int = 0
self.n_step = n_step
self.discount = discount
if n_step > 1:
self.discount_filter = np.arange(n_step, dtype=np.float32)
self.discount_filter = discount ** self.discount_filter
def add(self, state: np.ndarray, action: int, reward: float, action_probability: float):
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
self.action_probabilities.append(action_probability)
self.length += 1
def clear(self):
self.states.clear()
self.actions.clear()
self.rewards.clear()
self.action_probabilities.clear()
self.length = 0
@property
def total_reward(self):
return sum(self.rewards)
@property
def observations(self):
states = torch.as_tensor(self.states, dtype=torch.float32)
actions = torch.as_tensor(self.actions, dtype=torch.long)
rewards = torch.as_tensor(self.rewards, dtype=torch.float32)
action_probabilities = torch.as_tensor(self.action_probabilities, dtype=torch.float32)
# Priorities are calculated when adding to replay buffer
priorities = torch.zeros(self.length, dtype=torch.float32)
# Full Monte Carlo discounts
if self.n_step < 1:
terminals = torch.ones(self.length, dtype=torch.uint8)
results = torch.zeros_like(states)
            # reverse in numpy (torch tensors do not support negative-step slicing) and copy to get contiguous memory
            discount_rewards = signal.lfilter([1], [1, -self.discount], x=rewards.numpy()[::-1])
            discount_rewards = torch.from_numpy(discount_rewards[::-1].copy()).float()
# TD(n) discounts
else:
# Compute terminals as a binary mask for states that hit the terminal state during n-step
terminals = torch.zeros(self.length, dtype=torch.uint8)
terminals[-self.n_step:] = 1
# Compute the next-states as the n-offset of the states with zero padding
results = torch.zeros_like(states)
if self.length > self.n_step:
results[:self.length - self.n_step] = states[self.n_step:]
# Compute the n-step discount returns
discount_rewards = rewards
if self.n_step > 1:
discount_rewards = signal.correlate(rewards.numpy(), self.discount_filter[:self.length], 'full')
discount_rewards = torch.from_numpy(discount_rewards[-self.length:])
return {
"states": states,
"actions": actions,
"results": results,
"rewards": rewards,
"terminals": terminals,
"priorities": priorities,
"discount_rewards": discount_rewards,
"action_probabilities": action_probabilities
}
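# Small illustration of the TD(n) target construction above (the _example name is
# hypothetical): correlating the rewards with the discount filter yields, for each
# step t, r_t + gamma*r_{t+1} + ... + gamma^(n-1)*r_{t+n-1}, truncated at the episode end.
def _example_n_step_returns():
    episode = Episode(n_step=3, discount=0.9)
    for _ in range(5):
        episode.add(state=np.zeros(4, dtype=np.float32), action=0, reward=1.0, action_probability=1.0)
    obs = episode.observations
    # with constant unit rewards the first entry is 1 + 0.9 + 0.81 = 2.71,
    # while the last two entries are truncated to 1.9 and 1.0
    return obs["discount_rewards"]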
class PrioritizedReplayBuffer(BufferRing):
def __init__(self, state_shape: Tuple[int], max_size: int, alpha: float = 0.6, beta: float = 0.4):
""" A ring buffer for storing a prioritized replay buffer. Used for Deep Q Learning.
Parameters
----------
state_shape: tuple
The numpy shape of a single state
max_size: int
Maximum number of unique samples to hold in this buffer.
alpha: float
Prioritized Experience Replay alpha parameter
beta: float
Prioritized Experience Replay beta parameter
"""
buffer_shapes = {
"states": state_shape,
"results": state_shape,
"actions": tuple(),
"rewards": tuple(),
"terminals": tuple(),
"priorities": tuple(),
"discount_rewards": tuple(),
"action_probabilities": tuple()
}
buffer_types = {
"states": torch.float32,
"results": torch.float32,
"actions": torch.long,
"rewards": torch.float32,
"terminals": torch.uint8,
"priorities": torch.float32,
"discount_rewards": torch.float32,
"action_probabilities": torch.float32
}
super(PrioritizedReplayBuffer, self).__init__(buffer_shapes, buffer_types, max_size)
self.alpha = alpha
self.beta = beta
self.max_priority = Value(ctypes.c_float, lock=False)
self.max_priority.value = 1.0
@property
def priorities(self):
current_size = self.size
return self.buffer[:current_size]('priorities').numpy()
def update_priority(self, idx: np.ndarray, delta: torch.Tensor):
self.buffer('priorities')[idx] = torch.abs(delta.detach().cpu())
def update_max_priority(self):
        self.max_priority.value = float(np.max(self.priorities, initial=1))
import logging
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.misc import imshow
import scipy.ndimage as ndi
from scipy.ndimage.filters import convolve
from scipy.ndimage import zoom
# from scipy.stats import threshold DEPRECATED
from skimage import exposure
from skimage.io import imsave
from skimage import measure
from photutils import make_source_mask
from astropy.stats import sigma_clipped_stats
from astropy.io import fits
def rgb_scale(X):
return X * 255 / X.max()
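# Usage sketch (illustrative; the _example name is hypothetical): rgb_scale stretches
# an array so that its maximum maps to 255; it assumes X.max() > 0 and returns floats,
# so cast afterwards if an 8-bit image is needed.
def _example_rgb_scale():
    im = np.array([[0.0, 0.5], [1.0, 2.0]])
    scaled = rgb_scale(im)            # maximum value 2.0 -> 255.0
    return scaled.astype(np.uint8)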
def clip(im, instruct):
im = im.copy() # np.place operates inplace, so let's avoid closure errors
sig_n = instruct['sig_n']
if instruct['rel']:
flat_bkg, fake_bkg, std = estimate_background(im) # sig above bkg
else:
std = np.std(im) # sig above image mean
if instruct['clip'] == 'threshold':
av = np.mean(im)
threshmax = av + sig_n * std
newval = np.min(im)
above_max = im > threshmax
        np.place(im, above_max, newval)
# semantic segmenation
# caffe model from deephi
#%% import packages
import numpy as np
from PIL import Image
import os
#import sys
#import glob
import cv2
#import matplotlib.pyplot as plt
#import time
import caffe
# Need to create derived class to clean up properly
class Net(caffe.Net):
def __del__(self):
for layer in self.layer_dict:
if hasattr(self.layer_dict[layer],"fpgaRT"):
del self.layer_dict[layer].fpgaRT
#%% define functions
def label_img_to_color(img):
label_to_color = {
0: [128, 64,128],
1: [244, 35,232],
2: [ 70, 70, 70],
3: [102,102,156],
4: [190,153,153],
5: [153,153,153],
6: [250,170, 30],
7: [220,220, 0],
8: [107,142, 35],
9: [152,251,152],
10: [ 70,130,180],
11: [220, 20, 60],
12: [255, 0, 0],
13: [ 0, 0,142],
14: [ 0, 0, 70],
15: [ 0, 60,100],
16: [ 0, 80,100],
17: [ 0, 0,230],
18: [119, 11, 32],
}
img_height, img_width = img.shape
img_color = np.zeros((img_height, img_width, 3))
for row in range(img_height):
for col in range(img_width):
label = img[row, col]
img_color[row, col] = np.array(label_to_color[label])
return img_color
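# A vectorized alternative to the per-pixel loop above (a sketch, not part of the
# original model code): building a (num_classes, 3) palette once and fancy-indexing
# it with the label image is much faster for full-resolution masks. Note it returns
# uint8 rather than the float array produced by label_img_to_color.
def label_img_to_color_fast(img):
    palette = np.array([
        [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
        [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
        [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
        [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
        [0, 80, 100], [0, 0, 230], [119, 11, 32]], dtype=np.uint8)
    return palette[img]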
def segment_output(net, img_file):
    IMG_MEAN = np.array((104, 117, 123))
from __future__ import print_function
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from cvxopt.solvers import qp
from cvxopt import matrix, spmatrix
from numpy import array, ndarray
from scipy.spatial.distance import cdist
# solve_qp adapted from https://github.com/stephane-caron/qpsolvers/blob/master/qpsolvers/
# Added objective function value in return value
def cvxopt_matrix(M):
if type(M) is ndarray:
return matrix(M)
elif type(M) is spmatrix or type(M) is matrix:
return M
coo = M.tocoo()
return spmatrix(
coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=M.shape)
def solve_qp(P, q, G=None, h=None, A=None, b=None, solver=None, initvals=None):
"""
Solve a Quadratic Program defined as:
minimize
(1/2) * x.T * P * x + q.T * x
subject to
G * x <= h
A * x == b
using CVXOPT <http://cvxopt.org/>.
Parameters
----------
P : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Symmetric quadratic-cost matrix.
q : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Quadratic-cost vector.
G : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Linear inequality matrix.
h : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Linear inequality vector.
A : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Linear equality constraint matrix.
b : numpy.array, cvxopt.matrix or cvxopt.spmatrix
Linear equality constraint vector.
solver : string, optional
Set to 'mosek' to run MOSEK rather than CVXOPT.
initvals : numpy.array, optional
Warm-start guess vector.
Returns
-------
x : array, shape=(n,)
Solution to the QP, if found, otherwise ``None``.
Note
----
CVXOPT only considers the lower entries of `P`, therefore it will use a
wrong cost function if a non-symmetric matrix is provided.
"""
args = [cvxopt_matrix(P), cvxopt_matrix(q)]
if G is not None:
args.extend([cvxopt_matrix(G), cvxopt_matrix(h)])
if A is not None:
if type(A) is ndarray and A.ndim == 1:
A = A.reshape((1, A.shape[0]))
args.extend([cvxopt_matrix(A), cvxopt_matrix(b)])
sol = qp(*args, solver=solver, initvals=initvals)
if 'optimal' not in sol['status']:
return (None, None)
return ( array(sol['x']).reshape((q.shape[0],)), sol['primal objective'] )
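# Example use of solve_qp on a tiny problem (illustrative only; the _example name is
# hypothetical): minimize (1/2)*(x0^2 + x1^2) - x0 - x1 subject to x0 + x1 = 1 and
# x >= 0, whose solution is x = (0.5, 0.5) with objective value -0.75.
def _example_solve_qp():
    P = np.eye(2)
    q = -np.ones(2)
    G = -np.eye(2)          # -x <= 0, i.e. x >= 0
    h = np.zeros(2)
    A = np.ones((1, 2))
    b = np.array([1.0])
    x, objective = solve_qp(P, q, G, h, A, b)
    return x, objective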
def runOptimiser(K, u, preOptw, initialValue, maxWeight=10000):
"""
Args:
K (double 2d array): Similarity/distance matrix
u (double array): Mean similarity of each prototype
preOptw (double): Weight vector
initialValue (double): Initialize run
maxWeight (double): Upper bound on weight
Returns:
Prototypes, weights and objective values
"""
d = u.shape[0]
lb = np.zeros((d, 1))
ub = maxWeight * np.ones((d, 1))
    x0 = np.append(preOptw, initialValue / K[d-1, d-1])
#!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
from sklearn.cluster import DBSCAN
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
import sqlite3
import gc
import psutil
def writecat2db(cat,dbfile):
""" Write a catalog to the database """
ncat = dln.size(cat)
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
#db = sqlite3.connect('test.db')
#db.text_factory = lambda x: str(x, 'latin1')
#db.row_factory = sqlite3.Row
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="meas"').fetchall()) < 1:
c.execute('''CREATE TABLE meas(measid TEXT, objlabel INTEGER, exposure TEXT, ccdnum INTEGER, filter TEXT, mjd REAL,
ra REAL, raerr REAL, dec REAL, decerr REAL, mag_auto REAL, magerr_auto REAL, asemi REAL, asemierr REAL,
bsemi REAL, bsemierr REAL, theta REAL, thetaerr REAL, fwhm REAL, flags INTEGER, class_star REAL)''')
data = list(zip(cat['measid'],np.zeros(ncat,int)-1,cat['exposure'],cat['ccdnum'],cat['filter'],cat['mjd'],cat['ra'],
cat['raerr'],cat['dec'],cat['decerr'],cat['mag_auto'],cat['magerr_auto'],cat['asemi'],cat['asemierr'],
cat['bsemi'],cat['bsemierr'],cat['theta'],cat['thetaerr'],cat['fwhm'],cat['flags'],cat['class_star']))
c.executemany('''INSERT INTO meas(measid,objlabel,exposure,ccdnum,filter,mjd,ra,raerr,dec,decerr,mag_auto,magerr_auto,
asemi,asemierr,bsemi,bsemierr,theta,thetaerr,fwhm,flags,class_star)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', data)
db.commit()
db.close()
def getdbcoords(dbfile):
""" Get the coordinates and ROWID from the database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT rowid,ra,dec FROM meas''')
data = c.fetchall()
db.close()
# Convert to nump structured array
dtype = np.dtype([('ROWID',int),('RA',np.float64),('DEC',np.float64)])
cat = np.zeros(len(data),dtype=dtype)
cat[...] = data
del data
return cat
def createindexdb(dbfile,col='measid',table='meas',unique=True):
""" Index a column in the database """
t0 = time.time()
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
index_name = 'idx_'+col+'_'+table
# Check if the index exists first
c.execute('select name from sqlite_master')
d = c.fetchall()
for nn in d:
if nn[0]==index_name:
print(index_name+' already exists')
return
# Create the index
print('Indexing '+col)
if unique:
c.execute('CREATE UNIQUE INDEX '+index_name+' ON '+table+'('+col+')')
else:
c.execute('CREATE INDEX '+index_name+' ON '+table+'('+col+')')
data = c.fetchall()
db.close()
print('indexing done after '+str(time.time()-t0)+' sec')
def insertobjlabelsdb(rowid,labels,dbfile):
""" Insert objectlabel values into the database """
print('Inserting object labels')
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(labels,rowid))
c.executemany('''UPDATE meas SET objlabel=? WHERE rowid=?''', data)
db.commit()
db.close()
print('inserting done after '+str(time.time()-t0)+' sec')
def updatecoldb(selcolname,selcoldata,updcolname,updcoldata,table,dbfile):
""" Update column in database """
print('Updating '+updcolname+' column in '+table+' table using '+selcolname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(updcoldata,selcoldata))
c.executemany('''UPDATE '''+table+''' SET '''+updcolname+'''=? WHERE '''+selcolname+'''=?''', data)
db.commit()
db.close()
print('updating done after '+str(time.time()-t0)+' sec')
def deleterowsdb(colname,coldata,table,dbfile):
""" Delete rows from the database using rowid"""
print('Deleting rows from '+table+' table using '+colname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(coldata))
c.executemany('''DELETE from '''+table+''' WHERE '''+colname+'''=?''', data)
db.commit()
db.close()
print('deleting done after '+str(time.time()-t0)+' sec')
def writeidstr2db(cat,dbfile):
""" Insert IDSTR database values """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="idstr"').fetchall()) < 1:
c.execute('''CREATE TABLE idstr(measid TEXT, exposure TEXT, objectid TEXT, objectindex INTEGER)''')
data = list(zip(cat['measid'],cat['exposure'],cat['objectid'],cat['objectindex']))
c.executemany('''INSERT INTO idstr(measid,exposure,objectid,objectindex)
VALUES(?,?,?,?)''', data)
db.commit()
db.close()
#print('inserting done after '+str(time.time()-t0)+' sec')
def readidstrdb(dbfile):
""" Get data from IDSTR database"""
data = querydb(dbfile,table='idstr',cols='*')
# Put in catalog
    dtype_idstr = np.dtype([('measid', str, 200), ('exposure', str, 200), ('objectid', str, 200), ('objectindex', int)])
cat = np.zeros(len(data),dtype=dtype_idstr)
cat[...] = data
del data
return cat
def querydb(dbfile,table='meas',cols='rowid,*',where=None):
""" Query database table """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
if where is not None: cmd += ' WHERE '+where
cur.execute(cmd)
data = cur.fetchall()
db.close()
# Return results
return data
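# Illustrative round trip with the sqlite helpers above (the path, the RA cut and the
# _example name are hypothetical): write a measurement catalogue, index a column, then
# pull a subset of rows back out with querydb.
def _example_measdb_roundtrip(cat, dbfile='/tmp/meas_example.db'):
    writecat2db(cat, dbfile)                                   # create and fill the meas table
    createindexdb(dbfile, col='ra', table='meas', unique=False)
    nrows = querydb(dbfile, table='meas', cols='count(*)')[0][0]
    subset = querydb(dbfile, table='meas', cols='rowid,ra,dec', where='ra>=10.0 and ra<10.5')
    return nrows, subset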
def executedb(dbfile,cmd):
""" Execute a database command """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cur.execute(cmd)
data = cur.fetchall()
db.close()
return data
def getdatadb(dbfile,table='meas',cols='rowid,*',objlabel=None,rar=None,decr=None,verbose=False):
""" Get measurements for an object(s) from the database """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
# OBJLABEL constraints
if objlabel is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
if dln.size(objlabel)==2:
cmd += 'objlabel>='+str(objlabel[0])+' AND objlabel<='+str(objlabel[1])
else:
cmd += 'objlabel='+str(objlabel)
# RA constraints
if rar is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'ra>='+str(rar[0])+' AND ra<'+str(rar[1])
# DEC constraints
if decr is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'dec>='+str(decr[0])+' AND dec<'+str(decr[1])
# Execute the select command
#print('CMD = '+cmd)
cur.execute(cmd)
data = cur.fetchall()
db.close()
# No results
if len(data)==0:
return np.array([])
# Convert to numpy structured array
    dtype_hicat = np.dtype([('ROWID',int),('MEASID',str,30),('OBJLABEL',int),('EXPOSURE',str,40),('CCDNUM',int),('FILTER',str,3),
                            ('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
                            ('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
                            ('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
cat = np.zeros(len(data),dtype=dtype_hicat)
cat[...] = data
del data
if verbose: print('got data in '+str(time.time()-t0)+' sec.')
return cat
def getradecrangedb(dbfile):
""" Get RA/DEC ranges from database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT MIN(ra),MAX(ra),MIN(dec),MAX(dec) FROM meas''')
data = c.fetchall()
db.close()
return data[0]
def add_elements(cat,nnew=300000):
""" Add more elements to a catalog"""
ncat = len(cat)
old = cat.copy()
nnew = dln.gt(nnew,ncat)
cat = np.zeros(ncat+nnew,dtype=old.dtype)
cat[0:ncat] = old
del old
return cat
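# Usage sketch for add_elements (illustrative; the _example name is hypothetical):
# grow a structured catalogue before appending more measurements than the current
# array can hold; the existing rows are preserved at the start of the new array.
def _example_grow_catalog():
    dtype = np.dtype([('RA', float), ('DEC', float)])
    cat = np.zeros(5, dtype=dtype)
    cat = add_elements(cat, nnew=10)   # capacity grows, first 5 rows unchanged
    return len(cat)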
def seqcluster(cat,dcr=0.5,iter=False,inpobj=None,trim=False):
""" Sequential clustering of measurements in exposures. This was the old method."""
ncat = len(cat)
labels = np.zeros(ncat)-1
# Iterate
if iter is not False:
done = False
niter = 1
maxiter = 10
        lastlabels = np.zeros(ncat)
"""Make a dataset for a study."""
from typing import List, Sequence, Type, Tuple, Union
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tqdm
from ..base import BasePandasTransformer
class DatasetCreator(BasePandasTransformer):
def __init__(self,
columns: Sequence[str],
train_len: int,
bptt: int,
valid_percent: Union[float, None] = None,
shuffle_in_time=False,
shuffle_columns=False,
interactive=False):
"""Make a dataset inside a study period,
starting from a pd.DataFrame ordered by index.
The passed dataset must be the one considered for a *single* study period.
Inside a study period, we take a training period of train_len, and
inside the training period we move from the beginning onwards in blocks
of window_len days.
"""
self.__check_init_params(train_len, bptt, valid_percent,
shuffle_in_time, shuffle_columns, interactive)
super().__init__(columns)
self.train_len = train_len
self.bptt = bptt
self.valid_percent = valid_percent
self.shuffle_in_time = shuffle_in_time
self.shuffle_columns = shuffle_columns
self.interactive = interactive
def fit(self, X: pd.DataFrame, y=None) -> Type['DatasetCreator']:
"""Check dimensions and prepare to fit.
Most importantly, keep track of the names of the companies
that were included in the S&P500 at the end of the training period,
to keep only them in the training and testing phases.
This is to follow Fischer, Krausse 2018.
"""
self.prepare_to_fit(X)
nr, _ = X.shape
if nr <= self.train_len:
raise ValueError("there are no days left for testing")
if nr <= self.bptt:
raise ValueError("dataset is smaller than bptt")
last_train_day = X.iloc[self.train_len - 1, :].dropna()
self.companies_included = last_train_day.index.tolist()
return self
def transform(self, X: pd.DataFrame):
"""Get the training and testing data, optionally also the validation data.
IMPORTANT: the X variable should contain all the data for a
**single** study period and not be longer, otherwise all indexes get messed up.
In practice, X shall be a window of len = study_len inside the whole data,
so that we can index it starting from 0 and get all indexes right.
"""
final_data = X.loc[:, self.companies_included]
n = final_data.shape[0]
# TRAINING phase
if self.valid_percent:
X_train, X_valid, y_train, y_valid = self.subset_with_validation(
data=final_data,
start_index=self.bptt,
end_index=self.train_len - 1,
bptt=self.bptt,
shuffle_columns=self.shuffle_columns,
valid_percent=self.valid_percent)
assert X_valid.shape[0] == y_valid.shape[0]
else:
X_train, y_train = self.subset_no_validation(
data=final_data,
start_index=self.bptt,
end_index=self.train_len - 1,
bptt=self.bptt,
shuffle_columns=self.shuffle_columns)
# TESTING phase
X_test, y_test = self.subset_no_validation(
data=final_data,
start_index=self.train_len,
end_index=n,
bptt=self.bptt,
shuffle_columns=False)
# check dimensions
assert X_train.shape[0] == y_train.shape[0]
assert X_test.shape[0] == y_test.shape[0]
# if shuffle_in_time, then shuffle *only* the training set
if self.shuffle_in_time:
n_train_samples, _, _ = X_train.shape
permuted_indexes = | np.random.permutation(n_train_samples) | numpy.random.permutation |
import argparse
import os
import random
import re
import sys
import time
from os.path import join
import cv2
import numpy as np
import tensorflow as tf
import sklearn
from utils.metrics_utils import *
random.seed(1024)
np.random.seed(1024)
tf.set_random_seed(1024)
from utils.icp import icp
from tqdm import tqdm
# Create Placeholders
xyz1=tf.placeholder(tf.float32,shape=(None, 3))
xyz2=tf.placeholder(tf.float32,shape=(None, 3))
xyz3 = tf.expand_dims(xyz1, 0)
xyz4 = tf.expand_dims(xyz2, 0)
xyz3_scaled, xyz4_scaled = scale(xyz3, xyz4)
xyz3_scaleds = tf.squeeze(xyz3_scaled)
xyz4_scaleds = tf.squeeze(xyz4_scaled)
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# load predictions
# data_path
data_dir = '../../data/ShapeNet/'
test_datapath = '../../data/test_list.txt'
eval_path = '../../result/result_shapenet_ply_out_smooth_pt1024/'
rendering_path = '../../data/ShapeNet/ShapeNetRendering/'
# Load data
namelist = []
with open(test_datapath, 'r') as f:
while(True):
line = f.readline().strip()
if not line:
break
namelist.append(line)
class_name = {'02828884':'bench','03001627':'chair','03636649':'lamp','03691459':'speaker','04090263':'firearm','04379243':'table','04530566':'watercraft','02691156':'plane','02933112':'cabinet','02958343':'car','03211117':'monitor','04256520':'couch','04401088':'cellphone'}
model_number = {i:0 for i in class_name}
sum_f = {i:0 for i in class_name}
sum_cd = {i:0 for i in class_name}
sum_emd = {i:0 for i in class_name}
iters = 0
f_sum = 0.0
cd_sum = 0.0
emd_sum = 0.0
def camera_info(param):
theta = np.deg2rad(param[0])
phi = np.deg2rad(param[1])
camY = param[3]*np.sin(phi)
temp = param[3]*np.cos(phi)
camX = temp * np.cos(theta)
camZ = temp * np.sin(theta)
cam_pos = np.array([camX, camY, camZ])
axisZ = cam_pos.copy()
axisY = np.array([0,1,0])
axisX = np.cross(axisY, axisZ)
axisY = np.cross(axisZ, axisX)
cam_mat = np.array([axisX, axisY, axisZ])
cam_mat = sklearn.preprocessing.normalize(cam_mat, axis=1)
return cam_mat, cam_pos
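# Hedged example of how the metadata is typically consumed (the exact row layout
# [azimuth_deg, elevation_deg, ..., distance, ...] is inferred from camera_info()):
#   cam_params = np.loadtxt(view_path)
#   cam_mat, cam_pos = camera_info(cam_params[0])   # rotation matrix + camera position
#   # e.g. bring a world-frame point cloud into the camera frame (convention assumed):
#   #   pcl_cam = np.dot(pcl_world - cam_pos, cam_mat.T)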
for file_list in namelist:
iters += 1
print(iters)
if os.path.isfile(eval_path+file_list[19:-4]+'_pred.npy'):
continue
pcl_pred = np.loadtxt(eval_path+file_list[19:-4]+'-clean.xyz')
pcl_pred = np.concatenate((np.expand_dims(pcl_pred[:,0], axis=1), np.expand_dims(pcl_pred[:,2], axis=1), np.expand_dims(-1*pcl_pred[:,1], axis=1)), axis=1)
print(eval_path+file_list)
file_list_sub = file_list.split("_")
class_id = file_list_sub[0][19:]
# rotate back
view_path = rendering_path+class_id+'/'+file_list_sub[1]+'/rendering/rendering_metadata.txt'
cam_params = | np.loadtxt(view_path) | numpy.loadtxt |
from statsmodels.compat.python import iterkeys
from statsmodels.regression.linear_model import GLS
import numpy as np
from statsmodels.base.model import LikelihoodModelResults
from scipy import sparse
# http://www.irisa.fr/aladin/wg-statlin/WORKSHOPS/RENNES02/SLIDES/Foschi.pdf
__all__ = ['SUR', 'Sem2SLS']
#probably should have a SystemModel superclass
# TODO: does it make sense of SUR equations to have
# independent endogenous regressors? If so, then
# change docs to LHS = RHS
#TODO: make a dictionary that holds equation specific information
#rather than these cryptic lists? Slower to get a dict value?
#TODO: refine sigma definition
class SUR(object):
"""
Seemingly Unrelated Regression
Parameters
----------
sys : list
[endog1, exog1, endog2, exog2,...] It will be of length 2 x M,
where M is the number of equations endog = exog.
sigma : array_like
M x M array where sigma[i,j] is the covariance between equation i and j
dfk : None, 'dfk1', or 'dfk2'
Default is None. Correction for the degrees of freedom
should be specified for small samples. See the notes for more
information.
Attributes
----------
cholsigmainv : array
The transpose of the Cholesky decomposition of `pinv_wexog`
df_model : array
Model degrees of freedom of each equation. p_{m} - 1 where p is
the number of regressors for each equation m and one is subtracted
for the constant.
df_resid : array
Residual degrees of freedom of each equation. Number of observations
less the number of parameters.
endog : array
The LHS variables for each equation in the system.
It is a M x nobs array where M is the number of equations.
exog : array
The RHS variable for each equation in the system.
It is a nobs x sum(p_{m}) array. Which is just each
RHS array stacked next to each other in columns.
history : dict
Contains the history of fitting the model. Probably not of interest
if the model is fit with `igls` = False.
iterations : int
The number of iterations until convergence if the model is fit
iteratively.
nobs : float
The number of observations of the equations.
normalized_cov_params : array
sum(p_{m}) x sum(p_{m}) array
:math:`\\left[X^{T}\\left(\\Sigma^{-1}\\otimes\\boldsymbol{I}\\right)X\\right]^{-1}`
pinv_wexog : array
The pseudo-inverse of the `wexog`
sigma : array
M x M covariance matrix of the cross-equation disturbances. See notes.
sp_exog : CSR sparse matrix
Contains a block diagonal sparse matrix of the design so that
exog1 ... exogM are on the diagonal.
wendog : array
M * nobs x 1 array of the endogenous variables whitened by
`cholsigmainv` and stacked into a single column.
wexog : array
M*nobs x sum(p_{m}) array of the whitened exogenous variables.
Notes
-----
All individual equations are assumed to be well-behaved, homoskedastic
iid errors. This is basically an extension of GLS, using sparse matrices.
.. math:: \\Sigma=\\left[\\begin{array}{cccc}
\\sigma_{11} & \\sigma_{12} & \\cdots & \\sigma_{1M}\\\\
\\sigma_{21} & \\sigma_{22} & \\cdots & \\sigma_{2M}\\\\
\\vdots & \\vdots & \\ddots & \\vdots\\\\
\\sigma_{M1} & \\sigma_{M2} & \\cdots & \\sigma_{MM}\\end{array}\\right]
References
----------
Zellner (1962), Greene (2003)
"""
#TODO: Does each equation need nobs to be the same?
def __init__(self, sys, sigma=None, dfk=None):
if len(sys) % 2 != 0:
raise ValueError("sys must be a list of pairs of endogenous and \
exogenous variables. Got length %s" % len(sys))
if dfk:
if not dfk.lower() in ['dfk1','dfk2']:
raise ValueError("dfk option %s not understood" % (dfk))
self._dfk = dfk
M = len(sys[1::2])
self._M = M
# exog = np.zeros((M,M), dtype=object)
# for i,eq in enumerate(sys[1::2]):
# exog[i,i] = np.asarray(eq) # not sure this exog is needed
# used to compute resids for now
        exog = np.column_stack([np.asarray(sys[1::2][i]) for i in range(M)])
# exog = np.vstack(np.asarray(sys[1::2][i]) for i in range(M))
self.exog = exog # 2d ndarray exog is better
# Endog, might just go ahead and reshape this?
endog = np.asarray(sys[::2])
self.endog = endog
self.nobs = float(self.endog[0].shape[0]) # assumes all the same length
# Degrees of Freedom
df_resid = []
df_model = []
[df_resid.append(self.nobs - np.linalg.matrix_rank(_)) for _ in sys[1::2]]
[df_model.append(np.linalg.matrix_rank(_) - 1) for _ in sys[1::2]]
self.df_resid = np.asarray(df_resid)
self.df_model = np.asarray(df_model)
# "Block-diagonal" sparse matrix of exog
sp_exog = sparse.lil_matrix((int(self.nobs*M),
int(np.sum(self.df_model+1)))) # linked lists to build
self._cols = np.cumsum(np.hstack((0, self.df_model+1)))
for i in range(M):
            sp_exog[int(i*self.nobs):int((i+1)*self.nobs),
                    self._cols[i]:self._cols[i+1]] = sys[1::2][i]
self.sp_exog = sp_exog.tocsr() # cast to compressed for efficiency
# Deal with sigma, check shape earlier if given
if np.any(sigma):
sigma = np.asarray(sigma) # check shape
elif sigma is None:
resids = []
for i in range(M):
resids.append(GLS(endog[i],exog[:,
self._cols[i]:self._cols[i+1]]).fit().resid)
resids = np.asarray(resids).reshape(M,-1)
sigma = self._compute_sigma(resids)
self.sigma = sigma
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(\
self.sigma)).T
self.initialize()
def initialize(self):
self.wendog = self.whiten(self.endog)
self.wexog = self.whiten(self.sp_exog)
self.pinv_wexog = np.linalg.pinv(self.wexog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.history = {'params' : [np.inf]}
self.iterations = 0
def _update_history(self, params):
self.history['params'].append(params)
def _compute_sigma(self, resids):
"""
Computes the sigma matrix and update the cholesky decomposition.
"""
M = self._M
nobs = self.nobs
sig = np.dot(resids, resids.T) # faster way to do this?
if not self._dfk:
div = nobs
elif self._dfk.lower() == 'dfk1':
div = np.zeros(M**2)
for i in range(M):
for j in range(M):
                    div[i*M + j] = ((self.df_model[i]+1) *\
                                    (self.df_model[j]+1))**(1/2)
            div = div.reshape(M, M)
else: # 'dfk2' error checking is done earlier
div = | np.zeros(M**2) | numpy.zeros |
import numpy as np
from cleanlab.pruning import get_noise_indices
class ProbaReason:
"""
Assign doubt based on low proba-confidence values from a scikit-learn model.
Arguments:
model: scikit-learn classifier
max_proba: maximum probability threshold for doubt assignment
Usage:
```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from doubtlab.ensemble import DoubtEnsemble
from doubtlab.reason import ProbaReason
X, y = load_iris(return_X_y=True)
model = LogisticRegression(max_iter=1_000)
model.fit(X, y)
doubt = DoubtEnsemble(reason = ProbaReason(model, max_proba=0.55))
indices = doubt.get_indices(X, y)
```
"""
def __init__(self, model, max_proba=0.55):
self.model = model
self.max_proba = max_proba
def __call__(self, X, y=None):
result = self.model.predict_proba(X).max(axis=1) <= self.max_proba
return result.astype(np.float16)
class RandomReason:
"""
Assign doubt based on a random value.
Arguments:
probability: probability of assigning a doubt
random_seed: seed for random number generator
Usage:
```python
from sklearn.datasets import load_iris
from doubtlab.ensemble import DoubtEnsemble
from doubtlab.reason import RandomReason
X, y = load_iris(return_X_y=True)
doubt = DoubtEnsemble(reason = RandomReason(probability=0.05, random_seed=42))
indices = doubt.get_indices(X, y)
```
"""
def __init__(self, probability=0.01, random_seed=42):
self.probability = probability
self.random_seed = random_seed
def __call__(self, X, y=None):
np.random.seed(self.random_seed)
rvals = np.random.random(size=len(X))
return np.where(rvals < self.probability, rvals, 0)
class WrongPredictionReason:
"""
Assign doubt when the model prediction doesn't match the label.
Arguments:
model: scikit-learn classifier
Usage:
```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from doubtlab.ensemble import DoubtEnsemble
from doubtlab.reason import WrongPredictionReason
X, y = load_iris(return_X_y=True)
model = LogisticRegression(max_iter=1_000)
model.fit(X, y)
doubt = DoubtEnsemble(reason = WrongPredictionReason(model=model))
indices = doubt.get_indices(X, y)
```
"""
def __init__(self, model):
self.model = model
def __call__(self, X, y):
return (self.model.predict(X) != y).astype(np.float16)
class LongConfidenceReason:
"""
Assign doubt when a wrong class gains too much confidence.
Arguments:
model: scikit-learn classifier
threshold: confidence threshold for doubt assignment
Usage:
```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from doubtlab.ensemble import DoubtEnsemble
from doubtlab.reason import LongConfidenceReason
X, y = load_iris(return_X_y=True)
model = LogisticRegression(max_iter=1_000)
model.fit(X, y)
doubt = DoubtEnsemble(reason = LongConfidenceReason(model=model))
indices = doubt.get_indices(X, y)
```
"""
def __init__(self, model, threshold=0.2):
self.model = model
self.threshold = threshold
def _max_bad_class_confidence(self, X, y):
probas = self.model.predict_proba(X)
values = []
for i, proba in enumerate(probas):
proba_dict = {
self.model.classes_[j]: v for j, v in enumerate(proba) if j != y[i]
}
values.append(max(proba_dict.values()))
return | np.array(values) | numpy.array |
#!/usr/bin/env python3
import numpy as np
# import matplotlib.pyplot as plt
# from scipy.integrate import quad
# from scipy import interpolate
# import AMC
import mass_function
import NSencounter as NE
import perturbations as PB
import glob
try:
from tqdm import tqdm
except ImportError as err:
def tqdm(x):
return x
import argparse
import sys
import os
import re
import warnings
import params
# sys.path.append("../")
import dirs
if not os.path.exists(dirs.data_dir + "distributions/"):
os.makedirs(dirs.data_dir + "distributions/")
# The code in principle is parallelised, but I wouldn't recommend it...
USING_MPI = False
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
MPI_size = comm.Get_size()
MPI_rank = comm.Get_rank()
if MPI_size > 1:
USING_MPI = True
except ImportError as err:
print(" mpi4py module not found: using a single process only...")
USING_MPI = False
MPI_size = 1
MPI_rank = 0
print(MPI_size, MPI_rank)
warnings.filterwarnings("error")
# This mass corresponds roughly to an axion decay
# constant of 3e11 and a confinement scale of Lambda = 0.076
in_maeV = params.m_a # axion mass in eV
in_gg = -0.7
print("> Using m_a = %.2e eV, gamma = %.2f" % (in_maeV, in_gg))
######################
#### OPTIONS ######
# Parse the arguments!
parser = argparse.ArgumentParser(description="...")
parser.add_argument(
"-profile",
"--profile",
help="Density profile for AMCs - `NFW` or `PL`",
type=str,
default="PL",
)
parser.add_argument(
"-unperturbed",
"--unperturbed",
help="Calculate for unperturbed profiles?",
type=bool,
default=False,
)
parser.add_argument(
"-max_rows",
"--max_rows",
help="Maximum number of rows to read from each file?",
type=int,
default=None,
)
parser.add_argument(
"-circ",
"--circular",
dest="circular",
action="store_true",
help="Use the circular flag to force e = 0 for all orbits.",
)
parser.add_argument(
"-AScut",
"--AScut",
dest="AScut",
action="store_true",
help="Include an axion star cut on the AMC properties.",
)
parser.add_argument(
"-mass_choice",
"--mass_choice",
help="Mass parameter = 'c' or 'a' for characteristic or average.",
type=str,
default="c",
)
parser.set_defaults(circular=False)
parser.set_defaults(AScut=False)
args = parser.parse_args()
UNPERTURBED = args.unperturbed
PROFILE = args.profile
CIRCULAR = args.circular
AS_CUT = args.AScut
max_rows = args.max_rows
MASS_CHOICE = args.mass_choice
circ_text = ""
if CIRCULAR:
circ_text = "_circ"
cut_text = ""
if AS_CUT:
print("> Calculating with axion-star cut...")
cut_text = "_AScut"
if MASS_CHOICE.lower() == "c":
M0 = mass_function.calc_Mchar(in_maeV)
elif MASS_CHOICE.lower() == "a":
AMC_MF = mass_function.PowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
M0 = AMC_MF.mavg
if PROFILE == "NFW" and UNPERTURBED == False:
M0 = mass_function.mass_after_stripping(M0)
# Mass function
if PROFILE == "PL" or UNPERTURBED == True:
AMC_MF = mass_function.PowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
elif PROFILE == "NFW":
AMC_MF = mass_function.StrippedPowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
M_cut = 1e-29
# IDstr = "_ma_57mueV"
IDstr = params.IDstr
IDstr += "_delta_" + MASS_CHOICE.lower()
Nbins_mass = 300
Nbins_radius = 500 # Previously 500
# How much smaller than the local DM density
# do we care about?
k = params.min_enhancement
# Define AS cut
def r_AS(M_AMC):
m_22 = in_maeV / 1e-22
return 1e3 * (1.6 / m_22) * (M_AMC / 1e9) ** (-1 / 3)
alpha_AS = r_AS(1.0)
k_AMC = (3 / (4 * np.pi)) ** (1 / 3)
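# Worked example of the scaling above (assuming m_a = 1e-22 eV, i.e. m_22 = 1,
# purely for illustration): r_AS(1e9 Msun) = 1.6e3 pc, and because of the
# M^(-1/3) dependence a 1e-12 Msun minicluster has an r_AS larger by a factor 1e7.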
# Helper for MPI stuff
def MPI_send_chunks(data, dest, tag):
data_shape = data.shape
comm.send(data_shape, dest, tag)
data_flat = data.flatten()
# Split the data into N_chunks, each of maximum length 1e6
data_len = len(data_flat)
N_chunks = int(np.ceil(data_len / 1e6))
chunk_indices = np.array_split(np.arange(data_len), N_chunks)
print("Source:", data_len, N_chunks)
# Loop over the chunks and send
for inds in chunk_indices:
comm.send(data_flat[inds], dest, tag)
return None
def MPI_recv_chunks(source, tag):
data_shape = comm.recv(source=source, tag=tag)
data_flat = np.zeros(data_shape).flatten()
# Split the data into N_chunks, each of maximum length 1e6
data_len = len(data_flat)
N_chunks = int(np.ceil(data_len / 1e6))
print("Dest:", data_len, N_chunks)
chunk_indices = np.array_split(np.arange(data_len), N_chunks)
    # Loop over the chunks and receive
for inds in chunk_indices:
data_flat[inds] = comm.recv(source=source, tag=tag)
data = np.reshape(data_flat, data_shape)
return data
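# Hedged pairing note (ranks and tags below are illustrative): the two helpers must
# be called with matching (rank, tag) pairs, e.g.
#   on rank 1:  MPI_send_chunks(AMC_weights, dest=0, tag=17)
#   on rank 0:  AMC_weights = MPI_recv_chunks(source=1, tag=17)
# Splitting into chunks of <= ~1e6 elements keeps each pickled comm.send() message
# small, which is the usual workaround for mpi4py hanging on very large sends.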
def main():
a_grid = None
if MPI_rank == 0:
# Gather the list of files to be used, then loop over semi-major axis a
ff1 = glob.glob(
dirs.montecarlo_dir + "AMC_logflat_*" + PROFILE + circ_text + ".txt"
)
a_grid = np.zeros(len(ff1))
print(dirs.montecarlo_dir)
for i, fname in enumerate(ff1):
# print(fname)
m = re.search("AMC_logflat_a=(.+?)_" + PROFILE + circ_text + ".txt", fname)
if m:
a_string = m.group(1)
a_grid[i] = float(a_string) * 1.0e3 # conversion to pc
a_grid = np.sort(a_grid)
print(len(a_grid))
print(a_grid)
if USING_MPI: # Tell all processes about the list, a_grid
a_grid = comm.bcast(a_grid, root=0)
# Edges to use for the output bins in R (galactocentric radius, pc)
if CIRCULAR:
R_centres = 1.0 * a_grid
else:
R_bin_edges = np.geomspace(0.05e3, 60e3, 65)
R_centres = np.sqrt(R_bin_edges[:-1] * R_bin_edges[1:])
mass_ini_all, mass_all, radius_all, e_all, a_all = load_AMC_results(a_grid)
# ----------------------------
# Re-weight the samples according to radius
if CIRCULAR:
(
AMC_weights,
AMC_weights_surv,
AMC_weights_masscut,
AMC_weights_AScut,
AMC_weights_AScut_masscut,
) = calculate_weights_circ(
a_grid, a_all, e_all, mass_all, mass_ini_all, radius_all
)
else:
(
AMC_weights,
AMC_weights_surv,
AMC_weights_masscut,
AMC_weights_AScut,
AMC_weights_AScut_masscut,
) = calculate_weights(
R_bin_edges, a_grid, a_all, e_all, mass_all, mass_ini_all, radius_all
) # Just pass the eccentricities and semi major axes
if USING_MPI:
comm.barrier()
if MPI_rank != 0:
comm.send(mass_ini_all, dest=0, tag=(10 * MPI_rank + 1))
comm.send(mass_all, dest=0, tag=(10 * MPI_rank + 2))
comm.send(radius_all, dest=0, tag=(10 * MPI_rank + 3))
comm.send(a_all, dest=0, tag=(10 * MPI_rank + 4))
comm.send(e_all, dest=0, tag=(10 * MPI_rank + 5))
# print(AMC_weights.shape)
# print(sys.getsizeof(AMC_weights))
# comm.send(AMC_weights.shape, dest=0,tag= (10*MPI_rank+6) )
# print("MPI_rank : ...")
# comm.Send(AMC_weights, dest=0, tag= (10*MPI_rank+7) )
MPI_send_chunks(AMC_weights, dest=0, tag=(10 * MPI_rank + 7))
MPI_send_chunks(AMC_weights_surv, dest=0, tag=(10 * MPI_rank + 9))
# comm.send(AMC_weights_surv, dest=0, tag= (10*MPI_rank+9) )
# print(MPI_rank)
# https://stackoverflow.com/questions/15833947/mpi-hangs-on-mpi-send-for-large-messages
if MPI_rank == 0:
for i in range(1, MPI_size):
mass_ini_tmp = comm.recv(source=i, tag=(10 * i + 1))
mass_tmp = comm.recv(source=i, tag=(10 * i + 2))
radius_tmp = comm.recv(source=i, tag=(10 * i + 3))
a_tmp = comm.recv(source=i, tag=(10 * i + 4))
e_tmp = comm.recv(source=i, tag=(10 * i + 5))
# req = comm.irecv(source=i, tag= (10*i+7) )
# comm.Recv(AMC_w_tmp, source=i, tag= (10*i+7) )
AMC_w_tmp = MPI_recv_chunks(source=i, tag=(10 * i + 7))
# AMC_w_surv_tmp = comm.recv(source=i, tag= (10*i+9) )
AMC_w_surv_tmp = MPI_recv_chunks(source=i, tag=(10 * i + 9))
mass_ini_all = np.concatenate((mass_ini_all, mass_ini_tmp))
mass_all = np.concatenate((mass_all, mass_tmp))
radius_all = np.concatenate((radius_all, radius_tmp))
a_all = np.concatenate((a_all, a_tmp))
e_all = np.concatenate((e_all, e_tmp))
AMC_weights = np.concatenate((AMC_weights, AMC_w_tmp))
AMC_weights_surv = np.concatenate((AMC_weights_surv, AMC_w_surv_tmp))
comm.barrier()
# quit()
if MPI_rank == 0:
# Calculate the survival probability as a function of a
psurv_a_list, psurv_a_AScut_list = calculate_survivalprobability(
a_grid, a_all, mass_all, mass_ini_all, radius_all
)
P_r_weights = np.sum(
AMC_weights, axis=0
) # Check if this should be a sum or integral
P_r_weights_surv = np.sum(AMC_weights_surv, axis=0)
P_r_weights_masscut = np.sum(AMC_weights_masscut, axis=0)
P_r_weights_AScut = np.sum(AMC_weights_AScut, axis=0)
P_r_weights_AScut_masscut = np.sum(AMC_weights_AScut_masscut, axis=0)
psurv_R_list = P_r_weights_surv / (P_r_weights + 1e-30)
# Save the outputs
if not UNPERTURBED:
# np.savetxt(output_dir + 'Rvals_distributions_' + PROFILE + '.txt', Rvals_distr)
if not CIRCULAR:
np.savetxt(
dirs.data_dir + "SurvivalProbability_a_" + PROFILE + IDstr + ".txt",
np.column_stack([a_grid, psurv_a_list, psurv_a_AScut_list]),
delimiter=", ",
header="Columns: semi-major axis [pc], survival probability, survival probability for AMCs passing the AS cut",
)
np.savetxt(
dirs.data_dir
+ "SurvivalProbability_R_"
+ PROFILE
+ circ_text
+ IDstr
+ ".txt",
np.column_stack(
[
R_centres,
psurv_R_list,
P_r_weights,
P_r_weights_surv,
P_r_weights_masscut,
P_r_weights_AScut,
P_r_weights_AScut_masscut,
]
),
delimiter=", ",
header="Columns: galactocentric radius [pc], survival probability, Initial AMC density [Msun/pc^3], Surviving AMC density [Msun/pc^3], Surviving AMC density with mass-loss < 90% [Msun/pc^3], Surviving AMC density with R_AMC > R_AS [Msun/pc^3], Surviving AMC density with R_AMC > R_AS *AND* mass-loss < 90% [Msun/pc^3]",
)
PDF_list = np.zeros_like(R_centres)
if USING_MPI:
PDF_list = comm.bcast(PDF_list, root=0)
mass_ini_all = comm.bcast(mass_ini_all, root=0)
mass_all = comm.bcast(mass_all, root=0)
radius_all = comm.bcast(radius_all, root=0)
AMC_weights_surv = comm.bcast(AMC_weights_surv, root=0)
comm.barrier()
R_indices = np.array_split(range(len(R_centres)), MPI_size)[MPI_rank]
for i in R_indices:
R = R_centres[i]
print(i, "\t - R [pc]:", R)
if UNPERTURBED:
weights = AMC_weights
else:
weights = AMC_weights_surv
# weights = AMC_weights_AScut
inds = weights[:, i] > 0
# inds = np.arange(len(mass_ini_all))
# Calculate distributions of R and M
PDF_list[i] = calc_distributions(
R, mass_ini_all[inds], mass_all[inds], radius_all[inds], weights[inds, i]
) # just pass the AMC weight at that radius
if USING_MPI:
comm.barrier()
if MPI_rank != 0:
comm.send(PDF_list, dest=0, tag=21 + MPI_rank)
if MPI_rank == 0:
for i in range(1, MPI_size):
PDF_tmp = comm.recv(source=i, tag=21 + i)
R_inds = np.array_split(range(len(R_centres)), MPI_size)[i]
PDF_list[R_inds] = PDF_tmp[R_inds]
comm.barrier()
if MPI_rank == 0:
print(np.trapz(PDF_list, R_centres) * 60 * 60 * 24)
# Save the outputs
# if not UNPERTURBED:
out_text = PROFILE + circ_text + cut_text
if UNPERTURBED:
out_text += "_unperturbed"
out_text += IDstr + ".txt"
# if (UNPERTURBED):
# _unperturbed.txt"
# np.savetxt(output_dir + 'Rvals_distributions_' + PROFILE + '.txt', Rvals_distr)
np.savetxt(
dirs.data_dir + "EncounterRate_" + out_text,
np.column_stack([R_centres, PDF_list]),
delimiter=", ",
header="Columns: R orbit [pc], surv_prob, MC radial distrib (dGamma/dR [pc^-1 s^-1])",
)
# ------------------------------
def load_AMC_results(Rlist):
Rkpc_list = Rlist / 1e3
a_pc_all = np.array([])
mass_ini_all = np.array([])
mass_all = np.array([])
radius_all = np.array([])
e_all = np.array([])
a_all = np.array([])
# Divide up the processes for each MPI process
R_vals = np.array_split(Rkpc_list, MPI_size)[MPI_rank]
print(R_vals)
for i, Rkpc in enumerate(R_vals):
fname = dirs.montecarlo_dir + "AMC_logflat_a=%.2f_%s%s.txt" % (
Rkpc,
PROFILE,
circ_text,
)
columns = (
3,
4,
) # FIXME: Need to edit this if I've removed delta from the output files...
if UNPERTURBED:
columns = (0, 1)
mass_ini = np.loadtxt(
fname,
delimiter=", ",
dtype="f8",
usecols=(0,),
unpack=True,
max_rows=max_rows,
)
mass, radius = np.loadtxt(
fname,
delimiter=", ",
dtype="f8",
usecols=columns,
unpack=True,
max_rows=max_rows,
)
e = np.loadtxt(
fname,
delimiter=", ",
dtype="f8",
usecols=(6,),
unpack=True,
max_rows=max_rows,
)
a_pc_all = np.concatenate((a_pc_all, np.ones_like(mass_ini) * R_vals[i] * 1e3))
mass_ini_all = np.concatenate((mass_ini_all, mass_ini))
mass_all = np.concatenate((mass_all, mass))
radius_all = np.concatenate((radius_all, radius))
e_all = np.concatenate((e_all, e))
return mass_ini_all, mass_all, radius_all, e_all, a_pc_all
G_N = (
6.67408e-11 * 6.7702543e-20
) # pc^3 solar mass^-1 s^-2 (conversion: m^3 kg^-1 s^-2 to pc^3 solar mass^-1 s^-2)
# G_N = 4.302e-3
def calc_M_enc(a):
rho0 = 1.4e7 * 1e-9 # Msun pc^-3, see Table 1 in 1304.5127
rs = 16.1e3 # pc
# MW mass enclosed within radius a
Menc = 4 * np.pi * rho0 * rs ** 3 * (np.log((rs + a) / rs) - a / (rs + a))
return Menc
# BJK: It turns out this integral can be done analytically...
def int_P_R(r, a, e):
x = r / a
A = np.clip(e ** 2 - (x - 1) ** 2, 0, 1e30)
res = (1 / np.pi) * (-np.sqrt(A) + np.arctan((x - 1) / | np.sqrt(A) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Created on Thursday Jun 19 2020
A bit of 2-beam diffraction contrast never hurt anyone
Based on Hirsch, Howie, Nicholson, Pashley and Whelan p207
and Head, Humble, Clarebrough, Morton, Forwood p31
Image calculations for a dislocation:
ignoring dilational components so that everything can be expressed as a local
change in deviation parameter s
Dissociated dislocation version
OpenCL speed up by <NAME>, about 50,000x faster on a Dell Inspiron 7577 laptop
@author: <NAME>, <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import time
# from libtiff import TIFF as tif
from PIL import Image
import funcs_richard as funcs_1
import funcs_opencl as funcs_3
# to avoid division by zero errors
eps = 0.000000000001
use_cl=True
save_images=True
if use_cl:
suffix = "_cl"
else:
suffix = ""
toc = time.perf_counter()
# input variables
# Extinction distances
# X0i is the imaginary part of the 000 extinction distance
# thickness fringes disappear at about X0i nm
X0i = 100.0 # nm
# Xg is the extinction distance for g (complex)
# The imaginary part should be larger than X0i
Xg = 20.0 + 1j * X0i * 1.1 # nm
# deviation parameter (typically between -0.1 and 0.1)
s = 0.0
# crystal thickness, nm
t = 50 # nm
# integration step, nm
dt = 0.5 # nm
# pixel scale
# want more or less pixels? this will change the image size
# with an according increase (<1) or decrease (>1) in calculation time
pix2nm = 0.1# nm per pixel
# default number of pixels around the dislocation
pad = 40 # pixels
# max image size in pixels & nm
# NB MUST be an even number since the dislocation lies at the boundary between pixels
# will OVER-RULE pad to make the image smaller
picmax = 6000 # pixels
# lattice parameter nm
a0 = 0.4
# Poisson's ratio
nu = 0.3
# Half the distance between dislocations, if a pair
sep = 0#int(pad / 2) # in pixels
# Vector inputs
# NB cubic crystals only! Everything here in the crystal reference frame
# Burgers vectors: two partial dislocations
si = 1 / 2
b1 = np.array((si, 0, si))
# b2 = np.array((0.,0.,0.))
b2 = np.array((0, 0, 0))
# line direction
u = np.array((1, 0, -1))
# foil normal points along z parallel to the electron beam
z = np.array((1, 1, 0))
# g-vector
g = np.array((1,-1,0))
# setup calculations
# scale dimensions
X0i = X0i / pix2nm
Xg = Xg / pix2nm
t = t / pix2nm
pad = int(pad / pix2nm + 0.5)
picmax = int(picmax / pix2nm + 0.5)
sep = int(sep / pix2nm + 0.5)
a0 = a0 / pix2nm
# position of centre point of dislocation line relative to centre of volume
# ((1,0,0)) is vertical up, ((0,1,0)) is horizontal right, ((0,0,1)) is horiz. left
q1 = np.array((-sep, 0, 0)) # pixels
q2 = np.array((sep, 0, 0))
# x, y and z are the defining unit vectors of the simulation volume
# written in the crystal frame
# x is defined by the cross product of u and n
# normalise line direction
u = u / (np.dot(u, u) ** 0.5)
# normalise foil normal
z = z / (np.dot(z, z) ** 0.5)
# we want u pointing to the same side of the foil as z
if np.dot(u, z) < 0: # they're antiparallel, reverse u and b
u = -u
b1 = -b1
b2 = -b2
# angle between dislocation and z-axis
phi = np.arccos(abs(np.dot(u, z)))
# check if they're parallel and use an alternative if so
if abs(np.dot(u, z) - 1) < eps: # they're parallel, set x parallel to b
x = b1[:]
x = x / (np.dot(x, x) ** 0.5)
if abs(np.dot(x, z) - 1) < eps: # they're parallel too, set x parallel to g
x = g[:] # this will fail for u=z=b=g but it would be stupid
else:
x = np.cross(u, z)
x = x / (np.dot(x, x) ** 0.5)
# y is the cross product of z & x
y = | np.cross(z, x) | numpy.cross |
"""
Created on 10:25 at 08/07/2021/
@author: bo
"""
import argparse
import os
import numpy as np
import pickle
import data.rruff as rruff
from sklearn.metrics import roc_curve, auc
from scipy.special import expit, softmax
import const
import test
import vis_utils as vis_utils
import data.prepare_data as pdd
import matplotlib
import matplotlib.ticker as ticker
# matplotlib.use("pgf")
# matplotlib.rcParams.update({
# "pgf.texsystem": "pdflatex",
# 'text.usetex': True,
# })
matplotlib.rcParams.update({
'font.family': 'serif',
"font.size": 7,
"legend.fontsize": 7,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"legend.title_fontsize": 7,
"axes.titlesize": 7,
})
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, StrMethodFormatter, NullFormatter
import matplotlib.ticker as mticker
TEXTWIDTH = 6.75133
def give_args():
"""This function is used to give the argument"""
parser = argparse.ArgumentParser(description='Reproduce figures in the paper')
parser.add_argument('--dir2read_exp', type=str, default="../exp_data/exp_group/")
parser.add_argument('--dir2read_data', type=str, default="../data_group/")
parser.add_argument('--dir2save', type=str, default="figures/")
parser.add_argument('--index', type=str, default="figure_1", help="which figure or table do you want to produce?")
parser.add_argument("--save", type=const.str2bool, default=False, help="whether to save the image or not")
parser.add_argument("--pdf_pgf", type=str, default="pgf", help="in what kind of format will I save the image?")
return parser.parse_args()
# ------------------------------------------------------------------------------------
def set_size(width, fraction=1, enlarge=0):
"""
Args:
width: inches
fraction: float
"""
# Width of figure (in pts)
fig_width_in = width * fraction
golden_ratio = (5 ** .5 - 1) / 2
if enlarge != 0:
golden_ratio *= enlarge
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
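# Quick sanity check (approximate numbers): set_size(TEXTWIDTH, fraction=0.5)
# returns roughly (3.38, 2.09) inches, i.e. half the LaTeX text width with a
# golden-ratio height; a non-zero `enlarge` multiplies that height.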
def give_figure_specify_size(fraction, enlarge=0):
fig = plt.figure()
fig.set_size_inches(set_size(TEXTWIDTH, fraction, enlarge))
return fig
# -------------- First figure --------------------#
def give_data_augmentation_example(tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pgf", data_path="../data_group/"):
args = const.give_args_test(raman_type="excellent_unoriented")
args["pre_define_tt_filenames"] = False
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
show_data_augmentation_example(args, tr_data[0], tr_data[1], label_name_tr,
tds_dir_use, save, pdf_pgf)
def show_data_augmentation_example(args, tr_spectrum, tr_label, label_name_tr,
tds_dir_use="../exp_data/eerst_paper_figures/",
save=False, pdf_pgf="pdf"):
"""Illustrate the data augmentation process
Args:
args: the arguments that can tell me the maximum and minimum wavenumber
tr_spectrum: [num_spectra, wavenumbers]
tr_label: [num_spectra]
label_name_tr: corresponding names for each class in the tr label
tds_dir_use: the directory to save the data.
save: bool, whether to save the figure
"""
select_index = np.where(label_name_tr == "AlumNa")[0] #AlumNa
tr_select = tr_spectrum[np.where(tr_label == select_index)[0]]
u_spectrum = tr_select[np.random.choice(len(tr_select), 1)[0]]
std_s_spectrum = rruff.calc_std(u_spectrum, 10)
rand_noise = np.random.normal(0, 3, [3, len(u_spectrum)]) # 5 before
generate = abs(np.expand_dims(u_spectrum, 0) + rand_noise * np.expand_dims(std_s_spectrum, 0))
generate = generate / np.max(generate, axis=-1, keepdims=True)
wavenumber = np.arange(args["max_wave"])[args["min_wave"]:]
text_use = ["%s" % label_name_tr[select_index][0], "Synthetic"]
fig = give_figure_specify_size(0.5, 1.1)
ax = fig.add_subplot(111)
for i, s_c in enumerate(["r", "g"]):
ax.plot([], [], color=s_c)
ax.plot(wavenumber, u_spectrum, 'r', lw=0.8)
ax.text(250, 0.5, text_use[0])
for i, s in enumerate(generate):
ax.plot(wavenumber, s + i + 1, 'g', lw=0.8)
ax.text(250, 0.5 + i + 1, text_use[-1])
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlabel("Wavenumber (cm" + r"$^{-1})$")
ax.set_ylabel("Intensity (a.u.)")
if save:
plt.savefig(
tds_dir_use + "/augmentation_example_on_RRUFF_%s.%s" % (label_name_tr[select_index][0],
pdf_pgf),
pad_inches=0, bbox_inches='tight')
# --------------------------- second & third figure ------------------------------#
def show_example_spectra(tds_dir="../exp_data/eerst_paper_figures/", save=False, pdf_pgf="pgf",
data_path="../data_group/"):
"""This function shows the example spectra from each dataset. It should also show the distribution of the classes
"""
dataset = ["RRUFF", "RRUFF", "ORGANIC", "ORGANIC", "BACTERIA"]
raman_type = ["raw", "excellent_unoriented", "organic_target_raw", "organic_target", "bacteria_reference_finetune"]
color_group = ['r', 'g']
fig = give_figure_specify_size(0.5, 3.0)
ax_global = vis_utils.ax_global_get(fig)
ax_global.set_xticks([])
ax_global.set_yticks([])
im_index = 0
title_group = ["Mineral (r)", "Mineral (p)", "Organic (r)", "Organic (p)", "Bacteria"]
tr_frequency_count = []
for s_data, s_raman in zip(dataset, raman_type):
ax = fig.add_subplot(5, 1, im_index + 1)
args = const.give_args_test(raman_type=s_raman)
args["pre_define_tt_filenames"] = False
if s_data == "RRUFF" or s_data == "ORGANIC":
tr_data, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
else:
tr_data, _, _, _, label_name_tr = test.get_data(args, None, read_twin_triple="cls", dir2read=data_path)
tr_spectra, tr_label = tr_data
unique_label, unique_count = np.unique(tr_label, return_counts=True)
if s_data == "RRUFF":
tr_frequency_count.append(unique_count)
if s_data == "RRUFF":
class_name = "Beryl"
select_label = np.where(label_name_tr == class_name)[0]
index = np.where(tr_label == select_label)[0]
else:
select_label = unique_label[np.argmax(unique_count)]
if s_data == "ORGANIC":
select_label = 1
class_name = label_name_tr[select_label]
if s_data == "ORGANIC":
class_name = "Benzidine"
index = | np.where(tr_label == select_label) | numpy.where |
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pytest
from ..association import Association, AssociationPair, AssociationSet, \
SingleTimeAssociation, TimeRangeAssociation
from ..detection import Detection
from ..time import TimeRange
def test_association():
with pytest.raises(TypeError):
Association()
objects = {Detection(np.array([[1], [2]])),
Detection( | np.array([[3], [4]]) | numpy.array |
from numpy.random import seed
import sys
seed(1)
import numpy as np
import labels as L
import tensorflow.contrib.keras as keras
import tensorflow as tf
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.engine import Layer, InputSpec, InputLayer
from keras.models import Model, Sequential
from keras.layers import Dropout, Embedding, concatenate
from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D, Conv2D, MaxPool2D, ZeroPadding1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Merge, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute
from keras.layers import merge
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
from keras.metrics import top_k_categorical_accuracy
from keras.optimizers import SGD
import keras.metrics
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
keras.metrics.top_3_accuracy = top_3_accuracy
import pickle
EMBEDDING_DIM = 300
MAX_SEQUENCE_LENGTH = 100
MAX_NUMBER_WORDS = 136085
VALIDATION_SPLIT = 0.10
traces = []
with open('err-traces-shuf.txt', 'r') as tracesf:
traces = list(tracesf.readlines())[:40000]
traces = [
' '.join([x.strip() for x in t.strip().split(' ')[::-1][-MAX_SEQUENCE_LENGTH:]])
for t in traces
]
traces = [
(' '.join(t.split(' ')[:-3]), t.split(' ')[-2])
for t in traces
]
labels = [ L.LABELS[l][0] for (_, l) in traces ]
texts = [ t for (t, _) in traces ]
for t in traces:
    assert len(t[0].split(' ')) <= MAX_SEQUENCE_LENGTH
tokenizer = None
with open('/app/assets/tokenizer.pkl', 'rb') as wordf:
tokenizer = pickle.load(wordf)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels), num_classes=L.NUM_LABELS)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = | np.arange(data.shape[0]) | numpy.arange |
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are transformed and are the same as are present in the
output.
Parameters
----------
labels : int or list of ints
The number of the atoms from which the molecules are generated
Returns
-------
new_mol : Mol object
The now complete molecule
new_cell : Mol object
The cell with the completed molecule
"""
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
"""
Return a cell where atoms have been translated to complete all molecules of
the cell
Returns
-------
out_cell : Mol object
The new untruncated cell
full_mol_l : list of Mol objects
Each molecule in the untruncated cell
"""
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
    # Conveniently, remaining is now an empty Mol
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
"""
Return a supercell of I x J x K
Parameters
----------
trans : array-like of length 3
Multiplications of the primitive cell
Returns
-------
supercell : Mol object
New supercell with adjusted lattice vectors
"""
import fromage.utils.mol as mol_init
# make the input into a np array
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
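# Hedged usage sketch: cell.supercell([2, 2, 1]) would return a new Mol holding
# 2 x 2 x 1 translated copies of the atoms, with each lattice vector rescaled by
# its multiple; the original cell is not modified.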
def centered_supercell(self, trans, from_origin=False):
"""
Make a bigger supercell out of an input cell.
The cell is multiplied positively and negatively through each lattice
vector so that the supercluster ends up being
(1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
input is 1,1,1 for a cubic unit cell, the output will be the original unit
cell surrounded by 26 other unit cells forming a total 3x3x3 cube.
Alternatively, the multiplication can be centered around the origin, a corner of the
unit cell, instead of the centre. In that case the supercluster ends up being
only (2*trans[0])*(2*trans[1])*(2*trans[2])
Parameters
----------
trans : numpy array of length 3
Multiplications of the primitive cell
from_origin : bool
Determines the kind of multiplication. True is corner of the cell as
the center, False is middle of the cell.
Returns
-------
mega_cell : Mol object
The resulting supercell
"""
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def trans_from_rad(self, clust_rad):
"""
Generate the translations necessary to encapsulate a sphere of given rad
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
"""
# vectors normal to faces
a_perp = np.cross(self.vectors[1], self.vectors[2])
b_perp = np.cross(self.vectors[2], self.vectors[0])
c_perp = np.cross(self.vectors[0], self.vectors[1])
# the three normalised unit vectors
perp = np.array([a_perp / | np.linalg.norm(a_perp) | numpy.linalg.norm |
from itertools import combinations
import numpy as np
import torch
def pdist(vectors):
distance_matrix = -2 * vectors.mm(torch.t(vectors)) + vectors.pow(2).sum(dim=1).view(1, -1) + vectors.pow(2).sum(
dim=1).view(-1, 1)
return distance_matrix
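# pdist() expands ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, so it returns *squared*
# Euclidean distances; tiny negative entries can appear from floating-point
# cancellation and, if needed, can be clamped with distance_matrix.clamp(min=0).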
class PairSelector:
"""
Implementation should return indices of positive pairs and negative pairs that will be passed to compute
Contrastive Loss
return positive_pairs, negative_pairs
"""
def __init__(self):
pass
def get_pairs(self, embeddings, labels):
raise NotImplementedError
class AllPositivePairSelector(PairSelector):
"""
Discards embeddings and generates all possible pairs given labels.
If balance is True, negative pairs are a random sample to match the number of positive samples
"""
def __init__(self, balance=True):
super(AllPositivePairSelector, self).__init__()
self.balance = balance
def get_pairs(self, embeddings, labels):
labels = labels.cpu().data.numpy()
all_pairs = np.array(list(combinations(range(len(labels)), 2)))
all_pairs = torch.LongTensor(all_pairs)
positive_pairs = all_pairs[(labels[all_pairs[:, 0]] == labels[all_pairs[:, 1]]).nonzero()]
negative_pairs = all_pairs[(labels[all_pairs[:, 0]] != labels[all_pairs[:, 1]]).nonzero()]
if self.balance:
negative_pairs = negative_pairs[torch.randperm(len(negative_pairs))[:len(positive_pairs)]]
return positive_pairs, negative_pairs
class HardNegativePairSelector(PairSelector):
"""
Creates all possible positive pairs. For negative pairs, pairs with smallest distance are taken into consideration,
matching the number of positive pairs.
"""
def __init__(self, cpu=True):
super(HardNegativePairSelector, self).__init__()
self.cpu = cpu
def get_pairs(self, embeddings, labels):
if self.cpu:
embeddings = embeddings.cpu()
distance_matrix = pdist(embeddings)
labels = labels.cpu().data.numpy()
all_pairs = np.array(list(combinations(range(len(labels)), 2)))
all_pairs = torch.LongTensor(all_pairs)
positive_pairs = all_pairs[(labels[all_pairs[:, 0]] == labels[all_pairs[:, 1]]).nonzero()]
negative_pairs = all_pairs[(labels[all_pairs[:, 0]] != labels[all_pairs[:, 1]]).nonzero()]
negative_distances = distance_matrix[negative_pairs[:, 0], negative_pairs[:, 1]]
negative_distances = negative_distances.cpu().data.numpy()
top_negatives = np.argpartition(negative_distances, len(positive_pairs))[:len(positive_pairs)]
top_negative_pairs = negative_pairs[torch.LongTensor(top_negatives)]
return positive_pairs, top_negative_pairs
class TripletSelector:
"""
Implementation should return indices of anchors, positive and negative samples
return np array of shape [N_triplets x 3]
"""
def __init__(self):
pass
def get_triplets(self, embeddings, labels):
raise NotImplementedError
class AllTripletSelector(TripletSelector):
"""
Returns all possible triplets
May be impractical in most cases
"""
def __init__(self):
super(AllTripletSelector, self).__init__()
def get_triplets(self, embeddings, labels):
labels = labels.cpu().data.numpy()
triplets = []
for label in set(labels):
label_mask = (labels == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
negative_indices = np.where(np.logical_not(label_mask))[0]
anchor_positives = list(combinations(label_indices, 2)) # All anchor-positive pairs
# Add all negatives for all positive pairs
temp_triplets = [[anchor_positive[0], anchor_positive[1], neg_ind] for anchor_positive in anchor_positives
for neg_ind in negative_indices]
triplets += temp_triplets
return torch.LongTensor(np.array(triplets))
def hardest_negative(loss_values):
hard_negative = np.argmax(loss_values)
return hard_negative if loss_values[hard_negative] > 0 else None
def random_hard_negative(loss_values):
hard_negatives = np.where(loss_values > 0)[0]
return np.random.choice(hard_negatives) if len(hard_negatives) > 0 else None
def semihard_negative(loss_values, margin):
semihard_negatives = np.where(np.logical_and(loss_values < margin, loss_values > 0))[0]
hardest_negative = np.random.choice(semihard_negatives) if len(semihard_negatives) > 0 else None
return hardest_negative, len(semihard_negatives)
class FunctionNegativeTripletSelector(TripletSelector):
"""
For each positive pair, takes the hardest negative sample (with the greatest triplet loss value) to create a triplet
Margin should match the margin used in triplet loss.
negative_selection_fn should take array of loss_values for a given anchor-positive pair and all negative samples
and return a negative index for that pair
"""
def __init__(self, margin, negative_selection_fn, cpu=True):
super(FunctionNegativeTripletSelector, self).__init__()
self.cpu = cpu
self.margin = margin
self.negative_selection_fn = negative_selection_fn
def get_triplets(self, embeddings, labels):
if self.cpu:
embeddings = embeddings.cpu()
distance_matrix = pdist(embeddings)
distance_matrix = distance_matrix.cpu()
labels = labels.cpu().data.numpy()
triplets = []
for label in set(labels):
label_mask = (labels == label)
label_indices = np.where(label_mask)[0]
if len(label_indices) < 2:
continue
negative_indices = np.where(np.logical_not(label_mask))[0]
anchor_positives = list(combinations(label_indices, 2)) # All anchor-positive pairs
anchor_positives = np.array(anchor_positives)
ap_distances = distance_matrix[anchor_positives[:, 0], anchor_positives[:, 1]]
for anchor_positive, ap_distance in zip(anchor_positives, ap_distances):
loss_values = ap_distance - distance_matrix[
torch.LongTensor(np.array([anchor_positive[0]])), torch.LongTensor(negative_indices)] + self.margin
loss_values = loss_values.data.cpu().numpy()
hard_negative = self.negative_selection_fn(loss_values)
if hard_negative is not None:
hard_negative = negative_indices[hard_negative]
triplets.append([anchor_positive[0], anchor_positive[1], hard_negative])
if len(triplets) == 0:
triplets.append([anchor_positive[0], anchor_positive[1], negative_indices[0]])
triplets = | np.array(triplets) | numpy.array |
from multiprocessing import Pool #parallel processing
from itertools import repeat
import sys
from functools import partial
import os
import numpy as np
import libs.data as data
from libs.pd_lib import run_simulation,simulation_death_birth,prisoners_dilemma_averaged,prisoners_dilemma_accumulated
from functools import partial
"""command line arguments
1. str (av or acc): game_str. determines payoff accounting is averaged or accumulated
2, 3. ints: start_batch and end_batch. indices (run batches of 1000 simulations)
4, ... array floats: b_vals. values of b (prisoner's dilemma param) to run simulations for
"""
game_str = sys.argv[1]
if game_str == 'acc': game = prisoners_dilemma_accumulated
elif game_str == 'av': game = prisoners_dilemma_averaged
else: raise ValueError('invalid game string')
start_batch,end_batch = int(sys.argv[2]),int(sys.argv[3])
runs_per_batch = 1000
b_vals = np.array(sys.argv[4:],dtype=float)
c,DELTA = 1.0,0.025 #prisoner dilemma params
l = 10 #population size N=lxl
timend = 10000. #time (hours) after which simulation ends if no fixation
timestep = 12.0 #state saved every 12 hours
rand = np.random.RandomState()
outdir = 'VTpd_%s_db'%(game_str)
if not os.path.exists(outdir): # if the outdir doesn't exist create it
os.makedirs(outdir)
with open(outdir+'/info','w') as f:
f.write('N=%d, c=%.1f, delta=%.3f'%(l*l,c,DELTA))
def run_parallel(b,i):
"""run a single simulation using simulation_death_birth routine indexed by i returning 1 if resulted in mutant fixation and 0 if resident fixation.
If no fixation returns -1 and saves number of mutants at each timestep to file
"""
rand=np.random.RandomState()
    history = run_simulation(simulation_death_birth,l,timestep,timend,rand,DELTA,game,(b,c),save_areas=False)
if 0 not in history[-1].properties['type']:
fix = 1
# data.save_N_mutant(history,outdir+'/fixed_b%.1f'%b,i)
elif 1 not in history[-1].properties['type']:
fix = 0
else:
fix = -1
data.save_N_mutant(history,outdir+'/incomplete_b%.1f'%b,i)
return fix
pool = Pool(maxtasksperchild=1000) # creating a pool of workers to run simulations in parallel
# for each b run 1000x(end_batch-start_batch simulations). At the end of each batch of 1000 simulations
# write to file how many are fixed, lost and incomplete
for b in b_vals:
fix_results = open(outdir+'/fix%.2f'%b,'a',0)
for i in range(start_batch,end_batch):
text = '\r running batch %d of %d'%(i+1,end_batch)
sys.stdout.write(text)
sys.stdout.flush()
        fixation = np.array([f for f in pool.imap(partial(run_parallel,b),range(i*runs_per_batch,(i+1)*runs_per_batch))]) # run this batch of simulations in parallel and collect the fixation outcomes
fixed = len(np.where(fixation==1)[0])
lost = len( | np.where(fixation==0) | numpy.where |
#
# An attempt to translate the main functionality my main
# R radio signal packages gursipr and stuffr to python.
# Nothing extremely complicated, just conveniece functions
#
#
import numpy
import math
import matplotlib
import matplotlib.cbook
import matplotlib.pyplot as plt
import datetime
import time, re
import pickle
import h5py
# fit_velocity
import scipy.constants
import scipy.optimize
# xpath-like access to nested dictionaries
# @d dict
# @q query (e.g., /data/stuff)
def qd(d, q):
keys = q.split("/")
nd = d
for k in keys:
if k == "":
continue
if k in nd:
nd = nd[k]
else:
return None
return nd
# seed is a way of reproducing the random code without
# having to store all actual codes. the seed can then
# act as a sort of station_id.
def create_pseudo_random_code(len=10000, seed=0):
numpy.random.seed(seed)
phases = numpy.array(
numpy.exp(1.0j * 2.0 * math.pi * numpy.random.random(len)),
dtype=numpy.complex64,
)
return phases
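# Hedged usage note: the code is a unit-modulus complex sequence, so different
# transmitters can be separated by seed alone, e.g.
#   code_a = create_pseudo_random_code(len=10000, seed=0)
#   code_b = create_pseudo_random_code(len=10000, seed=1)
# and the same seed is what analyze_prc_file() passes as `station` when decoding.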
def periodic_convolution_matrix(envelope, rmin=0, rmax=100):
    # we assume that the number of measurements equals the number of elements in the code
L = len(envelope)
ridx = numpy.arange(rmin, rmax)
A = numpy.zeros([L, rmax - rmin], dtype=numpy.complex64)
for i in numpy.arange(L):
A[i, :] = envelope[(i - ridx) % L]
result = {}
result["A"] = A
result["ridx"] = ridx
return result
def analyze_prc_file(fname="data-000001.gdf", clen=10000, station=0, Nranges=1000):
z = numpy.fromfile(fname, dtype=numpy.complex64)
code = create_pseudo_random_code(len=clen, seed=station)
    N = len(z) // clen
res = numpy.zeros([N, Nranges], dtype=numpy.complex64)
idx = numpy.arange(clen)
r = create_estimation_matrix(code=code, cache=True)
B = r["B"]
spec = numpy.zeros([N, Nranges], dtype=numpy.float32)
for i in numpy.arange(N):
res[i, :] = numpy.dot(B, z[idx + i * clen])
for i in numpy.arange(Nranges):
spec[:, i] = numpy.abs(numpy.fft.fft(res[:, i]))
r["res"] = res
r["spec"] = spec
return r
B_cache = 0
r_cache = 0
B_cached = False
def create_estimation_matrix(code, rmin=0, rmax=1000, cache=True):
global B_cache
global r_cache
global B_cached
if cache == False or B_cached == False:
r_cache = periodic_convolution_matrix(envelope=code, rmin=rmin, rmax=rmax)
A = r_cache["A"]
Ah = numpy.transpose(numpy.conjugate(A))
B_cache = numpy.dot(numpy.linalg.inv(numpy.dot(Ah, A)), Ah)
r_cache["B"] = B_cache
B_cached = True
return r_cache
else:
# print("using cache")
return r_cache
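# Note on the algebra above (standard least squares, stated here for reference):
# with A the periodic convolution matrix, B = (A^H A)^{-1} A^H is its pseudo-inverse,
# so numpy.dot(B, received_samples) recovers the range-resolved echo amplitudes, as
# done per code cycle in analyze_prc_file().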
def grid_search1d(fun, xmin, xmax, nstep=100):
vals = numpy.linspace(xmin, xmax, num=nstep)
min_val = fun(vals[0])
best_idx = 0
for i in range(nstep):
try_val = fun(vals[i])
if try_val < min_val:
min_val = try_val
best_idx = i
return vals[best_idx]
def fit_velocity(z, t, var, frad=440.2e6):
zz = numpy.exp(1.0j * numpy.angle(z))
def ssfun(x):
freq = 2.0 * frad * x / scipy.constants.c
model = numpy.exp(1.0j * 2.0 * scipy.constants.pi * freq * t)
ss = numpy.sum((1.0 / var) * numpy.abs(model - zz) ** 2.0)
# plt.plot( numpy.real(model))
# plt.plot( numpy.real(zz), 'red')
# plt.show()
return ss
v0 = grid_search1d(ssfun, -800.0, 800.0, nstep=50)
# v = scipy.optimize.fmin(ssfun,numpy.array([v0]),full_output=False,disp=False,retall=False)
return v0
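# Rough numeric check of the Doppler model used above: with frad = 440.2 MHz,
# a line-of-sight velocity of 300 m/s gives freq = 2*frad*v/c ~ 0.88 kHz,
# so the +-800 m/s grid search spans roughly +-2.3 kHz.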
def fit_velocity_and_power(z, t, var, frad=440.2e6):
zz = numpy.exp(1.0j * numpy.angle(z))
def ssfun(x):
freq = 2.0 * frad * x / scipy.constants.c
model = numpy.exp(1.0j * 2.0 * scipy.constants.pi * freq * t)
ss = numpy.sum((1.0 / var) * numpy.abs(model - zz) ** 2.0)
return ss
v0 = grid_search1d(ssfun, -800.0, 800.0, nstep=50)
v0 = scipy.optimize.fmin(
ssfun, numpy.array([v0]), full_output=False, disp=False, retall=False
)
freq = 2.0 * frad * v0 / scipy.constants.c
dc = numpy.real(numpy.exp(-1.0j * 2.0 * scipy.constants.pi * freq * t) * z)
p0 = (1.0 / numpy.sum(1.0 / var)) * numpy.sum((1.0 / var) * dc)
return [v0, p0]
def dict2hdf5(d, fname):
f = h5py.File(fname, "w")
for k in d.keys():
f[k] = d[k]
f.close()
def save_object(obj, filename):
with open(filename, "wb") as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
with open(filename, "rb") as input:
return pickle.load(input)
def date2unix(year, month, day, hour, minute, second):
t = datetime.datetime(year, month, day, hour, minute, second)
return time.mktime(t.timetuple())
def unix2date(x):
return datetime.datetime.utcfromtimestamp(x)
def sec2dirname(t):
return unix2date(t).strftime("%Y-%m-%dT%H-00-00")
def dirname2unix(dirn):
r = re.search("(....)-(..)-(..)T(..)-(..)-(..)", dirn)
return date2unix(
int(r.group(1)),
int(r.group(2)),
int(r.group(3)),
int(r.group(4)),
int(r.group(5)),
int(r.group(6)),
)
def unix2datestr(x):
return unix2date(x).strftime("%Y-%m-%d %H:%M:%S")
def compr(x, fr=0.001):
sh = x.shape
x = x.reshape(-1)
xs = numpy.sort(x)
mini = xs[int(fr * len(x))]
maxi = xs[int((1.0 - fr) * len(x))]
mx = numpy.ones_like(x) * maxi
mn = numpy.ones_like(x) * mini
x = numpy.where(x < maxi, x, mx)
x = numpy.where(x > mini, x, mn)
x = x.reshape(sh)
return x
def comprz(x):
""" Compress signal in such a way that elements less than zero are set to zero. """
zv = x * 0.0
return numpy.where(x > 0, x, zv)
def rep(x, n):
""" interpolate """
z = numpy.zeros(len(x) * n)
for i in range(len(x)):
for j in range(n):
z[i * n + j] = x[i]
return z
def comprz_dB(xx, fr=0.05):
""" Compress signal in such a way that is logarithmic but also avoids negative values """
x = numpy.copy(xx)
sh = xx.shape
x = x.reshape(-1)
x = comprz(x)
x = numpy.setdiff1d(x, numpy.array([0.0]))
xs = numpy.sort(x)
mini = xs[int(fr * len(x))]
mn = numpy.ones_like(xx) * mini
xx = numpy.where(xx > mini, xx, mn)
xx = xx.reshape(sh)
return 10.0 * numpy.log10(xx)
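# Typical use: display a power spectrum in dB while clipping the lowest `fr`
# fraction of the positive values, e.g. P_dB = comprz_dB(spec, fr=0.05),
# so that zeros and noise-floor outliers do not blow up the log scale.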
def decimate(x, dec=2):
Nout = int(math.floor(len(x) / dec))
    idx = numpy.arange(Nout, dtype=int) * int(dec)
res = x[idx] * 0.0
for i in numpy.arange(dec):
res = res + x[idx + i]
return res / float(dec)
def decimate2(x, dec=2):
Nout = int(math.floor(len(x) / dec))
    idx = numpy.arange(Nout, dtype=int) * int(dec)
res = x[idx] * 0.0
count = numpy.copy(x[idx])
count[:] = 1.0
    count_vector = numpy.logical_not(numpy.isnan(x)) * 1.0
x[numpy.where(numpy.isnan(x))] = 0.0
for i in numpy.arange(dec):
res = res + x[idx + i]
count += count_vector[idx + i]
    count[numpy.where(count == 0.0)] = 1.0
    return res / count
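# decimate() block-averages every `dec` consecutive samples; decimate2()
# appears intended to do the same while skipping NaN samples (NaNs are zeroed
# and a per-bin count of valid points is accumulated). Note that `count` is
# initialised to 1.0 above, so the divisor is one larger than the number of
# valid samples in each bin.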
# -*- coding: utf-8 -*-
'''
Basic processing procedures for analog signals (e.g., performing a z-score of a
signal, or filtering a signal).
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
'''
from __future__ import division, print_function
import numpy as np
import scipy.signal
import quantities as pq
import neo
import numpy.matlib as npm
def zscore(signal, inplace=True):
'''
Apply a z-score operation to one or several AnalogSignal objects.
The z-score operation subtracts the mean :math:`\\mu` of the signal, and
divides by its standard deviation :math:`\\sigma`:
.. math::
Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}
If an AnalogSignal containing multiple signals is provided, the
z-transform is always calculated for each signal individually.
If a list of AnalogSignal objects is supplied, the mean and standard
deviation are calculated across all objects of the list. Thus, all list
elements are z-transformed by the same values of :math:`\\mu` and
:math:`\\sigma`. For AnalogSignals, each signal of the array is
treated separately across list elements. Therefore, the number of signals
must be identical for each AnalogSignal of the list.
Parameters
----------
signal : neo.AnalogSignal or list of neo.AnalogSignal
Signals for which to calculate the z-score.
inplace : bool
If True, the contents of the input signal(s) is replaced by the
z-transformed signal. Otherwise, a copy of the original
AnalogSignal(s) is returned. Default: True
Returns
-------
neo.AnalogSignal or list of neo.AnalogSignal
The output format matches the input format: for each supplied
AnalogSignal object a corresponding object is returned containing
the z-transformed signal with the unit dimensionless.
Use Case
--------
You may supply a list of AnalogSignal objects, where each object in
the list contains the data of one trial of the experiment, and each signal
of the AnalogSignal corresponds to the recordings from one specific
electrode in a particular trial. In this scenario, you will z-transform the
signal of each electrode separately, but transform all trials of a given
electrode in the same way.
Examples
--------
>>> a = neo.AnalogSignal(
... np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1)*mV,
... t_start=0*s, sampling_rate=1000*Hz)
>>> b = neo.AnalogSignal(
... np.transpose([[1, 2, 3, 4, 5, 6], [11, 12, 13, 14, 15, 16]])*mV,
... t_start=0*s, sampling_rate=1000*Hz)
>>> c = neo.AnalogSignal(
... np.transpose([[21, 22, 23, 24, 25, 26], [31, 32, 33, 34, 35, 36]])*mV,
... t_start=0*s, sampling_rate=1000*Hz)
    >>> print(zscore(a))
[[-1.46385011]
[-0.87831007]
[-0.29277002]
[ 0.29277002]
[ 0.87831007]
[ 1.46385011]] dimensionless
    >>> print(zscore(b))
[[-1.46385011 -1.46385011]
[-0.87831007 -0.87831007]
[-0.29277002 -0.29277002]
[ 0.29277002 0.29277002]
[ 0.87831007 0.87831007]
[ 1.46385011 1.46385011]] dimensionless
    >>> print(zscore([b,c]))
[<AnalogSignal(array([[-1.11669108, -1.08361877],
[-1.0672076 , -1.04878252],
[-1.01772411, -1.01394628],
[-0.96824063, -0.97911003],
[-0.91875714, -0.94427378],
[-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>,
<AnalogSignal(array([[ 0.78170952, 0.84779261],
[ 0.86621866, 0.90728682],
[ 0.9507278 , 0.96678104],
[ 1.03523694, 1.02627526],
[ 1.11974608, 1.08576948],
[ 1.20425521, 1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
sampling rate: 1000.0 Hz)>]
'''
# Transform input to a list
if type(signal) is not list:
signal = [signal]
# Calculate mean and standard deviation
signal_stacked = np.vstack(signal)
m = np.mean(signal_stacked, axis=0)
s = np.std(signal_stacked, axis=0)
result = []
for sig in signal:
sig_normalized = sig.magnitude - m.magnitude
sig_normalized = np.divide(sig_normalized, s.magnitude,
out=np.zeros_like(sig_normalized),
where=s.magnitude != 0)
if inplace:
sig[:] = pq.Quantity(sig_normalized, units=sig.units)
sig_normalized = sig
else:
sig_normalized = sig.duplicate_with_new_data(sig_normalized)
# todo use flag once is fixed
# https://github.com/NeuralEnsemble/python-neo/issues/752
sig_normalized.array_annotate(**sig.array_annotations)
sig_dimless = sig_normalized / sig.units
result.append(sig_dimless)
# Return single object, or list of objects
if len(result) == 1:
return result[0]
else:
return result
def cross_correlation_function(signal, ch_pairs, env=False, nlags=None):
"""
Computes unbiased estimator of the cross-correlation function.
Calculates the unbiased estimator of the cross-correlation function [1]_
.. math::
R(\\tau) = \\frac{1}{N-|k|} R'(\\tau) \\ ,
where :math:`R'(\\tau) = \\left<x(t)y(t+\\tau)\\right>` in a pairwise
manner, i.e. `signal[ch_pairs[0,0]]` vs `signal2[ch_pairs[0,1]]`,
`signal[ch_pairs[1,0]]` vs `signal2[ch_pairs[1,1]]`, and so on. The
cross-correlation function is obtained by `scipy.signal.fftconvolve`.
Time series in signal are zscored beforehand. Alternatively returns the
Hilbert envelope of :math:`R(\\tau)`, which is useful to determine the
correlation length of oscillatory signals.
Parameters
-----------
signal : neo.AnalogSignal (`nt` x `nch`)
Signal with nt number of samples that contains nch LFP channels
ch_pairs : list (or array with shape `(n,2)`)
list with n channel pairs for which to compute cross-correlation,
each element of list must contain 2 channel indices
env : bool
Return Hilbert envelope of cross-correlation function
Default: False
nlags : int
Defines number of lags for cross-correlation function. Float will be
rounded to nearest integer. Number of samples of output is `2*nlags+1`.
If None, number of samples of output is equal to number of samples of
input signal, namely `nt`
Default: None
Returns
-------
cross_corr : neo.AnalogSignal (`2*nlag+1` x `n`)
Pairwise cross-correlation functions for channel pairs given by
`ch_pairs`. If `env=True`, the output is the Hilbert envelope of the
pairwise cross-correlation function. This is helpful to compute the
correlation length for oscillating cross-correlation functions
Raises
------
ValueError
If the input signal is not a neo.AnalogSignal.
ValueError
If `ch_pairs` is not a list of channel pair indices with shape `(n,2)`.
KeyError
If keyword `env` is not a boolean.
KeyError
If `nlags` is not an integer or float larger than 0.
Examples
--------
>>> dt = 0.02
>>> N = 2018
>>> f = 0.5
>>> t = np.arange(N)*dt
>>> x = np.zeros((N,2))
>>> x[:,0] = 0.2 * np.sin(2.*np.pi*f*t)
>>> x[:,1] = 5.3 * np.cos(2.*np.pi*f*t)
>>> # Generate neo.AnalogSignals from x
>>> signal = neo.AnalogSignal(x, units='mV', t_start=0.*pq.ms,
>>> sampling_rate=1/dt*pq.Hz, dtype=float)
>>> rho = elephant.signal_processing.cross_correlation_function(
>>> signal, [0,1], nlags=150)
>>> env = elephant.signal_processing.cross_correlation_function(
>>> signal, [0,1], nlags=150, env=True)
>>> plt.plot(rho.times, rho)
>>> plt.plot(env.times, env) # should be equal to one
>>> plt.show()
References
----------
.. [1] <NAME> (2009) "Spectral Analysis of Signals, Spectral Element
Method in Structural Dynamics", Eq. 2.2.3
"""
# Make ch_pairs a 2D array
pairs = np.array(ch_pairs)
if pairs.ndim == 1:
pairs = pairs[:, np.newaxis]
# Check input
if not isinstance(signal, neo.AnalogSignal):
raise ValueError('Input signal is not a neo.AnalogSignal!')
if np.shape(pairs)[1] != 2:
pairs = pairs.T
if np.shape(pairs)[1] != 2:
raise ValueError('ch_pairs is not a list of channel pair indices.'\
'Cannot define pairs for cross-correlation.')
if not isinstance(env, bool):
raise KeyError('env is not a boolean!')
if nlags is not None:
if not isinstance(nlags, (int, float)):
raise KeyError('nlags must be an integer or float larger than 0!')
if nlags <= 0:
raise KeyError('nlags must be an integer or float larger than 0!')
# z-score analog signal and store channel time series in different arrays
# Cross-correlation will be calculated between xsig and ysig
xsig = np.array([zscore(signal).magnitude[:, pair[0]] \
for pair in pairs]).T
ysig = np.array([zscore(signal).magnitude[:, pair[1]] \
for pair in pairs]).T
# Define vector of lags tau
nt, nch = np.shape(xsig)
tau = (np.arange(nt) - nt//2)
# Calculate cross-correlation by taking Fourier transform of signal,
# multiply in Fourier space, and transform back. Correct for bias due
# to zero-padding
xcorr = np.zeros((nt, nch))
for i in range(nch):
xcorr[:, i] = scipy.signal.fftconvolve(xsig[:, i], ysig[::-1, i],
mode='same')
xcorr = xcorr / npm.repmat((nt-abs(tau)), nch, 1).T
# Calculate envelope of cross-correlation function with Hilbert transform.
# This is useful for transient oscillatory signals.
if env:
for i in range(nch):
xcorr[:, i] = np.abs(scipy.signal.hilbert(xcorr[:, i]))
# Cut off lags outside desired range
if nlags is not None:
        nlags = int(np.round(nlags))
        tau = tau[nt // 2 - nlags: nt // 2 + nlags + 1]
        xcorr = xcorr[nt // 2 - nlags: nt // 2 + nlags + 1, :]
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ..simulation.properties import Pose
from ..log import PCG_ROOT_LOGGER
def circular(
radius,
max_theta=2 * np.pi,
step_theta=None,
step_radius=None,
n_theta=None,
n_radius=None,
pose_offset=[0, 0, 0, 0, 0, 0]):
poses = None
assert radius > 0, \
'Radius must be greater than zero, provided={}'.format(
radius)
assert max_theta >= 0 and max_theta <= 2 * np.pi, \
'max_theta must be greater than zero and smaller' \
' than 2 * pi, provided={}'.format(max_theta)
if step_theta is not None:
        assert step_theta > 0, \
            'step_theta must be greater than zero, provided={}'.format(
                step_theta)
theta = np.arange(0, max_theta + step_theta, step_theta)
elif n_theta is not None:
assert n_theta > 0, \
'Number of angle samples must be greater than 0, ' \
'provided={}'.format(n_theta)
if max_theta == 2 * np.pi:
m = max_theta - max_theta / n_theta
else:
m = max_theta
theta = np.linspace(0, m, n_theta)
else:
raise ValueError('No sampling method provided for theta')
if step_radius is not None:
if step_radius <= 0:
msg = 'step_radius must be greater than zero, provided={}'.format(
step_radius)
PCG_ROOT_LOGGER.error(msg)
return poses
r = np.arange(step_radius, radius + step_radius, step_radius)
r = r[np.nonzero(r > 0)[0]]
elif n_radius is not None:
assert n_radius > 0, \
'n_radius must be greater than zero, provided={}'.format(
n_radius)
if n_radius == 1:
r = np.array([radius])
else:
r = np.linspace(float(radius) / n_radius, radius, n_radius)
r = r[np.nonzero(r > 0)[0]]
elif radius > 0:
r = np.array([radius])
else:
raise ValueError('Invalid radius input')
tt, rr = np.meshgrid(theta, r)
tt = tt.flatten()
rr = rr.flatten()
poses = list()
if isinstance(pose_offset, Pose):
offset = pose_offset
else:
offset = Pose(pos=pose_offset[0:3], rot=pose_offset[3::])
for i in range(tt.size):
poses.append(
Pose(
pos=[
rr[i] * np.cos(tt[i]),
rr[i] * np.sin(tt[i]),
0]))
poses[-1] = offset + poses[-1]
return poses
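# A minimal usage sketch (parameter values are only illustrative): place eight
# poses evenly spaced on a circle of radius 2 m, lifted 1 m above the ground.
def _circular_example():
    poses = circular(radius=2.0, n_theta=8, n_radius=1,
                     pose_offset=[0, 0, 1.0, 0, 0, 0])
    return poses  # list of 8 Pose objects on the z = 1 plane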
def rectangular(
x_length=None,
y_length=None,
step_x=None,
step_y=None,
n_x=None,
n_y=None,
pose_offset=[0, 0, 0, 0, 0, 0],
center=False):
poses = None
if None not in [x_length, y_length]:
if None not in [step_x, step_y]:
if step_x <= 0:
PCG_ROOT_LOGGER.error('step_x must be greater than zero')
return poses
if step_y <= 0:
PCG_ROOT_LOGGER.error('step_y must be greater than zero')
return poses
if step_x > x_length:
PCG_ROOT_LOGGER.error(
'step_x should be equal or smaller than x_length')
return poses
if step_y > y_length:
PCG_ROOT_LOGGER.error(
'step_y should be equal or smaller than y_length')
return poses
x = np.arange(0, x_length + step_x, step_x)
y = np.arange(0, y_length + step_y, step_y)
elif None not in [n_x, n_y]:
if n_x <= 0:
PCG_ROOT_LOGGER.error('n_x must be greater than zero')
return poses
if n_y <= 0:
PCG_ROOT_LOGGER.error('n_y must be greater than zero')
return poses
x = np.linspace(0, x_length, n_x)
y = np.linspace(0, y_length, n_y)
else:
PCG_ROOT_LOGGER.error(
                'No valid options were chosen to'
' generate the rectangular pattern')
return poses
if center:
x = x - (x.max() - x.min()) / 2
y = y - (y.max() - y.min()) / 2
xx, yy = np.meshgrid(x, y)
xx = xx.flatten()
yy = yy.flatten()
if isinstance(pose_offset, Pose):
offset = pose_offset
else:
offset = Pose(pos=pose_offset[0:3], rot=pose_offset[3::])
poses = list()
for i in range(xx.size):
poses.append(Pose(pos=[xx[i], yy[i], 0]))
poses[-1] = offset + poses[-1]
else:
raise ValueError('x_length or y_length were not provided')
return poses
def cuboid(
x_length=None,
y_length=None,
z_length=None,
step_x=None,
step_y=None,
step_z=None,
n_x=None,
n_y=None,
n_z=None,
pose_offset=[0, 0, 0, 0, 0, 0],
center=False):
poses = None
if None not in [x_length, y_length, z_length]:
if None not in [step_x, step_y, step_z]:
if step_x <= 0:
PCG_ROOT_LOGGER.error('step_x must be greater than zero')
return poses
if step_y <= 0:
PCG_ROOT_LOGGER.error('step_y must be greater than zero')
return poses
if step_z <= 0:
PCG_ROOT_LOGGER.error('step_z must be greater than zero')
return poses
if step_x > x_length:
PCG_ROOT_LOGGER.error(
'step_x should be equal or smaller than x_length')
return poses
if step_y > y_length:
PCG_ROOT_LOGGER.error(
'step_y should be equal or smaller than y_length')
return poses
if step_z > z_length:
PCG_ROOT_LOGGER.error(
'step_z should be equal or smaller than z_length')
return poses
x = np.arange(0, x_length + step_x, step_x)
y = np.arange(0, y_length + step_y, step_y)
z = np.arange(0, z_length + step_z, step_z)
elif None not in [n_x, n_y, n_z]:
if n_x <= 0:
PCG_ROOT_LOGGER.error('n_x must be greater than zero')
return poses
if n_y <= 0:
PCG_ROOT_LOGGER.error('n_y must be greater than zero')
return poses
if n_z <= 0:
PCG_ROOT_LOGGER.error('n_z must be greater than zero')
return poses
x = np.linspace(0, x_length, n_x)
y = np.linspace(0, y_length, n_y)
            z = np.linspace(0, z_length, n_z)
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from matplotlib import rc
import matplotlib
import pymaster as nmt
from mpl_toolkits.axes_grid1.inset_locator import mark_inset,zoomed_inset_axes
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
nside=256
nsims=1000
prefix_pure="tests_sphb/run_pure01_ns%d_cont1"%nside
prefix_nopu="tests_sphb/run_pure00_ns%d_cont1"%nside
prefix_noco="tests_sphb/run_pure01_ns%d_cont0"%nside
prefix_nodb="tests_sph/run_pure01_ns%d_cont1_no_debias"%nside
def tickfs(ax,x=True,y=True) :
if x :
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(12)
if y :
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(12)
def read_cls(fname) :
l,cee,ceb,cbe,cbb=np.loadtxt(fname,unpack=True);
id_good=np.where(l<2*nside)[0]
return l[id_good],cee[id_good],ceb[id_good],cbe[id_good],cbb[id_good]
l_th,clEE_th,clEB_th,clBE_th,clBB_th=read_cls(prefix_pure+"_cl_th.txt")
ndof=len(l_th)
print("Reading")
clEE_pure=[]; clEB_pure=[]; clBB_pure=[];
clEE_nopu=[]; clEB_nopu=[]; clBB_nopu=[];
clEE_noco=[]; clEB_noco=[]; clBB_noco=[];
clEE_nodb=[]; clEB_nodb=[]; clBB_nodb=[];
for i in np.arange(nsims) :
ll,ccee,cceb,ccbe,ccbb=read_cls(prefix_pure+"_cl_%04d.txt"%(i+1))
clEE_pure.append(ccee); clEB_pure.append(cceb); clBB_pure.append(ccbb);
ll,ccee,cceb,ccbe,ccbb=read_cls(prefix_nopu+"_cl_%04d.txt"%(i+1))
clEE_nopu.append(ccee); clEB_nopu.append(cceb); clBB_nopu.append(ccbb);
ll,ccee,cceb,ccbe,ccbb=read_cls(prefix_noco+"_cl_%04d.txt"%(i+1))
clEE_noco.append(ccee); clEB_noco.append(cceb); clBB_noco.append(ccbb);
ll,ccee,cceb,ccbe,ccbb=read_cls(prefix_nodb+"_cl_%04d.txt"%(i+1))
clEE_nodb.append(ccee); clEB_nodb.append(cceb); clBB_nodb.append(ccbb);
clEE_pure=np.array(clEE_pure); clEB_pure=np.array(clEB_pure); clBB_pure=np.array(clBB_pure);
clEE_nopu=np.array(clEE_nopu); clEB_nopu=np.array(clEB_nopu); clBB_nopu=np.array(clBB_nopu);
clEE_noco=np.array(clEE_noco); clEB_noco=np.array(clEB_noco); clBB_noco=np.array(clBB_noco);
clEE_nodb=np.array(clEE_nodb); clEB_nodb=np.array(clEB_nodb); clBB_nodb=np.array(clBB_nodb);
print("Computing statistics")
def compute_stats(y,y_th) :
mean=np.mean(y,axis=0)
cov=np.mean(y[:,:,None]*y[:,None,:],axis=0)-mean[:,None]*mean[None,:]
icov=np.linalg.inv(cov)
chi2_red=np.dot(mean-y_th,np.dot(icov,mean-y_th))*nsims
chi2_all=np.sum((y-y_th)*np.sum(icov[None,:,:]*(y-y_th)[:,None,:],axis=2),axis=1)
return mean,cov,icov,chi2_red,chi2_all
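# compute_stats returns, for each stack of simulated spectra:
#  - the sample mean and covariance over the nsims realizations,
#  - chi2_red: the chi^2 of the mean spectrum against the theory,
#    nsims * (mean - y_th)^T C^{-1} (mean - y_th), which should follow a chi^2
#    distribution with ndof degrees of freedom for an unbiased estimator,
#  - chi2_all: the per-simulation chi^2 values used for the histograms below.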
clEE_pure_mean,clEE_pure_cov,clEE_pure_icov,clEE_pure_chi2r,clEE_pure_chi2all=compute_stats(clEE_pure,clEE_th)
clEB_pure_mean,clEB_pure_cov,clEB_pure_icov,clEB_pure_chi2r,clEB_pure_chi2all=compute_stats(clEB_pure,clEB_th)
clBB_pure_mean,clBB_pure_cov,clBB_pure_icov,clBB_pure_chi2r,clBB_pure_chi2all=compute_stats(clBB_pure,clBB_th)
clEE_noco_mean,clEE_noco_cov,clEE_noco_icov,clEE_noco_chi2r,clEE_noco_chi2all=compute_stats(clEE_noco,clEE_th)
clEB_noco_mean,clEB_noco_cov,clEB_noco_icov,clEB_noco_chi2r,clEB_noco_chi2all=compute_stats(clEB_noco,clEB_th)
clBB_noco_mean,clBB_noco_cov,clBB_noco_icov,clBB_noco_chi2r,clBB_noco_chi2all=compute_stats(clBB_noco,clBB_th)
clEE_nodb_mean,clEE_nodb_cov,clEE_nodb_icov,clEE_nodb_chi2r,clEE_nodb_chi2all=compute_stats(clEE_nodb,clEE_th)
clEB_nodb_mean,clEB_nodb_cov,clEB_nodb_icov,clEB_nodb_chi2r,clEB_nodb_chi2all=compute_stats(clEB_nodb,clEB_th)
clBB_nodb_mean,clBB_nodb_cov,clBB_nodb_icov,clBB_nodb_chi2r,clBB_nodb_chi2all=compute_stats(clBB_nodb,clBB_th)
m_pure,cov_pure,icov_pure,chi2r_pure,chi2all_pure=compute_stats(np.vstack((clEE_pure.T,clEB_pure.T,clBB_pure.T)).T,
np.vstack((clEE_th,clEB_th,clBB_th)).flatten())
m_noco,cov_noco,icov_noco,chi2r_noco,chi2all_noco=compute_stats(np.vstack((clEE_noco.T,clEB_noco.T,clBB_noco.T)).T,
np.vstack((clEE_th,clEB_th,clBB_th)).flatten())
print(chi2r_pure,len(m_pure),1-st.chi2.cdf(chi2r_pure,len(m_pure)))
#Plot errorbars
plt.figure()
ax=plt.gca()
ax.plot([-1,-1],[-1,-1],'k-',lw=2,label='$BB$')
ax.plot([-1,-1],[-1,-1],'k--',lw=2,label='$EB$')
ax.plot([-1,-1],[-1,-1],'k-.',lw=2,label='$EE$')
ax.plot(ll,np.std(clBB_nopu,axis=0),'b-',lw=2,label='${\\rm Standard\\,\\,PCL}$');
ax.plot(ll,np.std(clBB_pure,axis=0),'r-',lw=2,label='${\\rm Pure}\\,B$');
ax.plot(ll,np.std(clEB_nopu,axis=0),'b--',lw=2)
ax.plot(ll,np.std(clEB_pure,axis=0),'r--',lw=2)
ax.plot(ll,np.std(clEE_nopu,axis=0),'b-.',lw=2)
ax.plot(ll,np.std(clEE_pure,axis=0),'r-.',lw=2)
ax.set_xlabel('$\\ell$',fontsize=15)
ax.set_ylabel('$\\sigma(C_\\ell)\\,[\\mu K^2\\,{\\rm srad}]$',fontsize=15)
tickfs(ax)
ax.set_xlim([4,515])
ax.set_ylim([7E-8,3E-2])
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc='upper right',frameon=False,fontsize=14,ncol=2)
plt.savefig("plots_paper/val_sigmas_cmb_sph.pdf",bbox_inches='tight')
#Plot covariance
plt.figure();
ax=plt.gca();
im=ax.imshow(cov_pure/np.sqrt(np.diag(cov_pure)[None,:]*np.diag(cov_pure)[:,None]),
interpolation='nearest',cmap=plt.cm.Greys);
for i in np.arange(2)+1 :
ax.plot([i*ndof,i*ndof],[0,3*ndof],'k--',lw=1)
ax.plot([0,3*ndof],[i*ndof,i*ndof],'k--',lw=1)
ax.set_xlim([0,3*ndof])
ax.set_ylim([3*ndof,0])
ax.set_xticks(ndof*(np.arange(3)+0.5))
ax.set_yticks(ndof*(np.arange(3)+0.5))
ax.set_xticklabels(['$EE$','$EB$','$BB$'])
ax.set_yticklabels(['$EE$','$EB$','$BB$'])
plt.colorbar(im)
axins=zoomed_inset_axes(ax,2.5,loc=6)
axins.imshow(cov_pure/np.sqrt(np.diag(cov_pure)[None,:]*np.diag(cov_pure)[:,None]),
interpolation='nearest',cmap=plt.cm.Greys)
axins.get_xaxis().set_visible(False)
axins.get_yaxis().set_visible(False)
axins.set_xlim(0.,0.2*ndof)
axins.set_ylim(0.2*ndof,0.)
mark_inset(ax, axins, loc1=2, loc2=1, fc="none")#, ec="0.5")
tickfs(ax)
plt.savefig("plots_paper/val_covar_cmb_sph.pdf",bbox_inches='tight')
plt.figure();
ax=plt.gca();
im=ax.imshow(cov_noco/np.sqrt(np.diag(cov_noco)[None,:]*np.diag(cov_noco)[:,None]),
interpolation='nearest',cmap=plt.cm.Greys);
for i in np.arange(2)+1 :
ax.plot([i*ndof,i*ndof],[0,3*ndof],'k--',lw=1)
ax.plot([0,3*ndof],[i*ndof,i*ndof],'k--',lw=1)
ax.set_xlim([0,3*ndof])
ax.set_ylim([3*ndof,0])
ax.set_xticks(ndof*(np.arange(3)+0.5))
ax.set_yticks(ndof*(np.arange(3)+0.5))
ax.set_xticklabels(['$EE$','$EB$','$BB$'])
ax.set_yticklabels(['$EE$','$EB$','$BB$'])
plt.colorbar(im)
axins=zoomed_inset_axes(ax,2.5,loc=6)
axins.imshow(cov_noco/np.sqrt(np.diag(cov_noco)[None,:]*np.diag(cov_noco)[:,None]),
interpolation='nearest',cmap=plt.cm.Greys)
axins.get_xaxis().set_visible(False)
axins.get_yaxis().set_visible(False)
axins.set_xlim(-0.1,0.2*ndof)
axins.set_ylim(0.2*ndof,-0.1)
mark_inset(ax, axins, loc1=2, loc2=1, fc="none")#, ec="0.5")
tickfs(ax)
plt.savefig("plots_paper/val_covar_cmb_sph_nocont.pdf",bbox_inches='tight')
#Plot residuals
cols=plt.cm.rainbow(np.linspace(0,1,3))
fig=plt.figure()
ax=fig.add_axes((0.12,0.3,0.78,0.6))
ax.plot([-1,-1],[-1,-1],'k-' ,label='${\\rm Sims}$')
ax.plot([-1,-1],[-1,-1],'k--',label='${\\rm Input}$')
ic=0
ax.plot(l_th[l_th>18],clEE_pure_mean[l_th>18],label='$EE$',c=cols[ic],alpha=0.5) #Plotting above ell=18 to avoid quirky lines due to negative values
ax.plot(l_th[l_th>18],clEE_th[l_th>18],'--',c=cols[ic]);
ic+=1
ax.plot(l_th,clEB_pure_mean,label='$EB$',c=cols[ic],alpha=0.5);
ic+=1
ax.plot(l_th[l_th>10],clBB_pure_mean[l_th>10],label='$BB$',c=cols[ic],alpha=0.5);
ax.plot(l_th,np.fabs(clBB_nodb_mean),'-.',
label='$BB,\\,\\,{\\rm no\\,\\,debias}$',c=cols[ic]);
ax.plot(l_th,clBB_th,'--',c=cols[ic]);
ic+=1
ax.set_ylim([2E-8,1.3E-2])
ax.legend(loc='upper left',frameon=False,fontsize=14,ncol=3,labelspacing=0.1)
ax.set_xlim([0,515])
ax.set_yscale('log');
tickfs(ax)
ax.set_xticks([])
ax.set_ylabel('$C_\\ell\\,[\\mu K^2\\,{\\rm srad}]$',fontsize=15)
ax=fig.add_axes((0.12,0.1,0.78,0.2))
ic=0
ax.errorbar(l_th ,(clEE_pure_mean-clEE_th)*np.sqrt(nsims+0.)/np.sqrt(np.diag(clEE_pure_cov)),
yerr=np.ones(ndof),label='$EE$',fmt='.',c=cols[ic]); ic+=1
ax.errorbar(l_th+2,(clEB_pure_mean-clEB_th)*np.sqrt(nsims+0.)/np.sqrt(np.diag(clEB_pure_cov)),
yerr=np.ones(ndof),label='$EB$',fmt='.',c=cols[ic]); ic+=1
ax.errorbar(l_th+4,(clBB_pure_mean-clBB_th)*np.sqrt(nsims+0.)/np.sqrt(np.diag(clBB_pure_cov)),
yerr=np.ones(ndof),label='$BB$',fmt='.',c=cols[ic]); ic+=1
ax.set_xlabel('$\\ell$',fontsize=15)
ax.set_ylabel('$\\Delta C_\\ell/\\sigma_\\ell$',fontsize=15)
ax.set_ylim([-6,6])
ax.set_xlim([0,515])
ax.set_yticks([-4,0,4])
tickfs(ax)
plt.savefig("plots_paper/val_cl_cmb_sph.pdf",bbox_inches='tight')
#Plot chi2 dist
xr=[ndof-5*np.sqrt(2.*ndof),ndof+5*np.sqrt(2*ndof)]
x=np.linspace(xr[0],xr[1],256)
pdf=st.chi2.pdf(x,ndof)
plt.figure(figsize=(10,4))
ax=[plt.subplot(1,3,i+1) for i in range(3)]
plt.subplots_adjust(wspace=0, hspace=0)
h,b,p=ax[0].hist(clEE_pure_chi2all,bins=40,density=True,range=xr)
ax[0].text(0.8,0.9,'$EE$',transform=ax[0].transAxes,fontsize=14)
ax[0].plot([clEE_pure_chi2r,clEE_pure_chi2r],[0,1.4*np.amax(pdf)],'k-.')
ax[0].set_xlabel('$\\chi^2$',fontsize=15)
ax[0].set_ylabel('$P(\\chi^2)$',fontsize=15)
print('EE : %.3lE'%(1-st.chi2.cdf(clEE_pure_chi2r,ndof)))
h,b,p=ax[1].hist(clEB_pure_chi2all,bins=40,density=True,range=xr)
ax[1].text(0.8,0.9,'$EB$',transform=ax[1].transAxes,fontsize=14)
ax[1].plot([clEB_pure_chi2r,clEB_pure_chi2r],[0,1.4*np.amax(pdf)],'k-.')
print('EB : %.3lE'%(1-st.chi2.cdf(clEB_pure_chi2r,ndof)))
h,b,p=ax[2].hist(clBB_pure_chi2all,bins=40,density=True,range=xr)
ax[2].text(0.8,0.9,'$BB$',transform=ax[2].transAxes,fontsize=14)
ax[2].plot([clBB_pure_chi2r,clBB_pure_chi2r],[0,1.4*np.amax(pdf)],'k-.')
print('BB : %.3lE'%(1-st.chi2.cdf(clBB_pure_chi2r,ndof)))
for a in ax :
a.set_xlabel('$\\chi^2$',fontsize=15)
ax[1].set_yticklabels([])
ax[2].set_yticklabels([])
for a in ax :
tickfs(a)
a.set_xlim([ndof-5*np.sqrt(2.*ndof),ndof+5*np.sqrt(2.*ndof)])
a.set_ylim([0,1.4*np.amax(pdf)])
a.plot(x,pdf,'k-',label='$P(\\chi^2)$')
a.plot([ndof,ndof],[0,1.4*np.amax(pdf)],'k--',label='$N_{\\rm dof}$')
a.plot([-1,-1],[-1,-1],'k-.',label='$\\chi^2_{\\rm mean}$')
ax[0].legend(loc='upper left',fontsize=12,frameon=False)
plt.savefig("plots_paper/val_chi2_cmb_sph.pdf",bbox_inches='tight')
print("Computing bandpower weights")
ls=np.arange(3*nside,dtype=int)
bpws=np.zeros(3*nside,dtype=int)-1
weights=np.ones(3*nside)
"""Film Mode Matching Mode Solver
Implementation of the Film Mode Matching (FMM) algorithm, as described in:
- Sudbo, "Film mode matching a versatile numerical method for vector mode field calculations in dielectric waveguides", Pure App. Optics, 2 (1993), 211-233
- Sudbo, "Improved formulation of the film mode matching method for mode field calculations in dielectric waveguides", Pure App. Optics, 3 (1994), 381-388
Examples
========
See L{FMM1d} and L{FMM2d}.
"""
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from functools import reduce
__author__ = '<NAME> & <NAME>'
import numpy
import scipy
import scipy.optimize
import copy
import EMpy.utils
from EMpy.modesolvers.interface import *
import pylab
class Message(object):
def __init__(self, msg, verbosity=0):
self.msg = msg
self.verbosity = verbosity
def show(self, verbosity=0):
if self.verbosity <= verbosity:
print((self.verbosity - 1) * '\t' + self.msg)
class Struct(object):
"""Empty class to fill with whatever I want. Maybe a dictionary would do?"""
pass
class Boundary(object):
"""Boundary conditions.
Electric and Magnetic boundary conditions are translated to Symmetric
and Antisymmetric for each field.
@ivar xleft: Left bc on x.
@ivar xright: Right bc on x.
@ivar yleft: Left bc on y.
@ivar yright: Right bc on y.
"""
def __init__(self, xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall'):
"""Set the boundary conditions, validate and translate."""
self.xleft = xleft
self.yleft = yleft
self.xright = xright
self.yright = yright
self.validate()
self.translate()
def validate(self):
"""Validate the input.
@raise ValueError: Unknown boundary.
"""
if not reduce(lambda x, y: x & y,
[(x == 'Electric Wall') | (x == 'Magnetic Wall') for x in [self.xleft, self.yleft, self.xright, self.yright]]):
raise ValueError('Unknown boundary.')
def translate(self):
"""Translate for each field.
@raise ValueError: Unknown boundary.
"""
self.xh = ''
self.xe = ''
self.yh = ''
self.ye = ''
if self.xleft == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xleft == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.xright == 'Electric Wall':
self.xh += 'A'
self.xe += 'S'
elif self.xright == 'Magnetic Wall':
self.xh += 'S'
self.xe += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yleft == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yleft == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
if self.yright == 'Electric Wall':
self.yh += 'A'
self.ye += 'S'
elif self.yright == 'Magnetic Wall':
self.yh += 'S'
self.ye += 'A'
else:
raise ValueError('Unknown boundary.')
def __str__(self):
return 'xleft = %s, xright = %s, yleft = %s, yright = %s' % (self.xleft, self.xright, self.yleft, self.yright)
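# Usage sketch: an all-electric-wall boundary makes the H field antisymmetric
# and the E field symmetric on every edge, e.g.
#   bc = Boundary(xleft='Electric Wall', xright='Electric Wall',
#                 yleft='Electric Wall', yright='Electric Wall')
#   # bc.xh == 'AA', bc.xe == 'SS', bc.yh == 'AA', bc.ye == 'SS'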
class Slice(object):
"""One dimensional arrangement of layers and 1d modes.
    A slice is made of a stack of layers, i.e. refractive indices with a thickness,
with given boundary conditions.
It holds 1d modes, both TE and TM.
@ivar x1: start point of the slice in x.
@ivar x2: end point of the slice in x.
@ivar Uy: array of points delimiting the layers.
@ivar boundary: boundary conditions.
@ivar modie: E modes.
@ivar modih: H modes.
@ivar Ux: array of points delimiting the slices in x (internally set).
@ivar refractiveindex: refractive index of all the slices (internally set).
@ivar epsilon: epsilon of all the slices (internally set).
@ivar wl: vacuum wavelength.
"""
def __init__(self, x1, x2, Uy, boundary, modie, modih):
self.x1 = x1
self.x2 = x2
self.Uy = Uy
self.boundary = boundary
self.modie = modie
self.modih = modih
def __str__(self):
return 'x1 = %g, x2 = %g\nUy = %s\nboundary = %s' % (self.x1, self.x2, self.Uy, self.boundary)
class FMMMode1d(Mode):
"""One dimensional mode.
Note
====
Virtual class.
"""
pass
class FMMMode1dx(FMMMode1d):
"""Matching coefficients in the x-direction.
L{FMMMode1dy}s are weighted by these coefficients to assure continuity.
"""
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.U.__str__())
class FMMMode1dy(FMMMode1d):
"""One dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
Note
====
The mode is suppose one dimensional, in the y direction.
    @ivar sl: array of values of the mode at the lhs of each slice.
    @ivar sr: array of values of the mode at the rhs of each slice.
    @ivar al: array of values of the derivative of the mode at the lhs of each slice.
    @ivar ar: array of values of the derivative of the mode at the rhs of each slice.
@ivar k: wavevector inside each layer.
@ivar keff: effective wavevector.
    @ivar zero: residual of the dispersion relation; it must be as close to zero as possible.
@ivar Uy: array of points delimiting the layers.
"""
def eval(self, y_):
"""Evaluate the mode at y."""
y = numpy.atleast_1d(y_)
ny = len(y)
f = numpy.zeros(ny, dtype=complex)
for iU in range(len(self.U) - 1):
k = self.k[iU]
sl = self.sl[iU]
al = self.al[iU]
Ul = self.U[iU]
Ur = self.U[iU+1]
idx = numpy.where((Ul <= y) & (y <= Ur))
yy = y[idx] - Ul
f[idx] = sl * numpy.cos(k * yy) + al * sinxsux(k * yy) * yy
return f
def plot(self, y):
f = self.eval(y)
        pylab.plot(y, numpy.real(f), y, numpy.imag(f))
pylab.legend(('real', 'imag'))
pylab.xlabel('y')
pylab.ylabel('mode1d')
pylab.show()
def __str__(self):
return 'sl = %s\nsr = %s\nal = %s\nar = %s\nk = %s\nkeff = %s\nzero = %s\nU = %s' % \
(self.sl.__str__(),
self.sr.__str__(),
self.al.__str__(),
self.ar.__str__(),
self.k.__str__(),
self.keff.__str__(),
self.zero.__str__(),
self.U.__str__())
class FMMMode2d(Mode):
"""Two dimensional mode.
It holds the coefficients that describe the mode in the FMM expansion.
"""
def get_x(self, n=100):
return numpy.linspace(self.slicesx[0].Ux[0], self.slicesx[0].Ux[-1], n)
def get_y(self, n=100):
return numpy.linspace(self.slicesx[0].Uy[0], self.slicesx[0].Uy[-1], n)
def eval(self, x_=None, y_=None):
"""Evaluate the mode at x,y."""
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
nmodi = len(self.modie)
lenx = len(x)
leny = len(y)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
uh = numpy.zeros((nmodi, lenx), dtype=complex)
ue = numpy.zeros_like(uh)
udoth = numpy.zeros_like(uh)
udote = numpy.zeros_like(uh)
Exsh = numpy.zeros((leny, nmodi), dtype=complex)
Exah = numpy.zeros_like(Exsh)
Exse = numpy.zeros_like(Exsh)
Exae = numpy.zeros_like(Exsh)
Eysh = numpy.zeros_like(Exsh)
Eyah = numpy.zeros_like(Exsh)
Eyse = numpy.zeros_like(Exsh)
Eyae = numpy.zeros_like(Exsh)
Ezsh = numpy.zeros_like(Exsh)
Ezah = numpy.zeros_like(Exsh)
Ezse = numpy.zeros_like(Exsh)
Ezae = numpy.zeros_like(Exsh)
cBxsh = numpy.zeros_like(Exsh)
cBxah = numpy.zeros_like(Exsh)
cBxse = numpy.zeros_like(Exsh)
cBxae = numpy.zeros_like(Exsh)
cBysh = numpy.zeros_like(Exsh)
cByah = numpy.zeros_like(Exsh)
cByse = numpy.zeros_like(Exsh)
cByae = numpy.zeros_like(Exsh)
cBzsh = numpy.zeros_like(Exsh)
cBzah = numpy.zeros_like(Exsh)
cBzse = numpy.zeros_like(Exsh)
cBzae = numpy.zeros_like(Exsh)
ExTE = numpy.zeros((leny,lenx), dtype=complex)
EyTE = numpy.zeros_like(ExTE)
EzTE = numpy.zeros_like(ExTE)
ExTM = numpy.zeros_like(ExTE)
EyTM = numpy.zeros_like(ExTE)
EzTM = numpy.zeros_like(ExTE)
cBxTE = numpy.zeros_like(ExTE)
cByTE = numpy.zeros_like(ExTE)
cBzTE = numpy.zeros_like(ExTE)
cBxTM = numpy.zeros_like(ExTE)
cByTM = numpy.zeros_like(ExTE)
cBzTM = numpy.zeros_like(ExTE)
for mx, slice in enumerate(self.slicesx):
idx = numpy.where((slice.x1 <= x) & (x < slice.x2))
x2 = x[idx] - slice.x1
x1 = slice.x2 - x[idx]
dx = slice.x2 - slice.x1
for n in range(nmodi):
fi = slice.modih[n].eval(y)
fidot = dot(slice.modih[n]).eval(y)
psi = slice.modie[n].eval(y)
psisueps = sueps(slice.modie[n]).eval(y)
psidotsueps = sueps(dot(slice.modie[n])).eval(y)
kfh = self.modih[n].k[mx]
kxh = scipy.sqrt(kfh**2 - kz**2)
sl = self.modih[n].sl[mx] * (k0/kfh)**2
al = self.modih[n].al[mx]
sr = self.modih[n].sr[mx] * (k0/kfh)**2
ar = self.modih[n].ar[mx]
uh[n,idx] = (numpy.sin(kxh * x1) * sl + numpy.sin(kxh * x2) * sr) / numpy.sin(kxh * dx)
udoth[n,idx] = (numpy.sin(kxh * x1) * al + numpy.sin(kxh * x2) * ar) / numpy.sin(kxh * dx)
kfe = self.modie[n].k[mx]
kxe = scipy.sqrt(kfe**2 - kz**2)
sl = self.modie[n].sl[mx] * (k0/kfe)**2
al = self.modie[n].al[mx]
sr = self.modie[n].sr[mx] * (k0/kfe)**2
ar = self.modie[n].ar[mx]
ue[n,idx] = (numpy.sin(kxe * x1) * sl + numpy.sin(kxe * x2) * sr) / numpy.sin(kxe * dx)
udote[n,idx] = (numpy.sin(kxe * x1) * al + numpy.sin(kxe * x2) * ar) / numpy.sin(kxe * dx)
Exsh[:,n] = (kz/k0) * fi
Exah[:,n] = 0
Exse[:,n] = 0
Exae[:,n] = -psidotsueps / k0**2
Eysh[:,n] = 0
Eyah[:,n] = 0
Eyse[:,n] = -(kfe/k0)**2 * psisueps
Eyae[:,n] = 0
Ezsh[:,n] = 0
Ezah[:,n] = -1j * fi / k0
Ezse[:,n] = 1j * kz / k0**2 * psidotsueps
Ezae[:,n] = 0
cBxsh[:,n] = 0
cBxah[:,n] = fidot / k0**2
cBxse[:,n] = kz / k0 * psi
cBxae[:,n] = 0
cBysh[:,n] = (kfh/k0)**2 * fi
cByah[:,n] = 0
cByse[:,n] = 0
cByae[:,n] = 0
cBzsh[:,n] = -1j * kz / k0**2 * fidot
cBzah[:,n] = 0
cBzse[:,n] = 0
cBzae[:,n] = -1j * psi / k0
ExTE[:,idx] = numpy.tensordot(Exsh, uh[:,idx], axes=1) + numpy.tensordot(Exah, udoth[:,idx], axes=1)
ExTM[:,idx] = numpy.tensordot(Exse, ue[:,idx], axes=1) + numpy.tensordot(Exae, udote[:,idx], axes=1)
EyTE[:,idx] = numpy.tensordot(Eysh, uh[:,idx], axes=1) + numpy.tensordot(Eyah, udoth[:,idx], axes=1)
EyTM[:,idx] = numpy.tensordot(Eyse, ue[:,idx], axes=1) + numpy.tensordot(Eyae, udote[:,idx], axes=1)
EzTE[:,idx] = numpy.tensordot(Ezsh, uh[:,idx], axes=1) + numpy.tensordot(Ezah, udoth[:,idx], axes=1)
EzTM[:,idx] = numpy.tensordot(Ezse, ue[:,idx], axes=1) + numpy.tensordot(Ezae, udote[:,idx], axes=1)
cBxTE[:,idx] = numpy.tensordot(cBxsh, uh[:,idx], axes=1) + numpy.tensordot(cBxah, udoth[:,idx], axes=1)
cBxTM[:,idx] = numpy.tensordot(cBxse, ue[:,idx], axes=1) + numpy.tensordot(cBxae, udote[:,idx], axes=1)
cByTE[:,idx] = numpy.tensordot(cBysh, uh[:,idx], axes=1) + numpy.tensordot(cByah, udoth[:,idx], axes=1)
cByTM[:,idx] = numpy.tensordot(cByse, ue[:,idx], axes=1) + numpy.tensordot(cByae, udote[:,idx], axes=1)
cBzTE[:,idx] = numpy.tensordot(cBzsh, uh[:,idx], axes=1) + numpy.tensordot(cBzah, udoth[:,idx], axes=1)
cBzTM[:,idx] = numpy.tensordot(cBzse, ue[:,idx], axes=1) + numpy.tensordot(cBzae, udote[:,idx], axes=1)
return (ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM)
def fields(self, x=None, y=None):
ExTE, ExTM, EyTE, EyTM, EzTE, EzTM, cBxTE, cBxTM, cByTE, cByTM, cBzTE, cBzTM = self.eval(x, y)
Ex = ExTE + ExTM
Ey = EyTE + EyTM
Ez = EzTE + EzTM
cBx = cBxTE + cBxTM
cBy = cByTE + cByTM
cBz = cBzTE + cBzTM
return (Ex, Ey, Ez, cBx, cBy, cBz)
def intensity(self, x=None, y=None):
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = .5 * (Ex * numpy.conj(cBy) - Ey * numpy.conj(cBx))
return cSz
def TEfrac_old(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
        Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSTE = .5 * EMpy.utils.trapz2(Ex * numpy.conj(cBy), y, x)
cSTM = .5 * EMpy.utils.trapz2(-Ey * numpy.conj(cBx), y, x)
return numpy.abs(cSTE) / (numpy.abs(cSTE) + numpy.abs(cSTM))
def TEfrac(self):
Sx, Sy = self.__overlap(self)
return Sx / (Sx - Sy)
def overlap_old(self, m, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x, y)
cSz = self.intensity(x, y)
norm = scipy.sqrt(EMpy.utils.trapz2(cSz, y, x))
Ex1, Ey1, Ez1, cBx1, cBy1, cBz1 = m.fields(x, y)
cSz1 = m.intensity(x, y)
norm1 = scipy.sqrt(EMpy.utils.trapz2(cSz1, y, x))
return .5 * EMpy.utils.trapz2(Ex/norm * numpy.conj(cBy1/norm1) - Ey/norm * numpy.conj(cBx1/norm1), y, x)
def __overlap_old(self, mode):
nmodi = len(self.modie)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
Sx = 0j
Sy = 0j
for mx, slice in enumerate(self.slicesx):
for n1 in range(nmodi):
phi_n1 = slice.modih[n1]
phidot_n1 = dot(phi_n1)
psi_n1 = slice.modie[n1]
psisueps_n1 = sueps(psi_n1)
psidotsueps_n1 = sueps(dot(psi_n1))
uh_n1 = copy.deepcopy(self.modih[n1])
# reduce to a single slice
kfh_n1 = uh_n1.k[mx]
uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
uhdot_n1 = dot(uh_n1)
ue_n1 = copy.deepcopy(self.modie[n1])
# reduce to a single slice
kfe_n1 = ue_n1.k[mx]
ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
uedot_n1 = dot(ue_n1)
for n2 in range(nmodi):
phi_n2 = mode.slicesx[mx].modih[n2]
phidot_n2 = dot(phi_n2)
psi_n2 = mode.slicesx[mx].modie[n2]
psisueps_n2 = sueps(psi_n2)
psidotsueps_n2 = sueps(dot(psi_n2))
uh_n2 = copy.deepcopy(mode.modih[n2])
# reduce to a single slice
kfh_n2 = uh_n2.k[mx]
uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
uhdot_n2 = dot(uh_n2)
ue_n2 = copy.deepcopy(mode.modie[n2])
# reduce to a single slice
kfe_n2 = ue_n2.k[mx]
ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
uedot_n2 = dot(ue_n2)
Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
- kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
+ kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
return (Sx, Sy)
def __overlap(self, mode):
nmodi = len(self.modie)
k0 = 2. * numpy.pi / self.slicesx[0].wl
kz = self.keff
Sx = 0j
Sy = 0j
for mx, slice in enumerate(self.slicesx):
phi_n1s = []
phidot_n1s = []
psi_n1s = []
psisueps_n1s = []
psidotsueps_n1s = []
uh_n1s = []
uhdot_n1s = []
ue_n1s = []
uedot_n1s = []
kfe_n1s = []
kfh_n1s = []
phi_n2s = []
phidot_n2s = []
psi_n2s = []
psisueps_n2s = []
psidotsueps_n2s = []
uh_n2s = []
uhdot_n2s = []
ue_n2s = []
uedot_n2s = []
kfe_n2s = []
kfh_n2s = []
for n1 in range(nmodi):
phi_n1 = slice.modih[n1]
phi_n1s.append(phi_n1)
phidot_n1s.append(dot(phi_n1))
psi_n1 = slice.modie[n1]
psi_n1s.append(psi_n1)
psisueps_n1s.append(sueps(psi_n1))
psidotsueps_n1s.append(sueps(dot(psi_n1)))
uh_n1 = copy.deepcopy(self.modih[n1])
# reduce to a single slice
kfh_n1 = uh_n1.k[mx]
kfh_n1s.append(kfh_n1)
uh_n1.k = numpy.atleast_1d(scipy.sqrt(kfh_n1**2 - kz**2))
uh_n1.sl = numpy.atleast_1d(uh_n1.sl[mx] * (k0/kfh_n1)**2)
uh_n1.al = numpy.atleast_1d(uh_n1.al[mx])
uh_n1.sr = numpy.atleast_1d(uh_n1.sr[mx] * (k0/kfh_n1)**2)
uh_n1.ar = numpy.atleast_1d(uh_n1.ar[mx])
uh_n1.U = numpy.atleast_1d(uh_n1.U[mx:mx+2])
uh_n1s.append(uh_n1)
uhdot_n1s.append(dot(uh_n1))
ue_n1 = copy.deepcopy(self.modie[n1])
# reduce to a single slice
kfe_n1 = ue_n1.k[mx]
kfe_n1s.append(kfe_n1)
ue_n1.k = numpy.atleast_1d(scipy.sqrt(kfe_n1**2 - kz**2))
ue_n1.sl = numpy.atleast_1d(ue_n1.sl[mx] * (k0/kfe_n1)**2)
ue_n1.al = numpy.atleast_1d(ue_n1.al[mx])
ue_n1.sr = numpy.atleast_1d(ue_n1.sr[mx] * (k0/kfe_n1)**2)
ue_n1.ar = numpy.atleast_1d(ue_n1.ar[mx])
ue_n1.U = numpy.atleast_1d(ue_n1.U[mx:mx+2])
ue_n1s.append(ue_n1)
uedot_n1s.append(dot(ue_n1))
phi_n2 = mode.slicesx[mx].modih[n1]
phi_n2s.append(phi_n2)
phidot_n2s.append(dot(phi_n2))
psi_n2 = mode.slicesx[mx].modie[n1]
psi_n2s.append(psi_n2)
psisueps_n2s.append(sueps(psi_n2))
psidotsueps_n2s.append(sueps(dot(psi_n2)))
uh_n2 = copy.deepcopy(mode.modih[n1])
# reduce to a single slice
kfh_n2 = uh_n2.k[mx]
kfh_n2s.append(kfh_n2)
uh_n2.k = numpy.atleast_1d(scipy.sqrt(kfh_n2**2 - kz**2))
uh_n2.sl = numpy.atleast_1d(uh_n2.sl[mx] * (k0/kfh_n2)**2)
uh_n2.al = numpy.atleast_1d(uh_n2.al[mx])
uh_n2.sr = numpy.atleast_1d(uh_n2.sr[mx] * (k0/kfh_n2)**2)
uh_n2.ar = numpy.atleast_1d(uh_n2.ar[mx])
uh_n2.U = numpy.atleast_1d(uh_n2.U[mx:mx+2])
uh_n2s.append(uh_n2)
uhdot_n2s.append(dot(uh_n2))
ue_n2 = copy.deepcopy(mode.modie[n1])
# reduce to a single slice
kfe_n2 = ue_n2.k[mx]
kfe_n2s.append(kfe_n2)
ue_n2.k = numpy.atleast_1d(scipy.sqrt(kfe_n2**2 - kz**2))
ue_n2.sl = numpy.atleast_1d(ue_n2.sl[mx] * (k0/kfe_n2)**2)
ue_n2.al = numpy.atleast_1d(ue_n2.al[mx])
ue_n2.sr = numpy.atleast_1d(ue_n2.sr[mx] * (k0/kfe_n2)**2)
ue_n2.ar = numpy.atleast_1d(ue_n2.ar[mx])
ue_n2.U = numpy.atleast_1d(ue_n2.U[mx:mx+2])
ue_n2s.append(ue_n2)
                uedot_n2s.append(dot(ue_n2))
for n1 in range(nmodi):
uh_n1 = uh_n1s[n1]
ue_n1 = ue_n1s[n1]
uedot_n1 = uedot_n1s[n1]
phi_n1 = phi_n1s[n1]
psi_n1 = psi_n1s[n1]
psidotsueps_n1 = psidotsueps_n1s[n1]
psisueps_n1 = psisueps_n1s[n1]
kfe_n1 = kfe_n1s[n1]
for n2 in range(nmodi):
uh_n2 = uh_n2s[n2]
uhdot_n2 = uhdot_n2s[n2]
ue_n2 = ue_n2s[n2]
phi_n2 = phi_n2s[n2]
phidot_n2 = phidot_n2s[n2]
psi_n2 = psi_n2s[n2]
kfh_n2 = kfh_n2s[n2]
Sx += kz * kfh_n2**2 / k0**3 * scalarprod(uh_n1, uh_n2) * scalarprod(phi_n1, phi_n2) \
- kfh_n2**2 / k0**4 * scalarprod(uedot_n1, uh_n2) * scalarprod(psidotsueps_n1, phi_n2)
Sy += kfe_n1**2 * kz / k0**3 * scalarprod(ue_n1, ue_n2) * scalarprod(psisueps_n1, psi_n2) \
+ kfe_n1**2 / k0**4 * scalarprod(ue_n1, uhdot_n2) * scalarprod(psisueps_n1, phidot_n2)
return (Sx, Sy)
def overlap(self, mode):
Sx, Sy = self.__overlap(mode)
return Sx - Sy
def norm(self):
return scipy.sqrt(self.overlap(self))
def normalize(self):
n = self.norm()
for ue, uh in zip(self.modie, self.modih):
ue.sl /= n
ue.al /= n
ue.sr /= n
ue.ar /= n
uh.sl /= n
uh.al /= n
uh.sr /= n
uh.ar /= n
def get_fields_for_FDTD(self, x, y):
"""Get mode's field on a staggered grid.
        Note: ignores some fields on the boundaries.
"""
x0 = self.get_x()
y0 = self.get_y()
Ex, Ey, Ez, cBx, cBy, cBz = self.fields(x0, y0)
# Ex: ignores y = 0, max
x_Ex_FDTD = EMpy.utils.centered1d(x)
y_Ex_FDTD = y[1:-1]
Ex_FDTD = EMpy.utils.interp2(x_Ex_FDTD, y_Ex_FDTD, x0, y0, Ex)
# Ey: ignores x = 0, max
x_Ey_FDTD = x[1:-1]
y_Ey_FDTD = EMpy.utils.centered1d(y)
Ey_FDTD = EMpy.utils.interp2(x_Ey_FDTD, y_Ey_FDTD, x0, y0, Ey)
# Ez: ignores x, y = 0, max
x_Ez_FDTD = x[1:-1]
y_Ez_FDTD = y[1:-1]
Ez_FDTD = EMpy.utils.interp2(x_Ez_FDTD, y_Ez_FDTD, x0, y0, Ez)
# Hx: ignores x = 0, max, /120pi, reverse direction
x_Hx_FDTD = x[1:-1]
y_Hx_FDTD = EMpy.utils.centered1d(y)
        Hx_FDTD = EMpy.utils.interp2(x_Hx_FDTD, y_Hx_FDTD, x0, y0, cBx) / (-120. * numpy.pi)  # careful!
# Hy: ignores y = 0, max, /120pi, reverse direction
x_Hy_FDTD = EMpy.utils.centered1d(x)
y_Hy_FDTD = y[1:-1]
        Hy_FDTD = EMpy.utils.interp2(x_Hy_FDTD, y_Hy_FDTD, x0, y0, cBy) / (-120. * numpy.pi)
# Hz: /120pi, reverse direction
x_Hz_FDTD = EMpy.utils.centered1d(x)
y_Hz_FDTD = EMpy.utils.centered1d(y)
        Hz_FDTD = EMpy.utils.interp2(x_Hz_FDTD, y_Hz_FDTD, x0, y0, cBz) / (-120. * numpy.pi)
return (Ex_FDTD, Ey_FDTD, Ez_FDTD, Hx_FDTD, Hy_FDTD, Hz_FDTD)
def plot(self, x_=None, y_=None):
if x_ is None:
x = self.get_x()
else:
x = numpy.atleast_1d(x_)
if y_ is None:
y = self.get_y()
else:
y = numpy.atleast_1d(y_)
f = self.fields(x, y)
# fields
pylab.figure()
titles = ['Ex', 'Ey', 'Ez', 'cBx', 'cBy', 'cBz']
for i in range(6):
subplot_id = 231 + i
pylab.subplot(subplot_id)
pylab.contour(x, y, numpy.abs(f[i]))
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title(titles[i])
pylab.axis('image')
pylab.show()
# power
pylab.figure()
        pylab.contour(x, y, numpy.abs(self.intensity(x, y)))
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title('cSz')
pylab.axis('image')
pylab.show()
def __str__(self):
return 'neff = %s' % (self.keff / (2 * numpy.pi / self.slicesx[0].wl))
class FMM(ModeSolver):
pass
class FMM1d(FMM):
"""Drive to simulate 1d structures.
Examples
========
    Find the first 3 TE modes of two slabs of refractive indices 1 and 3,
of thickness 1um each, for wl = 1, with symmetric boundary conditions:
>>> import numpy
>>> import FMM
>>> Uy = numpy.array([0., 1., 2.])
>>> ny = numpy.array([1., 3.])
>>> wl = 1.
>>> nmodi = 3
>>> simul = FMM.FMM1d(Uy, ny, 'SS').solve(wl, nmodi, 'TE')
>>> keff_0_expected = 18.790809413149393
>>> keff_1_expected = 18.314611633384185
>>> keff_2_expected = 17.326387847565034
>>> assert(numpy.allclose(simul.modes[0].keff, keff_0_expected))
>>> assert(numpy.allclose(simul.modes[1].keff, keff_1_expected))
>>> assert(numpy.allclose(simul.modes[2].keff, keff_2_expected))
"""
def __init__(self, Uy, ny, boundary):
"""Set coordinates of regions, refractive indeces and boundary conditions."""
self.Uy = Uy
self.ny = ny
self.boundary = boundary
def solve(self, wl, nmodes, polarization, verbosity=0):
"""Find nmodes modes at a given wavelength and polarization."""
Message('Solving 1d modes.', 1).show(verbosity)
self.wl = wl
self.nmodes = nmodes
self.polarization = polarization
self.modes = FMM1d_y(self.Uy, self.ny, self.wl, self.nmodes, self.boundary, self.polarization, verbosity)
return self
class FMM2d(FMM):
"""Drive to simulate 2d structures.
Examples
========
Find the first 2 modes of a lossy Si channel waveguide in SiO2, using
only 3 1dmodes and with electric and magnetic bc on x and y, respectively:
>>> import numpy
>>> import FMM
>>> wl = 1.55
>>> nmodislices = 3
>>> nmodi2d = 2
>>> Ux = numpy.array([0, 2, 2.4, 4.4])
>>> Uy = numpy.array([0, 2, 2.22, 4.22])
>>> boundary = Boundary(xleft='Electric Wall',
yleft='Magnetic Wall',
xright='Electric Wall',
yright='Magnetic Wall')
>>> n2 = 1.446
>>> n1 = 3.4757 - 1e-4j
>>> refindex = numpy.array([[n2, n2, n2],
[n2, n1, n2],
[n2, n2, n2]])
>>> simul = FMM.FMM2d(Ux, Uy, refindex, boundary).solve(wl, nmodislices, nmodi2d)
>>> keff0_expected = 9.666663697969399e+000 -4.028846755836984e-004j
>>> keff1_expected = 7.210476803133368e+000 -2.605078086535284e-004j
>>> assert(numpy.allclose(simul.modes[0].keff, keff0_expected))
>>> assert(numpy.allclose(simul.modes[1].keff, keff1_expected))
"""
def __init__(self, Ux, Uy, rix, boundary):
"""Set coordinates of regions, refractive indeces and boundary conditions."""
self.Ux = Ux
self.Uy = Uy
self.rix = rix
self.boundary = boundary
def solve(self, wl, n1dmodes, nmodes, verbosity=0):
"""Find nmodes modes at a given wavelength using n1dmodes 1d modes in each slice."""
Message('Solving 2d modes', 1).show(verbosity)
self.wl = wl
self.n1dmodes = n1dmodes
self.nmodes = nmodes
self.slices = script1d(self.Ux, self.Uy, self.rix, self.wl, self.boundary, self.n1dmodes, verbosity)
self.modes = FMM1d_x_component(self.slices, nmodes, verbosity)
return self
def analyticalsolution(nmodi, TETM, FMMpars):
betay = FMMpars['beta']
epsilon = FMMpars['epsilon']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
Nregions = len(epsilon)
sl = numpy.zeros((nmodi,Nregions), dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
# interval
D = Uy[-1] - Uy[0]
if TETM == 'TE':
N = numpy.sqrt(2. / D)
else:
N = numpy.sqrt(2. / D * epsilon[0])
# boundary condition
if by == 'AA':
kn = (numpy.pi * numpy.arange(1, nmodi + 1) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
sr[:, -1] = 0.
sl[:, 0] = 0.
elif by == 'AS':
kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.sin(kn * (Uy[:-1] - Uy[0]))
sr = numpy.sin(kn * (Uy[1:] - Uy[0]))
al = numpy.cos(kn * (Uy[:-1] - Uy[0]))
ar = numpy.cos(kn * (Uy[1:] - Uy[0]))
ar[:, -1] = 0.
sl[:, 0] = 0.
elif by == 'SA':
kn = (numpy.pi * (numpy.arange(0, nmodi) + .5) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
sr[:, -1] = 0.
al[:, 0] = 0.
elif by == 'SS':
kn = (numpy.pi * numpy.arange(0, nmodi) / D)
kn = kn[:, numpy.newaxis]
sl = numpy.cos(kn * (Uy[:-1] - Uy[0]))
sr = numpy.cos(kn * (Uy[1:] - Uy[0]))
al = -numpy.sin(kn * (Uy[:-1] - Uy[0]))
ar = -numpy.sin(kn * (Uy[1:] - Uy[0]))
ar[:, -1] = 0.
al[:, 0] = 0.
    # normalization
sl *= N
sr *= N
for n in range(0, nmodi):
al[n,:] *= N * kn[n]
ar[n,:] *= N * kn[n]
    # special case: if k = 0 the mode function is constant and the normalization
    # is different. This only happens with 'SS' boundaries and for the first mode.
if by == 'SS':
sqrt2 = numpy.sqrt(2.)
sl[0,:] /= sqrt2
sr[0,:] /= sqrt2
al[0,:] /= sqrt2
ar[0,:] /= sqrt2
modi = []
for mk in range(0, nmodi):
modo = FMMMode1dy()
modo.sl = sl[mk,:].astype(complex)
modo.sr = sr[mk,:].astype(complex)
modo.al = al[mk,:].astype(complex)
modo.ar = ar[mk,:].astype(complex)
modo.k = kn[mk] * numpy.ones(Nregions)
modo.U = Uy
modo.keff = scipy.sqrt(betay[0]**2 - kn[mk]**2)
modo.zero = 0.
modo.pars = FMMpars
modi.append(modo)
return modi
def sinxsux(x):
return numpy.sinc(x / numpy.pi)
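# sinxsux(x) is sin(x)/x with the removable singularity at x = 0 evaluated as 1
# (numpy.sinc is the normalized sinc sin(pi*x)/(pi*x), hence the division by pi).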
def FMMshootingTM(kz_, FMMpars):
betay = FMMpars['beta']
eps = FMMpars['epsilon']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
kz = numpy.atleast_1d(kz_)
Nregions = len(betay)
d = numpy.diff(Uy)
Delta = numpy.zeros_like(kz)
sl = numpy.zeros(Nregions, dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
kd = k_[:,numpy.newaxis] * d
sinkdsuk_ = sinxsux(kd) * d
coskd_ = numpy.cos(kd)
sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
# left boundary condition
if by[0] == 'A':
al[0] = 1
elif by[0] == 'S':
sl[0] = 1
else:
raise ValueError('unrecognized left boundary condition')
# right boundary condition
if by[1] == 'A':
ar[-1] = 1
elif by[1] == 'S':
sr[-1] = 1
else:
raise ValueError('unrecognized right boundary condition')
    # loop over the layers
maxbetay = numpy.max(numpy.real(betay))
n1 = numpy.argmax(numpy.real(betay)) + 1
if n1 == Nregions:
n1 = Nregions - 1
n2 = n1 + 1
modo = FMMMode1dy()
for m in range(0, len(kz)):
k = k_[m,:]
sinkdsuk = sinkdsuk_[m,:][0]
coskd = coskd_[m,:][0]
sinkdk = sinkdk_[m,:][0]
for idx in range(0, n1):
sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
#******************* requirement of continuity
if idx < n1 - 1:
sl[idx+1] = sr[idx];
al[idx+1] = ar[idx] / eps[idx] * eps[idx + 1];
#*******************
for idx1 in range(Nregions - 1, n2 - 2, -1):
sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
#******************* requirement of continuity
if idx1 > n2:
sr[idx1 - 1] = sl[idx1]
ar[idx1 - 1] = al[idx1] / eps[idx1] * eps[idx1 - 1]
#*******************
Delta[m] = (eps[n1-1] * sr[n1-1] * al[n2-1] - eps[n2-1] * ar[n1-1] * sl[n2-1])
if len(kz) < 2:
# normalize and save only if len(kz) == 1
# otherwise, modo is ignored and only Delta is useful
        # normalize the left- and right-propagating solutions
alfa = sr[n1-1] / sl[n2-1]
sl[n2-1:] *= alfa
sr[n2-1:] *= alfa
al[n2-1:] *= alfa
ar[n2-1:] *= alfa
modo.sl = sl
modo.sr = sr
modo.al = al
modo.ar = ar
modo.k = k
modo.U = Uy
modo.keff = kz
modo.zero = Delta
modo.pars = FMMpars
return (Delta, modo)
def FMMshooting(kz_, FMMpars):
betay = FMMpars['beta']
Uy = FMMpars['Uy']
by = FMMpars['boundary']
kz = numpy.atleast_1d(kz_)
Nregions = len(betay)
d = numpy.diff(Uy)
Delta = numpy.zeros_like(kz)
sl = numpy.zeros(Nregions, dtype=complex)
sr = numpy.zeros_like(sl)
al = numpy.zeros_like(sl)
ar = numpy.zeros_like(sl)
k_ = scipy.sqrt(betay**2 - kz[:,numpy.newaxis]**2)
kd = k_[:,numpy.newaxis] * d
sinkdsuk_ = sinxsux(kd) * d
coskd_ = numpy.cos(kd)
sinkdk_ = numpy.sin(kd) * k_[:,numpy.newaxis]
# left boundary condition
if by[0] == 'A':
al[0] = 1
elif by[0] == 'S':
sl[0] = 1
else:
raise ValueError('unrecognized left boundary condition')
# right boundary condition
if by[1] == 'A':
ar[-1] = 1
elif by[1] == 'S':
sr[-1] = 1
else:
raise ValueError('unrecognized right boundary condition')
    # loop over the layers
maxbetay = numpy.max(numpy.real(betay))
n1 = numpy.argmax(numpy.real(betay)) + 1
if n1 == Nregions:
n1 = Nregions - 1
n2 = n1 + 1
modo = FMMMode1dy()
for m in range(0, len(kz)):
k = k_[m,:]
sinkdsuk = sinkdsuk_[m,:][0]
coskd = coskd_[m,:][0]
sinkdk = sinkdk_[m,:][0]
for idx in range(0, n1):
sr[idx] = sl[idx] * coskd[idx] + al[idx] * sinkdsuk[idx]
ar[idx] = al[idx] * coskd[idx] - sl[idx] * sinkdk[idx]
#******************* requirement of continuity
if idx < n1 - 1:
sl[idx + 1] = sr[idx];
al[idx + 1] = ar[idx];
#*******************
for idx1 in range(Nregions - 1, n2 - 2, -1):
sl[idx1] = sr[idx1] * coskd[idx1] - ar[idx1] * sinkdsuk[idx1]
al[idx1] = ar[idx1] * coskd[idx1] + sr[idx1] * sinkdk[idx1]
#******************* requirement of continuity
if idx1 > n2:
sr[idx1 - 1] = sl[idx1]
ar[idx1 - 1] = al[idx1]
#*******************
Delta[m] = (sr[n1-1] * al[n2-1] - ar[n1-1] * sl[n2-1])
## len_kz = len(kz)
## k = k_[0,:]
## sinkdsuk = sinkdsuk_[0,:][0]
## coskd = coskd_[0,:][0]
## sinkdk = sinkdk_[0,:][0]
## code = """
## for (int m = 0; m < len_kz; ++m) {
## //k = k_(m,:);
## //sinkdsuk = sinkdsuk_(0,:);
## //coskd = coskd_(0,:);
## //sinkdk = sinkdk_(0,:);
## int nn1 = int(n1);
## for (int idx = 0; idx < nn1; ++idx) {
## sr(idx) = sl(idx) * coskd(idx) + al(idx) * sinkdsuk(idx);
## ar(idx) = al(idx) * coskd(idx) - sl(idx) * sinkdk(idx);
## if (idx < nn1 - 1) {
## sl(idx + 1) = sr(idx);
## al(idx + 1) = ar(idx);
## }
## }
## int nn2 = int(n2);
## for (int idx1 = Nregions - 1; idx1 > nn2 - 2; --idx1) {
## sl(idx1) = sr(idx1) * coskd(idx1) - ar(idx1) * sinkdsuk(idx1);
## al(idx1) = ar(idx1) * coskd(idx1) + sr(idx1) * sinkdk(idx1);
## if (idx1 > nn2) {
## sr(idx1 - 1) = sl(idx1);
## ar(idx1 - 1) = al(idx1);
## }
## }
## //Delta(m) = std::complex<double>(1) * (sr(nn1-1) * al(nn2-1) - ar(nn1-1) * sl(nn2-1));
## }
## """
##
## from scipy import weave
## from scipy.weave import converters
## weave.inline(code,
## ['n1', 'n2', 'Nregions', 'sl', 'sr', 'al', 'ar', 'len_kz', 'Delta',
## 'k', 'sinkdsuk', 'sinkdk', 'coskd',
## 'k_', 'sinkdsuk_', 'sinkdk_', 'coskd_'],
## type_converters = converters.blitz,
## compiler = 'gcc')
if len(kz) < 2:
# normalize and save only if len(kz) == 1
# otherwise, modo is ignored and only Delta is useful
        # normalize the left- and right-propagating solutions
alfa = sr[n1-1] / sl[n2-1]
sl[n2-1:] *= alfa
sr[n2-1:] *= alfa
al[n2-1:] *= alfa
ar[n2-1:] *= alfa
modo.sl = sl
modo.sr = sr
modo.al = al
modo.ar = ar
modo.k = k
modo.U = Uy
modo.keff = kz
modo.zero = Delta
modo.pars = FMMpars
return (Delta, modo)
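# Both shooting routines above propagate the field value (s*) and derivative
# (a*) layer by layer, from the left boundary rightwards and from the right
# boundary leftwards, enforcing continuity at every interface (with the extra
# 1/epsilon scaling of the derivative in the TM case). Delta is the
# Wronskian-like mismatch of the two half-solutions at the interface between
# the two highest-index layers; its zeros in kz are the effective wavevectors
# of the guided 1d modes.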
def remove_consecutives(x, y):
b = numpy.r_[numpy.diff(x) == 1, 0].astype(int)
ic = 0
flag = 0
l = []
for ib in range(len(b)):
if flag == 0:
c = [x[ib]]
ic += 1
if b[ib] == 1:
flag = 1
else:
l.append(c)
else:
c.append(x[ib])
if b[ib] != 1:
flag = 0
l.append(c)
index = []
for il, ll in enumerate(l):
newi = ll
itmp = numpy.argmax(y[newi])
index.append(newi[0] + itmp)
return index
def findzerosnew(x, y, searchinterval):
minsi = 2 * numpy.abs(x[1] - x[0])
if searchinterval < minsi:
searchinterval = minsi
dy = numpy.r_[0, numpy.diff(numpy.diff(scipy.log(y))), 0]
idy = numpy.where(dy > 0.005)[0]
if len(idy) == 0:
zeri = numpy.array([])
z1 = numpy.array([])
z2 = numpy.array([])
else:
ind = remove_consecutives(idy, dy)
zeri = x[ind]
z1 = numpy.zeros_like(zeri)
z2 = numpy.zeros_like(zeri)
dz = numpy.abs(numpy.diff(zeri))
if len(dz) == 0:
            z1[0] = zeri[0] - searchinterval/2
            z2[0] = zeri[0] + searchinterval/2
else:
delta = numpy.min([dz[0], searchinterval])
z1[0] = zeri[0] - delta/2
z2[0] = zeri[0] + delta/2
for idx in range(1, len(zeri) - 1):
delta = numpy.min([dz[idx - 1], dz[idx], searchinterval])
z1[idx] = zeri[idx] - delta/2
z2[idx] = zeri[idx] + delta/2
delta = numpy.min([dz[-1], searchinterval])
z1[-1] = zeri[-1] - delta/2
z2[-1] = zeri[-1] + delta/2
return (zeri, z1, z2)
def absfzzero2(t, f, xmin, xmax, ymin, ymax):
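    '''Objective for unconstrained minimization: map the unbounded parameters
    t onto the rectangle [xmin, xmax] x [ymin, ymax] and return |f(z)|**2 at
    the resulting complex point z.'''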
xmean = numpy.mean([xmin, xmax])
ymean = numpy.mean([ymin, ymax])
xwidth = xmax - xmin
ywidth = ymax - ymin
x = xmean + xwidth * t[0] / (1. + numpy.abs(t[0])) / 2.
y = ymean + ywidth * t[1] / (1. + numpy.abs(t[1])) / 2.
z = x + 1j * y
fv = f(z)
return numpy.abs(fv)**2
def fzzeroabs2(f, zmin, zmax):
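    '''Search for a complex zero of f inside the rectangle with corners zmin
    and zmax by minimizing |f|**2 with Nelder-Mead; returns (zero, f(zero)).'''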
xmin = numpy.real(zmin)
ymin = numpy.imag(zmin)
xmax = numpy.real(zmax)
ymax = numpy.imag(zmax)
tx0 = 0.
ty0 = 0.
t0 = scipy.optimize.fmin(lambda t: absfzzero2(t, f, xmin, xmax, ymin, ymax), [tx0, ty0],
maxiter=100000, maxfun=100000, xtol=1e-15, ftol=1e-15, disp=0)
xmean = numpy.mean([xmin, xmax])
ymean = numpy.mean([ymin, ymax])
xwidth = xmax - xmin
ywidth = ymax - ymin
x0 = xmean + xwidth * t0[0] / (1 + numpy.abs(t0[0])) / 2
y0 = ymean + ywidth * t0[1] / (1 + numpy.abs(t0[1])) / 2
z0 = x0 + 1j * y0
valf = f(z0)
return (z0, valf)
def scalarprod(modo1, modo2):
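    '''Modal scalar product (overlap integral) of two 1D FMM modes, evaluated
    analytically layer by layer; degenerate (ky1 == ky2) and vanishing
    transverse wavenumbers are handled as special cases.'''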
d = numpy.diff(modo1.U)
ky1 = modo1.k
al1 = modo1.al
sl1 = modo1.sl
ar1 = modo1.ar
sr1 = modo1.sr
ky2 = modo2.k
al2 = modo2.al
sl2 = modo2.sl
ar2 = modo2.ar
sr2 = modo2.sr
Nlayers = len(modo1.sl)
scprod = numpy.zeros_like(modo1.sl)
for idy in range(Nlayers):
if numpy.allclose(ky1[idy], ky2[idy]):
if numpy.linalg.norm(ky1) < 1e-10:
scprod[idy] = sl1[idy] * sl2[idy] * (modo1.U[idy+1] - modo1.U[idy])
else:
scprod[idy] = (sl1[idy] * al2[idy] - sr1[idy] * ar2[idy]) / ky1[idy] / ky2[idy] / 2. + \
d[idy]/2. * (sl1[idy] * sl2[idy] + al1[idy] * al2[idy] / ky1[idy] / ky2[idy])
else:
if numpy.linalg.norm(ky1) < 1e-10:
scprod[idy] = sl1[idy] * (al2[idy] - ar2[idy]) / ky2[idy]**2
elif numpy.linalg.norm(ky2) < 1e-10:
scprod[idy] = sl2[idy] * (al1[idy] - ar1[idy]) / ky1[idy]**2
else:
scprod[idy] = (sr1[idy] * ar2[idy] - ar1[idy] * sr2[idy] -
sl1[idy] * al2[idy] + al1[idy] * sl2[idy]) / (ky1[idy]**2 - ky2[idy]**2)
return numpy.sum(scprod)
def sueps(modo):
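    '''Return a deep copy of the mode with all field coefficients divided by
    the layer permittivity (used for the TM scalar product and normalization).'''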
eps = modo.pars['epsilon']
modosueps = copy.deepcopy(modo)
modosueps.sl /= eps
modosueps.sr /= eps
modosueps.al /= eps
modosueps.ar /= eps
return modosueps
def FMM1d_y(Uy, ny, wl, nmodi, boundaryRL, TETM, verbosity=0):
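    '''Find the first nmodi 1D modes of the multilayer slice defined by the
    interfaces Uy and refractive indices ny at wavelength wl, for the given
    left/right boundary conditions and polarization ('TE' or 'TM').
    Real zeros of the dispersion function are located first, the imaginary
    axis is scanned if they are not enough, and each zero is refined and the
    corresponding mode normalized.'''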
k0 = 2 * numpy.pi / wl
betay = ny * k0
Nstepskz = 1543
searchinterval = max(50. / Nstepskz, numpy.abs(numpy.min(numpy.imag(2. * betay))))
imsearchinterval = 10 * k0
ypointsperregion = 5000
FMMpars = {'epsilon': ny**2, 'beta': betay, 'boundary': boundaryRL, 'Uy': Uy}
# analytical solution
if numpy.allclose(ny, ny[0]):
Message('Uniform slice found: using analytical solution.', 2).show(verbosity)
return analyticalsolution(nmodi, TETM, FMMpars)
##rekz = numpy.linspace(numpy.max(numpy.real(betay)) + searchinterval, 0., Nstepskz)
rekz2 = numpy.linspace((numpy.max(numpy.real(betay))+searchinterval)**2, 0., Nstepskz)
rekz = scipy.sqrt(rekz2)
if TETM == 'TM':
Message('Shooting TM.', 3).show(verbosity)
matchingre, modotmp = FMMshootingTM(rekz, FMMpars)
else:
Message('Shooting TE.', 3).show(verbosity)
matchingre, modotmp = FMMshooting(rekz, FMMpars)
nre = rekz / k0
nre2 = rekz2 / k0**2
##zerire, z1, z2 = findzerosnew(nre, numpy.abs(matchingre), searchinterval / k0)
zerire2, z12, z22 = findzerosnew(nre2, numpy.abs(matchingre), (searchinterval / k0)**2)
zerire = scipy.sqrt(zerire2)
kz1 = zerire * k0 - searchinterval / 2. + 1j * imsearchinterval
kz2 = zerire * k0 + searchinterval / 2. - 1j * imsearchinterval
Message('Found %d real zeros.' % len(zerire), 2).show(verbosity)
if len(zerire) < nmodi:
Message('Number of real zeros not enough: scan imaginary axis.', 2).show(verbosity)
imkza = -numpy.max(numpy.real(betay))
imkzb = 0.
while len(kz1) < nmodi:
imkza = imkza + numpy.max(numpy.real(betay))
imkzb = imkzb + numpy.max(numpy.real(betay))
##imkz = numpy.linspace(imkza, imkzb, Nstepskz)
imkz2 = numpy.linspace(imkza**2, imkzb**2, Nstepskz)
imkz = scipy.sqrt(imkz2)
if TETM == 'TM':
matchingim, modotmp = FMMshootingTM(1j * imkz, FMMpars)
else:
matchingim, modotmp = FMMshooting(1j * imkz, FMMpars)
nim = imkz * wl / 2. / numpy.pi
nim2 = imkz2 * (wl / 2. / numpy.pi)**2
##zeriim, z1, z2 = findzerosnew(nim, numpy.abs(matchingim), searchinterval / k0)
zeriim2, z12, z22 = findzerosnew(nim2, numpy.abs(matchingim), (searchinterval / k0)**2)
zeriim = scipy.sqrt(zeriim2)
Message('Found %d imag zeros.' % len(zeriim), 2).show(verbosity)
kz1 = numpy.r_[kz1, 1j * (zeriim * k0 - imsearchinterval / 2. + 1j * searchinterval / 2.)]
kz2 = numpy.r_[kz2, 1j * (zeriim * k0 + imsearchinterval / 2. - 1j * searchinterval / 2.)]
mk = 0
modi = []
    # start the loop over the search intervals
Message('Refine zeros.', 2).show(verbosity)
for m in range(0, len(kz1)):
if mk == nmodi:
break
if TETM == 'TM':
z0, valf = fzzeroabs2(lambda kz: FMMshootingTM(kz, FMMpars)[0], kz1[m], kz2[m])
z0 = numpy.atleast_1d(z0)
else:
z0, valf = fzzeroabs2(lambda kz: FMMshooting(kz, FMMpars)[0], kz1[m], kz2[m])
z0 = numpy.atleast_1d(z0)
if len(z0) > 0:
if TETM == 'TM':
zero, modo = FMMshootingTM(z0, FMMpars)
else:
zero, modo = FMMshooting(z0, FMMpars)
if TETM == 'TM':
normalizzazione = 1. / numpy.sqrt(scalarprod(modo, sueps(modo)))
else:
normalizzazione = 1. / numpy.sqrt(scalarprod(modo, modo))
modo.sl *= normalizzazione
modo.al *= normalizzazione
modo.sr *= normalizzazione
modo.ar *= normalizzazione
mk += 1
modi.append(modo)
return modi
def script1d(Ux, Uy, refindex, wl, boundary, nmodislices, verbosity=0):
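    '''Compute the 1D TE and TM modes of every vertical slice of the cross
    section and return them packed in a list of Slice objects.'''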
nx = refindex.shape[0]
slices = []
for m in range(nx):
Message('Finding 1dmodes TE.', 1).show(verbosity)
ymodih = FMM1d_y(Uy, refindex[m,:], wl, nmodislices, boundary.yh, 'TE', verbosity)
Message('Finding 1dmodes TM.', 1).show(verbosity)
ymodie = FMM1d_y(Uy, refindex[m,:], wl, nmodislices, boundary.ye, 'TM', verbosity)
slice = Slice(x1=Ux[m], x2=Ux[m+1], Uy=Uy, boundary=boundary, modie=ymodie, modih=ymodih)
        # NOTE: do I really need them?
slice.Ux = Ux
slice.refractiveindex = refindex
slice.epsilon = refindex**2
slice.wl = wl
slices.append(slice)
return slices
def dot(modo):
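    '''Return the y-derivative of a mode, assuming the usual
    s*cos(k*y) + a*sin(k*y)/k field parameterization (an assumption based on
    the coefficient update below): (s, a) -> (a, -k**2 * s).'''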
k = modo.k
mododot = copy.deepcopy(modo)
mododot.sl = modo.al
mododot.sr = modo.ar
mododot.al = -k**2 * modo.sl
mododot.ar = -k**2 * modo.sr
return mododot
def genera_rotazione(slices):
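    '''Assemble the mode-coupling ("rotation") matrices between adjacent
    slices: Ree/Rhh couple modes of the same family (modie with modie, modih
    with modih), Rhe mixes the two families, and the *m variants are the
    overlaps taken in the reverse direction.'''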
nmodi = len(slices[0].modih)
k0 = 2 * numpy.pi / slices[0].wl
    Nslices = len(slices)
R = Struct()
# alloc R
R.Ree = numpy.zeros((nmodi, nmodi, Nslices-1), dtype=complex)
R.Reem = numpy.zeros_like(R.Ree)
R.Rhh = numpy.zeros_like(R.Ree)
R.Rhhm = numpy.zeros_like(R.Ree)
R.Rhe = numpy.zeros_like(R.Ree)
R.Rhem = numpy.zeros_like(R.Ree)
for idx in range(len(slices) - 1):
slice = slices[idx]
slicep1 = slices[idx + 1]
for n in range(nmodi):
Fhn = slice.modih[n]
Fp1hn = slicep1.modih[n]
Fen = slice.modie[n]
Fp1en = slicep1.modie[n]
Fhndot = dot(Fhn)
Fp1hndot = dot(Fp1hn)
khidx = slice.modih[n].keff
khidxp1 = slicep1.modih[n].keff
for m in range(nmodi):
Fem = slice.modie[m]
Fhm = slice.modih[m]
Fp1em = slicep1.modie[m]
Fp1hm = slicep1.modih[m]
Femsueps = sueps(Fem)
Femdotsueps = dot(Femsueps)
Fp1emsueps = sueps(Fp1em)
Fp1emdotsueps = dot(Fp1emsueps)
keidx = slice.modie[m].keff
keidxp1 = slicep1.modie[m].keff
R.Ree[n, m, idx] = scalarprod(Fen, Fp1emsueps)
R.Reem[n, m, idx] = scalarprod(Fp1en, Femsueps)
R.Rhh[n, m, idx] = scalarprod(Fhn, Fp1hm)
R.Rhhm[n, m, idx] = scalarprod(Fp1hn, Fhm)
s1 = k0 * scalarprod(Fhndot, Fp1emsueps) / khidx**2
s2 = k0 * scalarprod(Fhn, Fp1emdotsueps) / keidxp1**2
R.Rhe[n, m, idx] = (s1 + s2).item()
s1 = k0 * scalarprod(Fp1hndot, Femsueps) / khidxp1**2
s2 = k0 * scalarprod(Fp1hn, Femdotsueps) / keidx**2
R.Rhem[n, m, idx] = (s1 + s2).item()
return R
def ortonormalita(slices):
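    '''Check the orthonormality of the 1D modes inside each slice and the
    consistency of the inter-slice rotation matrices; return a single scalar
    error norm.'''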
nmodi = len(slices[0].modih)
k0 = 2 * numpy.pi / slices[0].wl
    Nslices = len(slices)
neesueps = numpy.zeros(Nslices, dtype=complex)
nhh = numpy.zeros_like(neesueps)
nRhe = numpy.zeros_like(neesueps)
nRee = numpy.zeros_like(neesueps)
nRhh = numpy.zeros_like(neesueps)
nAC = numpy.zeros_like(neesueps)
M = Struct()
M.ee = numpy.zeros((nmodi, nmodi, Nslices), dtype=complex)
M.eesueps = numpy.zeros_like(M.ee)
M.hh = numpy.zeros_like(M.ee)
M.Rhe = numpy.zeros_like(M.ee)
for idx, slice in enumerate(slices):
for n in range(nmodi):
Fhn = slice.modih[n]
Fen = slice.modie[n]
khidx = slice.modih[n].keff
for m in range(nmodi):
Fem = slice.modie[m]
Fhm = slice.modih[m]
keidxp1 = slice.modie[m].keff
M.ee[n, m, idx] = scalarprod(Fen, Fem)
M.eesueps[n, m, idx] = scalarprod(Fen, sueps(Fem))
M.hh[n, m, idx] = scalarprod(Fhn, Fhm)
Fp1em = slice.modie[m]
s1 = k0 * scalarprod(dot(Fhn), sueps(Fp1em)) / khidx**2
s2 = k0 * scalarprod(Fhn, sueps(dot(Fp1em))) / keidxp1**2
M.Rhe[n, m, idx] = (s1 + s2).item()
R = genera_rotazione(slices)
Ident = numpy.eye(nmodi)
for idx in range(Nslices):
neesueps[idx] = numpy.linalg.norm(M.eesueps[:,:,idx] - Ident)
nhh[idx] = numpy.linalg.norm(M.hh[:,:,idx] - Ident)
nRhe[idx] = numpy.linalg.norm(M.Rhe[:,:,idx])
for idx in range(Nslices-1):
nRee[idx] = numpy.linalg.norm(numpy.dot(R.Ree[:,:,idx], R.Reem[:,:,idx]) - Ident)
nRhh[idx] = numpy.linalg.norm(numpy.dot(R.Rhh[:,:,idx], R.Rhhm[:,:,idx]) - Ident)
nAC[idx] = numpy.linalg.norm(numpy.dot(R.Rhe[:,:,idx], R.Reem[:,:,idx]) +
numpy.dot(R.Rhh[:,:,idx], R.Rhem[:,:,idx]))
ns1 = numpy.linalg.norm(numpy.r_[neesueps, nhh, nRhe])
ns2 = numpy.linalg.norm(numpy.r_[nRee, nRhh, nAC])
errore = numpy.linalg.norm(numpy.r_[ns1, ns2]) / scipy.sqrt(8 * nmodi)
return errore
def method_of_component(kz_, slices, Rot, uscelto=None, icomp=None):
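    '''For each trial kz, measure how close the system matrix M is to being
    singular: a (random or previously selected) unit vector uscelto is used
    and 1/||M^-1 uscelto|| is returned as the component amplitude.'''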
kz = numpy.atleast_1d(kz_)
abscomp = numpy.zeros(len(kz))
    ## tmp = 500 # NOTE: why 500?
    tmp = 100 * len(slices[0].modie) * (len(slices) - 1) # NOTE: dimension of Mvec * 50 -- is that enough?
normu = numpy.zeros(tmp, dtype=complex)
for m in range(len(kz)):
M = Mvec(kz[m], slices, Rot)
urn = numpy.zeros((M.shape[0], tmp), dtype=complex)
if (uscelto is None) and (icomp is None):
for k in range(tmp):
numpy.random.seed()
ur = numpy.random.rand(M.shape[0])
urn[:,k] = ur / numpy.linalg.norm(ur)
normu[k] = numpy.linalg.norm(numpy.dot(M, urn[:,k]))
iurn = numpy.argmin(normu)
uscelto = urn[:, iurn]
icomp = numpy.argmax(uscelto)
Mmeno1u = numpy.linalg.solve(M, uscelto)
abscomp[m] = 1. / numpy.linalg.norm(Mmeno1u)
return (abscomp, uscelto, icomp)
def creaTeThSeSh(kz, slices):
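    '''Allocate the per-slice TE/TM propagation coefficient arrays
    (Te, Th, Se, Sh and their left/right variants) for the given kz.'''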
Nslices = len(slices)
nmodi = len(slices[0].modie)
d = numpy.array([s.x2 - s.x1 for s in slices])
k0 = 2. * numpy.pi / slices[0].wl
Th = numpy.zeros((nmodi, Nslices), dtype=complex)
Sh = numpy.zeros_like(Th)
Te = numpy.zeros_like(Th)
Se = numpy.zeros_like(Th)
Thleft = numpy.zeros_like(Th)
Teleft = numpy.zeros_like(Th)
Thright = numpy.zeros_like(Th)
    Teright = numpy.zeros_like(Th)
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
from DejaVu.Polylines import Polylines
import numpy.oldnumeric as Numeric
class NormalsViewer:
"""Object that take a DejaVu geometry and a viewer and displays the
geometry's normals in the viewer"""
def __init__(self, geom, viewer):
self.geom = geom
self.normalsGeom = Polylines("normals_for_" + geom.name)
self.viewer = viewer
viewer.AddObject(self.normalsGeom, parent=geom)
self.update()
def update(self):
vertices = self.geom.getVertices()
normals = self.geom.getVNormals()
        pts = Numeric.concatenate((vertices, vertices + normals), 1)
# Copyright (C) 2011 <NAME>
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from distutils.version import StrictVersion
import phonopy.structure.spglib as spg
from phonopy.structure.atoms import PhonopyAtoms
def get_supercell(unitcell, supercell_matrix, is_old_style=True, symprec=1e-5):
return Supercell(unitcell,
supercell_matrix,
is_old_style=is_old_style,
symprec=symprec)
def get_primitive(supercell, primitive_frame, symprec=1e-5):
return Primitive(supercell, primitive_frame, symprec=symprec)
def print_cell(cell, mapping=None, stars=None):
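    """Print the lattice vectors and fractional atomic positions of cell,
    optionally with masses, magnetic moments, star markers and an
    atom-mapping column."""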
symbols = cell.get_chemical_symbols()
masses = cell.get_masses()
magmoms = cell.get_magnetic_moments()
lattice = cell.get_cell()
print("Lattice vectors:")
print(" a %20.15f %20.15f %20.15f" % tuple(lattice[0]))
print(" b %20.15f %20.15f %20.15f" % tuple(lattice[1]))
print(" c %20.15f %20.15f %20.15f" % tuple(lattice[2]))
print("Atomic positions (fractional):")
for i, v in enumerate(cell.get_scaled_positions()):
num = " "
if stars is not None:
if i in stars:
num = "*"
num += "%d" % (i + 1)
line = ("%5s %-2s%18.14f%18.14f%18.14f" %
(num, symbols[i], v[0], v[1], v[2]))
if masses is not None:
line += " %7.3f" % masses[i]
if magmoms is not None:
line += " %5.3f" % magmoms[i]
if mapping is None:
print(line)
else:
print(line + " > %d" % (mapping[i] + 1))
class Supercell(PhonopyAtoms):
"""Build supercell from supercell matrix and unit cell
"""
def __init__(self,
unitcell,
supercell_matrix,
is_old_style=True,
symprec=1e-5):
"""
Note
----
``is_old_style=True`` invokes the following algorithm.
In this function, unit cell is considered
[1,0,0]
[0,1,0]
[0,0,1].
        Supercell matrix is given by relative ratio, e.g.,
[-1, 1, 1]
[ 1,-1, 1] is for FCC from simple cubic.
[ 1, 1,-1].
In this case multiplicities of surrounding simple lattice are [2,2,2].
First, create supercell with surrounding simple lattice.
Second, trim the surrounding supercell with the target lattice.
``is_old_style=False`` calls the Smith normal form.
        These two algorithms may order atoms in different ways. For backward
        compatibility, ``is_old_style=True`` is therefore the default
        option. However, the Smith normal form shows far better performance
        for very large supercell multiplicities.
Parameters
----------
unitcell: PhonopyAtoms
Unit cell
supercell_matrix: ndarray or list of list
Transformation matrix from unit cell to supercell. The
elements have to be integers.
shape=(3,3)
        is_old_style: bool
            This switches between the two algorithms. See Note.
        symprec: float, optional
            Tolerance used to find overlapping atoms in the supercell. The
            default value is 1e-5.
"""
self._is_old_style = is_old_style
self._s2u_map = None
self._u2s_map = None
self._u2u_map = None
self._supercell_matrix = np.array(supercell_matrix, dtype='intc')
self._create_supercell(unitcell, symprec)
def get_supercell_matrix(self):
return self._supercell_matrix
@property
def s2u_map(self):
return self._s2u_map
def get_supercell_to_unitcell_map(self):
return self.s2u_map
@property
def u2s_map(self):
return self._u2s_map
def get_unitcell_to_supercell_map(self):
return self.u2s_map
@property
def u2u_map(self):
return self._u2u_map
def get_unitcell_to_unitcell_map(self):
return self.u2u_map
def _create_supercell(self, unitcell, symprec):
mat = self._supercell_matrix
if self._is_old_style:
P = None
multi = self._get_surrounding_frame(mat)
            # trim_frame is used to trim overlapping atoms.
trim_frame = np.array([mat[0] / float(multi[0]),
mat[1] / float(multi[1]),
mat[2] / float(multi[2])])
else:
            # In the new style, it is unnecessary to trim atoms.
if (np.diag(np.diagonal(mat)) != mat).any():
snf = SNF3x3(mat)
snf.run()
P = snf.P
multi = np.diagonal(snf.D)
else:
P = None
multi = np.diagonal(mat)
trim_frame = np.eye(3)
sur_cell, u2sur_map = self._get_simple_supercell(unitcell, multi, P)
trimmed_cell_ = _trim_cell(trim_frame, sur_cell, symprec)
if trimmed_cell_:
supercell, sur2s_map, mapping_table = trimmed_cell_
else:
return False
num_satom = supercell.get_number_of_atoms()
num_uatom = unitcell.get_number_of_atoms()
N = num_satom // num_uatom
if N != determinant(self._supercell_matrix):
print("Supercell creation failed.")
print("Probably some atoms are overwrapped. "
"The mapping table is give below.")
print(mapping_table)
PhonopyAtoms.__init__(self)
else:
PhonopyAtoms.__init__(
self,
numbers=supercell.get_atomic_numbers(),
masses=supercell.get_masses(),
magmoms=supercell.get_magnetic_moments(),
scaled_positions=supercell.get_scaled_positions(),
cell=supercell.get_cell(),
pbc=True)
            self._u2s_map = np.arange(num_uatom)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import tensorflow as tf
import numpy as np
from tensorguard import ShapeError
from tensorguard import TensorGuard
def test_guard_raises_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
with pytest.raises(ShapeError):
tg.guard(a, "3, 2, 1")
def test_guard_infers_dimensions_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
tg.guard(a, "A, B, C")
assert tg.dims == {"A": 1, "B": 2, "C": 3}
def test_guard_infers_dimensions_complex_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
tg.guard(a, "A, B*2, A+C")
assert tg.dims == {"A": 1, "B": 1, "C": 2}
def test_guard_infers_dimensions_operator_priority_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 8])
tg.guard(a, "A, B, A+C*2+1")
assert tg.dims == {"A": 1, "B": 2, "C": 3}
def test_guard_raises_complex_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
with pytest.raises(ShapeError):
tg.guard(a, "A, B, B")
def test_guard_raises_inferred_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
b = tf.ones([3, 2, 5])
tg.guard(a, "A, B, C")
with pytest.raises(ShapeError):
tg.guard(b, "C, B, A")
def test_guard_ignores_wildcard_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3])
tg.guard(a, "*, *, 3")
assert tg.dims == {}
def test_guard_dynamic_shape_tensorflow():
tg = TensorGuard()
with pytest.raises(ShapeError):
tg.guard([None, 2, 3], "C, B, A")
tg.guard([None, 2, 3], "?, B, A")
tg.guard([1, 2, 3], "C?, B, A")
tg.guard([None, 2, 3], "C?, B, A")
def test_guard_ellipsis_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3, 4, 5])
tg.guard(a, "...")
tg.guard(a, "..., 5")
tg.guard(a, "..., 4, 5")
tg.guard(a, "1, ...")
tg.guard(a, "1, 2, ...")
tg.guard(a, "1, 2, ..., 4, 5")
tg.guard(a, "1, 2, 3, ..., 4, 5")
with pytest.raises(ShapeError):
tg.guard(a, "1, 2, 3, 4, 5, 6,...")
with pytest.raises(ShapeError):
tg.guard(a, "..., 1, 2, 3, 4, 5, 6")
def test_guard_ellipsis_infer_dims_tensorflow():
tg = TensorGuard()
a = tf.ones([1, 2, 3, 4, 5])
tg.guard(a, "A, B, ..., C")
assert tg.dims == {"A": 1, "B": 2, "C": 5}
# ============ pytorch ==================
def test_guard_raises_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
with pytest.raises(ShapeError):
tg.guard(a, "3, 2, 1")
def test_guard_infers_dimensions_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
tg.guard(a, "A, B, C")
assert tg.dims == {"A": 1, "B": 2, "C": 3}
def test_guard_infers_dimensions_complex_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
tg.guard(a, "A, B*2, A+C")
assert tg.dims == {"A": 1, "B": 1, "C": 2}
def test_guard_infers_dimensions_operator_priority_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 8])
tg.guard(a, "A, B, A+C*2+1")
assert tg.dims == {"A": 1, "B": 2, "C": 3}
def test_guard_raises_complex_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
with pytest.raises(ShapeError):
tg.guard(a, "A, B, B")
def test_guard_raises_inferred_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
b = torch.ones([3, 2, 5])
tg.guard(a, "A, B, C")
with pytest.raises(ShapeError):
tg.guard(b, "C, B, A")
def test_guard_ignores_wildcard_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3])
tg.guard(a, "*, *, 3")
assert tg.dims == {}
def test_guard_dynamic_shape_pytorch():
tg = TensorGuard()
with pytest.raises(ShapeError):
tg.guard([None, 2, 3], "C, B, A")
tg.guard([None, 2, 3], "?, B, A")
tg.guard([1, 2, 3], "C?, B, A")
tg.guard([None, 2, 3], "C?, B, A")
def test_guard_ellipsis_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3, 4, 5])
tg.guard(a, "...")
tg.guard(a, "..., 5")
tg.guard(a, "..., 4, 5")
tg.guard(a, "1, ...")
tg.guard(a, "1, 2, ...")
tg.guard(a, "1, 2, ..., 4, 5")
tg.guard(a, "1, 2, 3, ..., 4, 5")
with pytest.raises(ShapeError):
tg.guard(a, "1, 2, 3, 4, 5, 6,...")
with pytest.raises(ShapeError):
tg.guard(a, "..., 1, 2, 3, 4, 5, 6")
def test_guard_ellipsis_infer_dims_pytorch():
tg = TensorGuard()
a = torch.ones([1, 2, 3, 4, 5])
tg.guard(a, "A, B, ..., C")
assert tg.dims == {"A": 1, "B": 2, "C": 5}
# ================= numpy =======================
def test_guard_raises_numpy():
tg = TensorGuard()
a = np.ones([1, 2, 3])
with pytest.raises(ShapeError):
tg.guard(a, "3, 2, 1")
def test_guard_infers_dimensions_numpy():
tg = TensorGuard()
a = np.ones([1, 2, 3])
tg.guard(a, "A, B, C")
assert tg.dims == {"A": 1, "B": 2, "C": 3}
def test_guard_infers_dimensions_complex_numpy():
tg = TensorGuard()
a = | np.ones([1, 2, 3]) | numpy.ones |
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines3.common.logger import Logger as log
import random
from copy import deepcopy
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
buy_cost_pct,
sell_cost_pct,
reward_scaling,
state_space,
action_space,
mode='',
out_of_cash_penalty=0.01,
cash_limit=0.1):
        self.df = df  # data
        self.stock_dim = stock_dim  # number of stocks
        self.hmax = hmax  # maximum number of shares traded per day
        self.initial_amount = initial_amount  # starting capital
        self.buy_cost_pct = buy_cost_pct  # buy transaction cost
        self.sell_cost_pct = sell_cost_pct  # sell transaction cost
        self.reward_scaling = reward_scaling  # reward scaling factor
        self.state_space = state_space  # state dimension
        self.action_space = action_space  # action dimension
        self.mode = mode  # mode: 'test' or 'train'
        self.out_of_cash_penalty = out_of_cash_penalty  # penalty for running low on cash
        self.cash_limit = cash_limit  # minimum cash fraction of total assets
###################################################################################
self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))
####################################################################################
        self.day_start = 0  # start day
        self.day = self.day_start  # current day
        self.cash = self.initial_amount  # cash
        self.holds = [0]*self.stock_dim  # holdings
        self.cost = 0
        self.count_0 = 0  # number of consecutive no-op steps, tracked to improve sampling quality
self.actions_memory=[]
self.date_memory=[]
self.asset_memory=[]
def reset(self):
if self.mode == 'train':
lll = len(self.df.date.unique())
length = int(lll*0.95)
day_start = random.choice(range(length))
self.day_start = 0
else:
self.day_start = 0
print("day_start {0}".format(self.day_start))
self.day = self.day_start
        self.cash = self.initial_amount  # cash; for training this should be drawn randomly from the allowed range
        self.holds = [0]*self.stock_dim  # holdings
self.cost = 0
self.count_0 = 0
self.actions_memory=[]
self.date_memory=[]
self.asset_memory=[]
self.cash_memory = []
self.date_memory.append(self._get_date())
self.asset_memory.append(self.cash)
self.cash_memory.append(self.cash)
#if self.mode == 'train':
#self._initial_cash_and_buy_()
state = self._update_state()
return state
def _initial_cash_and_buy_(self):
"""Initialize the state, already bought some"""
data = self.df.loc[self.day, :]
'''
cash_max = max(data.cash_max)
cash_min = min(data.cash_min)
if cash_max > 10000*10:
cash_max = 10000*10
if cash_min < 10000*0.1:
cash_min = 10000*0.1
cash_u = random.uniform(cash_min, cash_max)
self.cash = self.initial_amount/10000 * cash_u
'''
prices = data.close.values.tolist()
avg_price = sum(prices)/len(prices)
        ran = random.random()  # random buy: the start day is random, so initial_amount could also be randomized; that would need an extra field giving the current cash range to sample from
        buy_nums_each_tic = ran*self.cash//(avg_price*len(prices))  # spend a random fraction of the cash, spread evenly over all tickers
buy_nums_each_tic = buy_nums_each_tic//100*100
cost = sum(prices)*buy_nums_each_tic
self.cash = self.cash - cost
self.holds = [buy_nums_each_tic]*self.stock_dim
'''
state = [self.initial_amount-cost] + \
self.data.close.values.tolist() + \
[buy_nums_each_tic]*self.stock_dim + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
'''
def step(self, actions):
#print('step')
#actions = actions * self.hmax #actions initially is scaled between 0 to 1
        #actions = (actions.astype(int)) #convert to integer because we can't buy fractions of shares
actions_old = None
if self.mode == 'test':
actions_old = actions.copy()
begin_total_asset = self._update_total_assets()
stocks_can_buy = self._get_can_buy()
stocks_can_sell = -np.array(self.holds)
base_ = np.array([-1]*self.stock_dim)
actions = (actions - base_)/2*(stocks_can_buy - stocks_can_sell)+stocks_can_sell
        argsort_actions = np.argsort(actions)  # sort the action indices
        sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]  # indices of stocks to sell
        buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]  # indices of stocks to buy
for index in sell_index:
actions[index] = self._sell_stock(index, actions[index]) * (-1)
for index in buy_index:
actions[index] = self._buy_stock(index, actions[index])
        #self.actions_memory.append(actions)  # these actions have already been post-processed; actions that stay at zero should also be penalized, which is a form of reward shaping
self.day += 1
terminal = self.day >= len(self.df.index.unique())-1
        if terminal == True:  # count the non-zero actions
count_non0 = np.count_nonzero(self.actions_memory)
#print('no zero count {0} mode {1}'.format(count_non0, self.mode))
        state = self._update_state()  # new day: close prices and technical indicators have changed
end_total_asset = self._update_total_assets()
if self.mode == 'test':
actions_all = np.hstack((actions, actions_old))
self.actions_memory.append(actions_all)
self.date_memory.append(self._get_date())
self.asset_memory.append(end_total_asset)
self.cash_memory.append(self.cash)
        reward = end_total_asset - begin_total_asset  # the change in total assets is the reward
'''
penalty2 = 0
        if self.cash < end_total_asset*self.cash_limit:  # penalize running low on cash, otherwise training explores too little for lack of funds; the limit can be removed at test time
penalty2 = self.initial_amount*self.out_of_cash_penalty
reward -= penalty2
'''
'''
        if self.mode == 'train':  # speeds up effective sampling; once the loss is low enough this block can be commented out
count_non0 = np.count_nonzero(actions)
if count_non0 == 0:
self.count_0 += 1
day_pass = self.day - self.day_start
            if self.count_0 > 200:  # 0.99**200 is about 0.13; consider using a smaller gamma later
terminal = True
print('terminal by hand')
else:
self.count_0 = 0
'''
reward = reward * self.reward_scaling
return state, reward, terminal, {}
def _sell_stock(self, index, action):
def _do_sell_normal():
data = self.df.loc[self.day, :]
data = data.reset_index(drop=True)
close = data.close
price = close[index]
            if price > 0: # price is positive
                # Sell only if the price is > 0 (no missing data in this particular date)
                # perform sell action based on the sign of the action
                if self.holds[index] > 0: # holdings are positive
                    # Sell only if current asset is > 0
                    sell_num_shares = min(abs(action), self.holds[index]) # no short selling
                    sell_num_shares = sell_num_shares//100*100 # round down to a multiple of 100 shares
                    sell_amount = price * sell_num_shares * (1 - self.sell_cost_pct) # net proceeds after transaction cost
                    self.cash += sell_amount # update cash
                    self.holds[index] -= sell_num_shares # update holdings
                    self.cost += price * sell_num_shares * self.sell_cost_pct # accumulate transaction cost
                    #self.trades+=1 # update trade count
else:
sell_num_shares = 0
else:
sell_num_shares = 0
return sell_num_shares
sell_num_shares = _do_sell_normal()
return sell_num_shares
def _buy_stock(self, index, action):
def _do_buy():
data = self.df.loc[self.day, :]
data = data.reset_index(drop=True)
close = data.close
price = close[index]
            if price > 0: # price is positive
                # Buy only if the price is > 0 (no missing data in this particular date)
                available_amount = self.cash // price # maximum shares affordable with all cash
                # update balance
                buy_num_shares = min(available_amount, action) # shares actually bought
                buy_num_shares = buy_num_shares//100*100
                buy_amount = price * buy_num_shares * (1 + self.buy_cost_pct) # total amount spent
                self.cash -= buy_amount # update cash
                self.holds[index] += buy_num_shares # update holdings
                self.cost += price * buy_num_shares * self.buy_cost_pct # accumulate transaction cost
                #self.trades+=1 # update trade count
else:
buy_num_shares = 0
return buy_num_shares
buy_num_shares = _do_buy()
return buy_num_shares
def _update_total_assets(self):
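        '''Total portfolio value: cash plus the market value of current holdings.'''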
data = self.df.loc[self.day, :]
close = data.close
        total_assets = self.cash + sum(np.array(close) * np.array(self.holds))
        return total_assets
'''Various image handling functions.'''
from .common import *
import math
import numpy as np
# HANDLE IMAGE LOADING
try:
import lycon
# define functions
def imwrite(img_path, img):
'''Stores image to disk.'''
img = np.ascontiguousarray(img, dtype=np.uint8)
lycon.save(img_path, img)
def imread(img_path, channels=3):
'''Loads an image from the given path.'''
img = lycon.load(img_path)
if channels == 3:
img = img[...,[2,1,0]]
elif channels == 1:
img = np.mean(img, axis=-1, keepdims=True)
return img
def imresize(img, width, height):
return lycon.resize(img, width=width, height=height, interpolation=lycon.Interpolation.LINEAR)
except ImportError:
print("WARNING: Could not find lycon, using cv2 instead!")
try:
import cv2
except ImportError:
raise RuntimeError("storage library requires either cv2 or lycon to be installed!")
# define functions
def imwrite(img_path, img):
'''Stores image to disk.'''
cv2.imwrite(img_path, img)
def imread(img_path, channels=3):
'''Loads an image from the given path.'''
img = cv2.imread(img_path, 1)
if channels == 3:
img = img[...,[2,1,0]]
elif channels == 1:
img = np.mean(img, axis=-1, keepdims=True)
return img
def imresize(img, width, height):
return cv2.resize(img, (int(width), int(height)), interpolation=cv2.INTER_LINEAR)
# ----
def get_padding(params):
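    '''Parse params.training.padding into a (PadMode, ResizeMode, pad color) triple.'''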
if "padding" not in params.training:
raise KeyError("Could not find value 'padding' in 'training'!")
pad = params.training.padding
mode = PadMode.EDGE
resize = ResizeMode.FIT
color = (0,0,0)
if pad[0] == "center":
mode = PadMode.CENTER
if pad[1] == "stretch":
resize = ResizeMode.STRETCH
elif pad[1] == "black":
resize = ResizeMode.PAD_COLOR
color = (0,0,0)
elif pad[1] == "blue":
resize = ResizeMode.PAD_COLOR
color = (0,0,255)
elif pad[1] == "red":
resize = ResizeMode.PAD_COLOR
color = (255,0,0)
elif pad[1] == "green":
resize = ResizeMode.PAD_COLOR
color = (0,255,0)
elif pad[1] == "color":
resize = ResizeMode.PAD_COLOR
color = params.training.pad_color
elif pad[1] == "random":
resize = ResizeMode.PAD_RANDOM
elif pad[1] == "mean":
resize = ResizeMode.PAD_MEAN
elif pad[1] == "edge":
resize = ResizeMode.PAD_EDGE
return mode, resize, color
def imread_resize(img_path, params):
'''Loads an image from the given path and resizes it according to configuration.'''
img = imread(img_path, params.network.color_channels)
mode, res_mode, pad_color = get_padding(params)
img, _, _ = resize(img, params.network.input_size, res_mode, pad_color, mode)
return img
def pad(img, size, resize=ResizeMode.FIT, pad_color=(0,0,0), pad_mode=PadMode.EDGE):
'''Pads an image to a new size.
Returns:
img (np.array): padded image
offset (tuple): integer tuple that stores the offset from the upper left corner in format `[TOP, LEFT]`
'''
# retrieve general parameter
pad_size = [(size[0] - img.shape[0]), (size[1] - img.shape[1])]
padding = [(0, 0), (0, 0), (0, 0)]
# add padding to the image
if pad_mode == PadMode.EDGE:
padding = [(0, int(pad_size[0])), (0, int(pad_size[1])), (0, 0)]
pad_size = [0, 0]
elif pad_mode == PadMode.CENTER:
pad_size = [pad_size[0] / 2, pad_size[1] / 2]
padding = [(math.floor(pad_size[0]), math.ceil(pad_size[0])), (math.floor(pad_size[1]), math.ceil(pad_size[1])), (0, 0)]
# check additional padding modes
if resize == ResizeMode.PAD_COLOR:
img_new = np.stack([np.full(size, col) for col in pad_color], axis=-1)
img_new[padding[0][0]:padding[0][0]+img.shape[0], padding[1][0]:padding[1][0]+img.shape[1], :] = img
img = img_new
elif resize == ResizeMode.PAD_MEAN:
mode = "mean"
elif resize == ResizeMode.PAD_EDGE:
mode = "edge"
elif resize == ResizeMode.PAD_RANDOM:
img_new = np.random.randint(low=0, high=255, size=[size[0], size[1], 3])
img_new[padding[0][0]:padding[0][0]+img.shape[0], padding[1][0]:padding[1][0]+img.shape[1], :] = img
img = img_new
else:
return img, (0, 0)
# update the image
if resize not in (ResizeMode.PAD_COLOR, ResizeMode.PAD_RANDOM):
padding = padding if len(img.shape)>=3 and img.shape[2]>1 else padding[:2]
img = np.pad(img, padding, mode=mode)
return img, (padding[0][0], padding[1][0])
def resize(img, size=None, resize=ResizeMode.FIT, pad_color=(0,0,0), pad_mode=PadMode.EDGE):
'''Resizes the image and provides the scale.
Returns:
img (np.array): Array of the image
scale (tuple): Tuple of float values containing the scale of the image in both dimensions
offset (tuple): Tuple of int values containing the offset of the image from top left corner (through padding)
'''
# check if valid
if size is None:
return img, (1.0, 1.0), (0, 0)
# retrieve some params
img_size = img.shape[:2]
offset = (0, 0)
scale = (1.0, 1.0)
# check the type of data
if type(size) == tuple or type(size) == list or type(size) == np.ndarray:
if resize == ResizeMode.FIT:
frac = min((size[0] / img_size[0], size[1] / img_size[1]))
scale = (frac, frac)
elif resize == ResizeMode.STRETCH:
scale = (size[0] / img_size[0], size[1] / img_size[1])
frac = size
else:
frac = min((size[0] / img_size[0], size[1] / img_size[1]))
scale = (frac, frac)
elif type(size) == int:
if resize == ResizeMode.FIT:
frac = float(size) / max(img_size)
scale = (frac, frac)
elif resize == ResizeMode.STRETCH:
frac = (size, size)
scale = (frac[0] / img_size[0], frac[1] / img_size[1])
else:
frac = float(size) / max(img_size)
scale = (frac, frac)
size = (size, size)
else:
raise ValueError("Size has unkown type ({}: {})".format(type(size), size))
# scale image and set padding
#img = scipy.misc.imresize(img, frac)
nsize = img.shape
if isinstance(frac, float):
nsize = [min(np.ceil(nsize[0] * frac), size[0]), min(np.ceil(nsize[1] * frac), size[1])]
else:
nsize = frac
img = imresize(img, width=nsize[1], height=nsize[0])
img, offset = pad(img, size, resize, pad_color, pad_mode)
return img, scale, offset
def get_spaced_colors(n):
'''Retrieves n colors distributed over the color space.'''
max_value = 16581375 #255**3
interval = int(max_value / n)
colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]
return [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in colors][:n]
def fill_patch(img, bbox, mode, color):
'''Fills the given image patch in the given mode.
Args:
img (np.ndarray): Image array
bbox (list): Bounding box for the patch in absolute coordinates and yx format
mode (FillMode): FillMode that is used to fill the item
'''
    # safety: check size of the box against image size
bbox = [max(0, bbox[0]), max(0, bbox[1]), min(img.shape[0], bbox[2]), min(img.shape[1], bbox[3])]
def _gen_patch(color):
arr = []
for i in range(len(color)):
            el = np.full([bbox[2] - bbox[0], bbox[3] - bbox[1]], color[i])
# pylint: disable=missing-function-docstring, missing-module-docstring/
import numpy as np
from pyccel.decorators import types, stack_array, allow_negative_index
a_1d = np.array([1 << i for i in range(21)], dtype=int)
a_2d_f = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int, order='F')
a_2d_c = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int)
#==============================================================================
# 1D ARRAYS OF INT-32
#==============================================================================
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_add( x, a ):
x[:] += a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_div( x, a ):
x[:] = x / a
@types( 'int32[:]', 'int32' )
def array_int32_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_add( x, y ):
x[:] += y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_sub( x, y ):
x[:] -= y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_mul( x, y ):
x[:] *= y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_idiv( x, y ):
x[:] = x // y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_add_augassign( x, y ):
x += y
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_sub_augassign( x, y ):
x -= y
def array_int_1d_initialization_1():
import numpy as np
a = np.array([1, 2, 4, 8, 16])
b = np.array(a)
return np.sum(b), b[0], b[-1]
def array_int_1d_initialization_2():
import numpy as np
a = [1, 2, 4, 8, 16]
b = np.array(a)
return np.sum(b), b[0], b[-1]
def array_int_1d_initialization_3():
import numpy as np
a = (1, 2, 4, 8, 16)
b = np.array(a)
return np.sum(b), b[0], b[-1]
#==============================================================================
# 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int32[:,:]', 'int32' )
def array_int32_2d_C_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_add( x, y ):
x[:,:] += y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_idiv( x, y ):
x[:,:] = x // y
#==============================================================================
# 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int32[:,:](order=F)', 'int32' )
def array_int32_2d_F_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_add( x, y ):
x[:,:] += y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_idiv( x, y ):
x[:,:] = x // y
#==============================================================================
# 1D ARRAYS OF INT-64
#==============================================================================
@types( 'int[:]', 'int' )
def array_int_1d_scalar_add( x, a ):
x[:] += a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'int[:]', 'int' )
def array_int_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'int[:]', 'int[:]' )
def array_int_1d_add( x, y ):
x[:] += y
@types( 'int[:]', 'int[:]' )
def array_int_1d_sub( x, y ):
x[:] -= y
@types( 'int[:]', 'int[:]' )
def array_int_1d_mul( x, y ):
x[:] *= y
@types( 'int[:]', 'int[:]' )
def array_int_1d_idiv( x, y ):
x[:] = x // y
#==============================================================================
# 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int[:,:]', 'int' )
def array_int_2d_C_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_add( x, y ):
x[:,:] += y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'int[:,:]', 'int[:,:]' )
def array_int_2d_C_idiv( x, y ):
x[:,:] = x // y
@types('int[:,:]')
def array_int_2d_C_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]])
a[:,:] = tmp[:,:]
#==============================================================================
# 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'int[:,:](order=F)', 'int' )
def array_int_2d_F_scalar_idiv( x, a ):
x[:,:] = x // a
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_add( x, y ):
x[:,:] += y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'int[:,:](order=F)', 'int[:,:](order=F)' )
def array_int_2d_F_idiv( x, y ):
x[:,:] = x // y
@types('int[:,:](order=F)')
def array_int_2d_F_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='int', order='F')
a[:,:] = tmp[:,:]
#==============================================================================
# 1D ARRAYS OF REAL
#==============================================================================
@types( 'real[:]', 'real' )
def array_real_1d_scalar_add( x, a ):
x[:] += a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_sub( x, a ):
x[:] -= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_mul( x, a ):
x[:] *= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_div( x, a ):
x[:] /= a
@types( 'real[:]', 'real' )
def array_real_1d_scalar_idiv( x, a ):
x[:] = x // a
@types( 'real[:]', 'real[:]' )
def array_real_1d_add( x, y ):
x[:] += y
@types( 'real[:]', 'real[:]' )
def array_real_1d_sub( x, y ):
x[:] -= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_mul( x, y ):
x[:] *= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_div( x, y ):
x[:] /= y
@types( 'real[:]', 'real[:]' )
def array_real_1d_idiv( x, y ):
x[:] = x // y
#==============================================================================
# 2D ARRAYS OF REAL WITH C ORDERING
#==============================================================================
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_add( x, a ):
x[:,:] += a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_sub( x, a ):
x[:,:] -= a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_mul( x, a ):
x[:,:] *= a
@types( 'real[:,:]', 'real' )
def array_real_2d_C_scalar_div( x, a ):
x[:,:] /= a
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_add( x, y ):
x[:,:] += y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_sub( x, y ):
x[:,:] -= y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_mul( x, y ):
x[:,:] *= y
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_div( x, y ):
x[:,:] /= y
@types('real[:,:]')
def array_real_2d_C_array_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='float')
a[:,:] = tmp[:,:]
@types('real[:,:]','real[:,:]', 'real[:,:,:]')
def array_real_3d_C_array_initialization_1(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float')
a[:,:,:] = tmp[:,:,:]
@types('real[:,:,:]')
def array_real_3d_C_array_initialization_2(a):
from numpy import array
x = array([[[0., 1., 2., 3.], [4., 5., 6., 7.], [8., 9., 10., 11.]],
[[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]], order='C')
a[:,:,:] = x[:,:,:]
@types('real[:,:,:]','real[:,:,:]', 'real[:,:,:,:]')
def array_real_4d_C_array_initialization(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float')
a[:,:,:,:] = tmp[:,:,:,:]
#==============================================================================
# 2D ARRAYS OF REAL WITH F ORDERING
#==============================================================================
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_add( x, a ):
x[:,:] += a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_sub( x, a ):
x[:,:] -= a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_mul( x, a ):
x[:,:] *= a
@types( 'real[:,:](order=F)', 'real' )
def array_real_2d_F_scalar_div( x, a ):
x[:,:] /= a
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_add( x, y ):
x[:,:] += y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_sub( x, y ):
x[:,:] -= y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_mul( x, y ):
x[:,:] *= y
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_div( x, y ):
x[:,:] /= y
@types('real[:,:](order=F)')
def array_real_2d_F_array_initialization(a):
from numpy import array
tmp = array([[1, 2, 3], [4, 5, 6]], dtype='float', order='F')
a[:,:] = tmp[:,:]
@types('real[:,:](order=F)','real[:,:](order=F)', 'real[:,:,:](order=F)')
def array_real_3d_F_array_initialization_1(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float', order='F')
a[:,:,:] = tmp[:,:,:]
@types('real[:,:,:](order=F)')
def array_real_3d_F_array_initialization_2(a):
from numpy import array
x = array([[[0., 1., 2., 3.], [4., 5., 6., 7.], [8., 9., 10., 11.]],
[[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]], order='F')
a[:,:,:] = x[:,:,:]
@types('real[:,:,:](order=F)','real[:,:,:](order=F)', 'real[:,:,:,:](order=F)')
def array_real_4d_F_array_initialization(x, y, a):
from numpy import array
tmp = array([x, y], dtype='float', order='F')
a[:,:,:,:] = tmp[:,:,:,:]
@types('real[:,:](order=F)', 'real[:,:,:,:](order=F)')
def array_real_4d_F_array_initialization_mixed_ordering(x, a):
import numpy as np
tmp = np.array(((((0., 1.), (2., 3.)),
((4., 5.), (6., 7.)),
((8., 9.), (10., 11.))),
(((12., 13.), (14., 15.)),
x,
((20., 21.), (22., 23.)))),
dtype='float', order='F')
a[:,:,:,:] = tmp[:,:,:,:]
#==============================================================================
# COMPLEX EXPRESSIONS IN 3D : TEST CONSTANT AND UNKNOWN SHAPES
#==============================================================================
@types( 'int32[:]', 'int32[:]' )
def array_int32_1d_complex_3d_expr( x, y ):
from numpy import full, int32
z = full(3,5, dtype=int32)
x[:] = (x // y) * x + z
@types( 'int32[:,:]', 'int32[:,:]' )
def array_int32_2d_C_complex_3d_expr( x, y ):
from numpy import full, int32
z = full((2,3),5, dtype=int32)
x[:] = (x // y) * x + z
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)' )
def array_int32_2d_F_complex_3d_expr( x, y ):
from numpy import full, int32
z = full((2,3),5,order='F', dtype=int32)
x[:] = (x // y) * x + z
@types( 'real[:]', 'real[:]' )
def array_real_1d_complex_3d_expr( x, y ):
from numpy import full
z = full(3,5)
x[:] = (x // y) * x + z
@types( 'real[:,:]', 'real[:,:]' )
def array_real_2d_C_complex_3d_expr( x, y ):
from numpy import full
z = full((2,3),5)
x[:] = (x // y) * x + z
@types( 'real[:,:](order=F)', 'real[:,:](order=F)' )
def array_real_2d_F_complex_3d_expr( x, y ):
from numpy import full
z = full((2,3),5,order='F')
x[:] = (x // y) * x + z
@types( 'int32[:]', 'int32[:]', 'bool[:]' )
def array_int32_in_bool_out_1d_complex_3d_expr( x, y, ri ):
from numpy import full, int32, empty
z = full(3,5, dtype=int32)
ri[:] = (x // y) * x > z
@types( 'int32[:,:]', 'int32[:,:]', 'bool[:,:]' )
def array_int32_in_bool_out_2d_C_complex_3d_expr( x, y, ri ):
from numpy import full, int32
z = full((2,3),5, dtype=int32)
ri[:] = (x // y) * x > z
@types( 'int32[:,:](order=F)', 'int32[:,:](order=F)', 'bool[:,:](order=F)' )
def array_int32_in_bool_out_2d_F_complex_3d_expr( x, y, ri ):
from numpy import full, int32
z = full((2,3),5,order='F', dtype=int32)
ri[:] = (x // y) * x > z
#==============================================================================
# 1D STACK ARRAYS OF REAL
#==============================================================================
@stack_array('a')
def array_real_1d_sum_stack_array():
from numpy import zeros
a = zeros(10)
s = 0.
for i in range(10):
s += a[i]
return s
@stack_array('a')
def array_real_1d_div_stack_array():
from numpy import ones
a = ones(10)
s = 0.
for i in range(10):
s += 1.0 / a[i]
return s
@stack_array('a')
@stack_array('b')
def multiple_stack_array_1():
from numpy import ones, array
a = ones(5)
b = array([1, 3, 5, 7, 9])
s = 0.0
for i in range(5):
s += a[i] / b[i]
return s
@stack_array('a')
@stack_array('b', 'c')
def multiple_stack_array_2():
from numpy import ones, array
a = ones(5)
b = array([2, 4, 6, 8, 10])
c = array([1, 3, 5, 7, 9])
s = 0.0
for i in range(5):
s = s + b[i] - a[i] / c[i]
return s
#==============================================================================
# 2D STACK ARRAYS OF REAL
#==============================================================================
@stack_array('a')
def array_real_2d_sum_stack_array():
from numpy import zeros
a = zeros((10, 10))
s = 0.
for i in range(10):
for j in range(10):
s += a[i][j]
return s
@stack_array('a')
def array_real_2d_div_stack_array():
from numpy import full
a = full((10, 10), 2)
s = 1.
for i in range(10):
for j in range(10):
s /= a[i][j]
return s
@stack_array('a')
@stack_array('b')
def multiple_2d_stack_array_1():
from numpy import ones, array
a = ones((2, 5))
b = array([[1, 3, 5, 7, 9], [11, 13, 17, 19, 23]])
s = 0.0
j = 0
for i in range(2):
for j in range(5):
s += a[i][j] / b[i][j]
return s
@stack_array('a')
@stack_array('b', 'c')
def multiple_2d_stack_array_2():
from numpy import ones, array
a = ones(5)
b = array([[2, 4, 6, 8, 10], [1, 3, 5, 7, 9]])
c = array([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
s = 0.0
for i in range(2):
for j in range(5):
s = s + b[i][j] - a[j] / c[i][j]
return s
#==============================================================================
# TEST: Product and matrix multiplication
#==============================================================================
@types('real[:], real[:]')
def array_real_1d_1d_prod(x, out):
from numpy import prod
out[:] = prod(x)
@types('real[:,:], real[:], real[:]')
def array_real_2d_1d_matmul(A, x, out):
from numpy import matmul
out[:] = matmul(A, x)
@types('real[:,:], real[:]')
def array_real_2d_1d_matmul_creation(A, x):
from numpy import matmul
out = matmul(A, x)
return out.sum()
@types('real[:,:](order=F), real[:], real[:]')
def array_real_2d_1d_matmul_order_F(A, x, out):
from numpy import matmul
out[:] = matmul(A, x)
@types('real[:], real[:,:], real[:]')
def array_real_1d_2d_matmul(x, A, out):
from numpy import matmul
out[:] = matmul(x, A)
@types('real[:,:], real[:,:], real[:,:]')
def array_real_2d_2d_matmul(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
@types('real[:,:](order=F), real[:,:](order=F), real[:,:](order=F)')
def array_real_2d_2d_matmul_F_F(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
# Mixed order, not supported currently, see #244
@types('real[:,:], real[:,:](order=F), real[:,:]')
def array_real_2d_2d_matmul_mixorder(A, B, out):
from numpy import matmul
out[:,:] = matmul(A, B)
@types('real[:,:], real[:,:], real[:,:]')
def array_real_2d_2d_matmul_operator(A, B, out):
out[:,:] = A @ B
@types('real[:], real[:], real[:]')
def array_real_loopdiff(x, y, out):
dxy = x - y
for k in range(len(x)):
out[k] = dxy[k]
#==============================================================================
# KEYWORD ARGUMENTS
#==============================================================================
def array_kwargs_full():
""" full(shape, fill_value, dtype=None, order='C')
"""
from numpy import sum as np_sum
from numpy import full
n = 3
a = full((n, n-1), 0.5, 'float', 'C')
b = full((n+1, 2*n), 2.0, order='F')
c = full((1, n), 3)
d = full(2+n, order='F', fill_value=5)
e = full(dtype=int, fill_value=1.0, shape=2*n)
return np_sum(a) + np_sum(b) + np_sum(c) + np_sum(d) + np_sum(e)
def array_kwargs_ones():
""" ones(shape, dtype=float, order='C')
"""
from numpy import sum as np_sum
from numpy import ones
n = 4
a = ones((n, n-1), 'float', 'C')
b = ones((n+1, 2*n), float, order='F')
c = ones((1, n), complex)
d = ones(dtype=int, shape=2+n)
return np_sum(a) + np_sum(b) + np_sum(c) + np_sum(d)
#==============================================================================
# NEGATIVE INDEXES
#==============================================================================
@types('int')
def constant_negative_index(n):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[-1], a[-2]
@types('int')
def almost_negative_index(n):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
j = -1
return a[-j]
@allow_negative_index('a')
@types('int', 'int')
def var_negative_index(n, idx):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[idx]
@allow_negative_index('a')
@types('int', 'int', 'int')
def expr_negative_index(n, idx_1, idx_2):
import numpy as np
a = np.empty(n, dtype=int)
for i in range(n):
a[i] = i
return a[idx_1-idx_2]
@allow_negative_index('a')
@allow_negative_index('b')
@types('int', 'int')
def test_multiple_negative_index(c, d):
import numpy as np
a = np.array([1, 2, 3, 4, 5, 6])
b = np.array([1, 2, 3])
x = a[c]
y = b[d]
return x, y
@allow_negative_index('a', 'b')
@types('int', 'int')
def test_multiple_negative_index_2(c, d):
import numpy as np
a = np.array([1.2, 2.2, 3.2, 4.2])
b = np.array([1, 5, 9, 13])
x = a[c] * d
y = b[d] * c
return x, y
@allow_negative_index('a')
@allow_negative_index('b', 'c')
@types('int', 'int', 'int')
def test_multiple_negative_index_3(d, e, f):
import numpy as np
a = np.array([1.2, 2.2, 3.2, 4.2])
b = np.array([1])
c = np.array([1, 2, 3])
return a[d], b[e], c[f]
@allow_negative_index('a')
@types('int[:]')
def test_argument_negative_index_1(a):
c = -2
d = 5
return a[c], a[d]
@allow_negative_index('a', 'b')
@types('int[:]', 'int[:]')
def test_argument_negative_index_2(a, b):
c = -2
d = 3
return a[c], a[d], b[c], b[d]
#==============================================================================
# SHAPE INITIALISATION
#==============================================================================
def array_random_size():
import numpy as np
a = np.zeros(np.random.randint(23))
c = np.zeros_like(a)
return np.shape(a)[0], np.shape(c)[0]
@types('int','int')
def array_variable_size(n,m):
import numpy as np
s = n
a = np.zeros(s)
s = m
c = np.zeros_like(a)
return np.shape(a)[0], np.shape(c)[0]
#==============================================================================
# 1D ARRAY SLICING
#==============================================================================
@types('int[:]')
def array_1d_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_4(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_6(a):
import numpy as np
b = a[-5:]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_8(a):
import numpy as np
b = a[5:-5]
return np.sum(b), b[0], b[-1], len(b)
@types('int[:]')
def array_1d_slice_9(a):
import numpy as np
b = a[-15:15]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_10(a):
import numpy as np
c = -15
b = a[c:]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_11(a):
import numpy as np
c = -5
b = a[:c]
return np.sum(b), b[0], b[-1], len(b)
@allow_negative_index('a')
@types('int[:]')
def array_1d_slice_12(a):
import numpy as np
c = -15
d = -5
b = a[c:d]
return np.sum(b), b[0], b[-1], len(b)
#==============================================================================
# 2D ARRAY SLICE ORDER F
#==============================================================================
@types('int[:,:](order=F)')
def array_2d_F_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_4(a):
import numpy as np
b = a[-15:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_6(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_8(a):
import numpy as np
b = a[::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_9(a):
import numpy as np
b = a[5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_10(a):
import numpy as np
b = a[:5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_11(a):
import numpy as np
b = a[:, 5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_12(a):
import numpy as np
b = a[:, :5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_13(a):
import numpy as np
b = a[:-5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_14(a):
import numpy as np
b = a[-5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_15(a):
import numpy as np
b = a[:, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_16(a):
import numpy as np
b = a[:, :-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_17(a):
import numpy as np
b = a[:, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_18(a):
import numpy as np
b = a[5:15, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_19(a):
import numpy as np
b = a[5:15, -5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:](order=F)')
def array_2d_F_slice_20(a):
import numpy as np
b = a[5:15, 5:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_21(a):
import numpy as np
c = -5
d = 5
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_22(a):
import numpy as np
c = -5
d = -15
b = a[d:15, 5:c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@allow_negative_index('a')
@types('int[:,:](order=F)')
def array_2d_F_slice_23(a):
import numpy as np
c = -5
b = a[:c, :c]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
#==============================================================================
# 2D ARRAY SLICE ORDER C
#==============================================================================
@types('int[:,:]')
def array_2d_C_slice_1(a):
import numpy as np
b = a[:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_2(a):
import numpy as np
b = a[5:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_3(a):
import numpy as np
b = a[:5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_4(a):
import numpy as np
b = a[-15:]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_5(a):
import numpy as np
b = a[:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_6(a):
import numpy as np
b = a[5:15]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_7(a):
import numpy as np
b = a[-15:-5]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_8(a):
import numpy as np
b = a[::]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_9(a):
import numpy as np
b = a[5:, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_10(a):
import numpy as np
b = a[:5, :]
return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])
@types('int[:,:]')
def array_2d_C_slice_11(a):
import numpy as np
b = a[:, 5:]
    return np.sum(b), b[0][0], b[-1][-1], len(b), len(b[0])  # target API: numpy.sum
import numpy as np
def length(mat):
return np.max(mat.shape)
def ismember(mat1, mat2):
return np.isin(mat1, mat2)
def incVAT(RV, C, I, RI, d, distance_previous_points):
I_old = I
C_old = C
new_point_index = np.max(I.shape)+1
new_point_location = np.max(I.shape)+1
for j in range(np.max(I.shape)):
        # slice up to j+1 so it is never empty (the MATLAB source is 1-based: 1:j)
        value, index = np.min(distance_previous_points[0:j+1]), np.argmin(
            distance_previous_points[0:j+1])
if value < d[j]:
new_point_location = j+1
break
else:
        index = np.argmin(distance_previous_points)  # np.argmin returns a single index
remaining_points = I[new_point_location:-1]
remaining_points_old_points_method = remaining_points
remaining_points_location_in_RV = np.max(RV.shape)
remaining_points_old_points_method_location_in_RV = remaining_points_location_in_RV
    # np.empty() requires a shape argument; start from empty arrays (MATLAB "[]")
    included_old_points = np.array([])
    included_old_points_location_in_RV = np.array([])
pointer_last_point = new_point_location-1
d_remaining = d[new_point_location-1:-1]
C_remaining = C[new_point_location: -1]
I = np.array([I[0:new_point_location], new_point_index])
d = np.array([d[0:new_point_location-1],
np.min(distance_previous_points[0:new_point_location])])
RV_reordering = np.linspace(0, new_point_location)
C = np.array([C[0:new_point_location], index])
    method = np.array([])  # empty array rather than np.empty(), which needs a shape
for k in range(np.max(remaining_points.shape)):
min_dist_old_points = d_remaining[0]
closest_old_points = remaining_points_old_points_method[0]
closest_old_points_location_RV = remaining_points_location_in_RV[0]
[_, closest_point_C_remaining_old_points] = np.isin(
I_old[C_remaining[0]], I)
dist_new_point = distance_previous_points[remaining_points_location_in_RV]
[min_dist_new_point, index] = np.min(
dist_new_point), np.argmin(dist_new_point)
closest_new_point_location_RV = remaining_points_location_in_RV[index]
closest_new_point = remaining_points[index]
closest_point_C_remaining_new_point = new_point_location
dist_included_old_points = RV[
included_old_points_location_in_RV, remaining_points_location_in_RV]
if np.max(included_old_points_location_in_RV.shape) == 1:
[value1, index1] = min(dist_included_old_points)
[_, closest_point_C_included_old_points] = np.isin(
included_old_points, I)
else:
[value, index] = min(dist_included_old_points)
[value1, index1] = min(value)
[_, closest_point_C_included_old_points] = np.isin(
included_old_points(index[index1]), I)
min_dist_included_old_points = value1
closest_included_old_points_location_RV = remaining_points_location_in_RV[index1]
closest_included_old_points = remaining_points[index1]
if np.shape(min_dist_included_old_points) == (0, 0):
[min_dist_all, min_dist_method] = np.min(np.array([min_dist_old_points, min_dist_new_point])), np.argmin(
np.array([min_dist_old_points, min_dist_new_point]))
else:
[min_dist_all, min_dist_method] = np.min(np.array(
[min_dist_old_points, min_dist_new_point, min_dist_included_old_points])), np.argmin(np.array(
[min_dist_old_points, min_dist_new_point, min_dist_included_old_points]))
if min_dist_method == 1:
method = np.array([method, 1])
I = np.array([I, closest_old_points])
d = np.array([d, min_dist_old_points])
C = np.array([C, closest_point_C_remaining_old_points])
RV_reordering = np.array(
[RV_reordering, closest_old_points_location_RV])
            # the MATLAB source removes the matched entries ("x(x == v) = []");
            # boolean masks / np.delete are the NumPy equivalent
            remaining_points = remaining_points[
                remaining_points != closest_old_points]
            remaining_points_old_points_method = remaining_points_old_points_method[
                remaining_points_old_points_method != closest_old_points]
            remaining_points_old_points_method_location_in_RV = remaining_points_old_points_method_location_in_RV[
                remaining_points_old_points_method_location_in_RV != closest_old_points_location_RV]
            remaining_points_location_in_RV = remaining_points_location_in_RV[
                remaining_points_location_in_RV != closest_old_points_location_RV]
            pointer_last_point = pointer_last_point + 1
            d_remaining = np.delete(d_remaining, 0)
            C_remaining = np.delete(C_remaining, 0)
if np.max(remaining_points_old_points_method.shape) > 0:
while np.isin(remaining_points_old_points_method[0], I):
pointer_last_point = pointer_last_point+1
                    # drop the first remaining entry (MATLAB "x(1) = []" idiom)
                    d_remaining = np.delete(d_remaining, 0)
                    C_remaining = np.delete(C_remaining, 0)  # dataset row's target API here: numpy.empty
from functools import partial
from types import SimpleNamespace
from typing import Optional, List
import numpy as np
import scipy.optimize
import scipy.special
import sklearn.metrics.pairwise as skmetrics
def Phi(
D: np.ndarray,
edge_list: np.ndarray = None,
):
"""
Given an n x d matrix of (example, slices), calculate the potential
matrix.
Includes correlations modeled by the edges in the `edge_list`.
Args:
D (np.ndarray): n x d matrix of (example, slice)
edge_list (np.ndarray): k x 2 matrix of edge correlations to be modeled.
edge_list[i, :] should be indices for a pair of columns of D.
Returns:
Potential matrix. Equals D when edge_list is None, otherwise adds additional
(x_i * x_j) "cross-terms" corresponding to the edges in the `edge_list`.
Examples:
>>> D = np.random.choice([-1, 1], size=(100, 6))
>>> edge_list = np.array([(0, 1), (1, 4)])
>>> Phi(D, edge_list)
"""
if edge_list is not None:
pairwise_terms = (
D[np.arange(len(D)), edge_list[:, 0][:, np.newaxis]].T
* D[np.arange(len(D)), edge_list[:, 1][:, np.newaxis]].T
)
return np.concatenate([D, pairwise_terms], axis=1)
else:
return D
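# Added illustration (not part of the original snippet): a quick shape check for
# Phi — it should append one cross-term column per modeled edge. The helper name
# is hypothetical.
def _example_phi_shapes():
    D = np.random.choice([-1, 1], size=(100, 6))
    edge_list = np.array([(0, 1), (1, 4)])
    assert Phi(D).shape == (100, 6)                 # no edges: D is returned as-is
    assert Phi(D, edge_list).shape == (100, 6 + 2)  # one extra column per edge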
def log_partition_ratio(
x: np.ndarray,
Phi_D_src: np.ndarray,
n_src: int,
):
"""
Calculate the log-partition ratio in the KLIEP problem.
"""
return np.log(n_src) - scipy.special.logsumexp(Phi_D_src.dot(x))
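# Added sanity check (illustrative): with x = 0 every source example is weighted
# equally, so the expression reduces to log(n_src) - logsumexp(0, ..., 0) = 0.
def _example_log_partition_ratio_zero():
    Phi_D_src = np.random.choice([-1.0, 1.0], size=(50, 4))
    assert abs(log_partition_ratio(np.zeros(4), Phi_D_src, n_src=50)) < 1e-12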
def mandoline(
D_src: np.ndarray,
D_tgt: np.ndarray,
edge_list: np.ndarray,
sigma: float=None,
):
"""
Mandoline solver.
Args:
D_src: (n_src x d) matrix of (example, slices) for the source distribution.
D_tgt: (n_tgt x d) matrix of (example, slices) for the source distribution.
edge_list: list of edge correlations between slices that should be modeled.
sigma: optional parameter that activates RBF kernel-based KLIEP with scale
`sigma`.
Returns: SimpleNamespace that contains
opt: result of scipy.optimize
Phi_D_src: source potential matrix used in Mandoline
Phi_D_tgt: target potential matrix used in Mandoline
n_src: number of source samples
n_tgt: number of target samples
edge_list: the `edge_list` parameter passed as input
"""
# Copy and binarize the input matrices to -1/1
D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
if np.min(D_src) == 0:
D_src[D_src == 0] = -1
D_tgt[D_tgt == 0] = -1
# Edge list encoding dependencies between gs
if edge_list is not None:
edge_list = np.array(edge_list)
# Create the potential matrices
Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)
# Number of examples
n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]
def f(x):
obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy.special.logsumexp(Phi_D_src.dot(x))
return -obj
# Set the kernel
kernel = partial(skmetrics.rbf_kernel, gamma=sigma)
def llkliep_f(x):
obj = kernel(
Phi_D_tgt, x[:, np.newaxis]
).sum() - n_tgt * scipy.special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
return -obj
# Solve
if not sigma:
        opt = scipy.optimize.minimize(
            f, np.random.randn(Phi_D_tgt.shape[1]))  # target API: numpy.random.randn
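# Hedged usage sketch (added): how the solver might be called on binary slice
# matrices. The attributes read from the result follow the docstring above; the
# function body is truncated in this row, so treat this purely as a sketch.
def _example_mandoline_call():
    D_src = np.random.choice([0, 1], size=(200, 3))   # source (example, slice) matrix
    D_tgt = np.random.choice([0, 1], size=(100, 3))   # target (example, slice) matrix
    res = mandoline(D_src, D_tgt, edge_list=[(0, 1)])
    return res.n_src, res.n_tgt, res.opt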
import os
import mne
import numpy as np
import time
import pandas as pd
import itertools
from plipy.ddl_sto import StoDeepCDL1Rank
from scipy.optimize import linear_sum_assignment
from scipy.signal import correlate
from joblib import Memory
from tqdm import tqdm
mem = Memory(location='.', verbose=0)
N_EXAMPLES = 10
def cost_matrix_v(D, Dref):
C = np.zeros((D.shape[0], Dref.shape[0]))
for i in range(D.shape[0]):
for j in range(Dref.shape[0]):
C[i, j] = correlate(D[i, 0], Dref[j, 0]).max()
return C
def recovery_score(D, Dref, u=True):
"""
Comparison between a learnt prior and the truth
"""
try:
if u:
cost_matrix = np.abs(Dref.T @ D)
row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=True)
score = cost_matrix[row_ind, col_ind].sum() / np.min([D.shape[1], Dref.shape[1]])
else:
cost_matrix = cost_matrix_v(D, Dref)
row_ind, col_ind = linear_sum_assignment(cost_matrix, maximize=True)
score = cost_matrix[row_ind, col_ind].sum()
except:
score = 0
return score
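# Added sanity check (illustrative, hypothetical helper name): a dictionary with
# orthonormal columns compared against itself should score exactly 1 in u-mode.
def _example_recovery_score_identity():
    D = np.eye(4)[:, :3]   # 3 orthonormal atoms
    assert np.isclose(recovery_score(D, D, u=True), 1.0)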
@mem.cache
def run_test(params, num_exp):
lambd = params["lambd"]
mbs = params["mbs"]
window = params["window"]
epoch_steps = params["epochs_steps"]
epoch = params["epochs"]
iter_per_epoch = params["iter_per_epoch"]
reco_u = np.zeros(num_exp)
    reco_v = np.zeros(num_exp)  # target API: numpy.zeros
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpretting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many functions used in segmentation come from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring and nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress warnings
# Parallelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# import modules
import os
import glob
import re
import numpy as np
import tifffile as tiff
import pims_nd2
from skimage import io, measure, morphology
import tifffile as tiff
from scipy import stats
from pprint import pprint # for human readable file output
import multiprocessing
from multiprocessing import Pool
import numpy as np
import warnings
from tensorflow.python.keras import models
from enum import Enum
import numpy as np
import multiprocessing
from multiprocessing import Pool
import os
from napari_plugin_engine import napari_hook_implementation
from skimage.filters import threshold_otsu # segmentation
from skimage import morphology # many functions used in segmentation come from this
from skimage import segmentation # used in make_masks and segmentation
from scipy import ndimage as ndi # labeling and distance transform
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
from skimage import io, morphology, segmentation
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
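# Added sketch (hypothetical helper): the threshold function simply returns a 0/1 mask.
def _example_threshold_mask():
    data = np.array([[1, 5], [10, 2]])
    assert (threshold(data, 4) == np.array([[0, 1], [1, 0]])).all()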
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
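# Added sketch: the three filename parsers above pull the plane, FOV, and time ids
# out of mm3-style tiff names; the example filename is hypothetical.
def _example_filename_parsing():
    fname = 'exp_t0007xy012c2.tif'
    assert get_plane(fname) == 'c2'
    assert get_fov(fname) == 12
    assert get_time(fname) == 7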
# loads an image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
        The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
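# Hedged usage sketch (added): loading stacks relies on the global `params` dict
# and on-disk analysis folders, so this helper is illustrative only; the fov/peak
# ids are hypothetical.
def _example_load_stack_usage(fov_id=1, peak_id=37):
    phase = load_stack(fov_id, peak_id, color='c1')   # (t, y, x) phase stack
    segmented = load_stack(fov_id, peak_id, color='seg')
    return phase.shape, segmented.shape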
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This is for tiff files exported by Nikon Elements as a stacked tiff, one per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
    # a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
            # we want to work with the tag of the name 65331
            # if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
                # make infolist a list of the tag values 0 to 65535 by zipping up pairs of bytes at two-byte intervals
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
                # get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len(dTimeAbsolute)=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This is for tiff files exported by the mm3 function mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here
    Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
    This just gets the tiff metadata from the filename; it is a backup option used when the format of the metadata is not known.
    Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
    Parameters
---------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
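# Added illustration: the jd -> elapsed-seconds conversion used above. A Julian-day
# difference of 1/24 (one hour) should map to 3600 seconds.
def _example_jd_to_seconds():
    jd_first, jd_later = 2459000.0, 2459000.0 + 1.0 / 24
    t = np.around((jd_later - jd_first) * 24 * 60 * 60, decimals=0).astype('uint32')
    assert t == 3600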
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
    Loads all tiffs from an FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 1:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
        image_data = np.rollaxis(image_data, 0, 3)  # target API: numpy.rollaxis
"""Download and pre-process SQuAD and GloVe.
Usage:
> source activate squad
> python setup.py
Pre-processing code adapted from:
> https://github.com/HKUST-KnowComp/R-Net/blob/master/prepro.py
Author:
<NAME> (<EMAIL>)
"""
import numpy as np
import os
import re
import sys
import spacy
import ujson as json
import urllib.request
from args import get_exp2_setup_args
from codecs import open
from collections import Counter
from subprocess import run
from tqdm import tqdm
from zipfile import ZipFile
from toolkit import save, get_logger, quick_clean
import torch
def word_tokenize(sent):
doc = nlp(sent)
return [token.text for token in doc]
def convert_idx(text, tokens):
current = 0
spans = []
for token in tokens:
current = text.find(token, current)
if current < 0:
continue
spans.append((current, current + len(token)))
current += len(token)
return spans
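# Added sketch: convert_idx maps each token to its (start, end) character span in
# the original text.
def _example_convert_idx():
    assert convert_idx("the quick fox", ["the", "quick", "fox"]) == [(0, 3), (4, 9), (10, 13)]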
def process_file(filename, data_type, word_counter, char_counter, logger, chunk_size):
logger.info(f"Pre-processing {data_type} examples...")
ret_examples = []
ret_eval_examples = {}
examples = []
eval_examples = {}
topic_context_examples = []
total = 0
with open(filename, "r") as fh:
source = json.load(fh)
chunk_tracker = chunk_size
for topic_id, topic in tqdm(enumerate(source["data"])):
chunk_tracker -= 1
# context processing
topic_context = quick_clean(raw_str=topic["context"])
topic_context_tokens = word_tokenize(topic_context)
topic_context_chars = [list(token) for token in topic_context_tokens]
spans = convert_idx(topic_context, topic_context_tokens)
for token in topic_context_tokens:
# it was originally len(para['qas']) but that seemed arbitrary so I used 1s
word_counter[token] += 1
for char in token:
char_counter[char] += 1
topic_context_dict = {"context_tokens":topic_context_tokens,
"context_chars":topic_context_chars,
"context":topic_context}
topic_context_examples.append(topic_context_dict)
# qas processing
for qa in topic["qas"]:
total += 1
#question processing
ques = quick_clean(qa["question"])
ques_tokens = word_tokenize(ques)
ques_chars = [list(token) for token in ques_tokens]
for token in ques_tokens:
word_counter[token] += 1
for char in token:
char_counter[char] += 1
# answer processing
y1s, y2s = [], []
answer_texts = []
for answer in qa["answers"]:
answer_text = answer["text"]
answer_start = answer['answer_start']
answer_end = answer_start + len(answer_text)
answer_texts.append(answer_text)
answer_span = []
for idx, span in enumerate(spans):
if not (answer_end <= span[0] or answer_start >= span[1]):
answer_span.append(idx)
y1, y2 = answer_span[0], answer_span[-1]
y1s.append(y1)
y2s.append(y2)
example = {"ques_tokens": ques_tokens,
"ques_chars": ques_chars,
"topic_context_id": topic_id,
"y1s": y1s,
"y2s": y2s,
"id": total}
examples.append(example)
eval_examples[str(total)] = {"question": ques,
"context": topic_context_examples[topic_id]["context"],
"spans": spans,
"answers": answer_texts,
"uuid": qa["id"]}
if chunk_tracker == 0 or topic_id == (len(source['data'])-1):
# print(f"creating chunk b/c {chunk_tracker == 0} or {n == (len(source['data'])-1)}")
# print(f"number of examples is {chunk_size - chunk_tracker}")
ret_examples.append(examples)
examples=[]
chunk_tracker = chunk_size
return ret_examples, eval_examples, topic_context_examples
def get_embedding(counter, data_type, limit=-1, emb_file=None, vec_size=None, num_vectors=None):
logger.info(f"Pre-processing {data_type} vectors...")
embedding_dict = {}
filtered_elements = [k for k, v in counter.items() if v > limit]
if emb_file is not None:
assert vec_size is not None
with open(emb_file, "r", encoding="utf-8") as fh:
for line in tqdm(fh, total=num_vectors):
array = line.split()
word = "".join(array[0:-vec_size])
vector = list(map(float, array[-vec_size:]))
if word in counter and counter[word] > limit:
embedding_dict[word] = vector
logger.info(f"{len(embedding_dict)} / {len(filtered_elements)} tokens have corresponding {data_type} embedding vector")
else:
assert vec_size is not None
for token in filtered_elements:
embedding_dict[token] = [np.random.normal(
scale=0.1) for _ in range(vec_size)]
logger.info(f"{len(filtered_elements)} tokens have corresponding {data_type} embedding vector")
NULL = "--NULL--"
OOV = "--OOV--"
token2idx_dict = {token: idx for idx, token in enumerate(embedding_dict.keys(), 2)}
token2idx_dict[NULL] = 0
token2idx_dict[OOV] = 1
embedding_dict[NULL] = [0. for _ in range(vec_size)]
embedding_dict[OOV] = [0. for _ in range(vec_size)]
idx2emb_dict = {idx: embedding_dict[token]
for token, idx in token2idx_dict.items()}
emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]
return emb_mat, token2idx_dict
def is_answerable(example):
return len(example['y2s']) > 0 and len(example['y1s']) > 0
def build_features(args, examples, topic_contexts, data_type, out_file, word2idx_dict, char2idx_dict, exp2_topic_contexts_file, is_test=False, chunk_size=1):
topic_context_lens = [len(context["context"]) for context in topic_contexts]
para_limit = max(topic_context_lens)
ques_limit = args.ques_limit
ans_limit = args.ans_limit
char_limit = args.char_limit
del topic_context_lens
def drop_example(ex, is_test_=False):
if is_test_:
drop = False
else:
drop = len(topic_contexts[ex["topic_context_id"]]["context_tokens"]) > para_limit or \
len(ex["ques_tokens"]) > ques_limit or \
(is_answerable(ex) and
ex["y2s"][0] - ex["y1s"][0] > ans_limit)
return drop
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2idx_dict:
return word2idx_dict[each]
return 1
def _get_char(char):
if char in char2idx_dict:
return char2idx_dict[char]
return 1
total = 0
total_ = 0
meta = {}
context_idxs = []
context_char_idxs = []
ques_idxs = []
ques_char_idxs = []
topic_ids = []
y1s = []
y2s = []
ids = []
# context feature building
logger.info(f"Creating the {data_type} topic_context features")
for z, topic in tqdm(enumerate(topic_contexts)):
context_idx = np.zeros([para_limit], dtype=np.int32)
        context_char_idx = np.zeros([para_limit, char_limit], dtype=np.int32)  # target API: numpy.zeros
import os
import unittest
import numpy
import scipy.special
import moments
import pickle
import time
class SpectrumTestCase(unittest.TestCase):
def setUp(self):
self.startTime = time.time()
def tearDown(self):
t = time.time() - self.startTime
print("%s: %.3f seconds" % (self.id(), t))
def test_to_file(self):
"""
Saving spectrum to file.
"""
comments = ['comment 1', 'comment 2']
filename = 'test.fs'
data = numpy.random.rand(3,3)
fs = moments.Spectrum(data)
fs.to_file(filename, comment_lines=comments)
os.remove(filename)
fs.to_file(filename, comment_lines=comments, foldmaskinfo=False)
os.remove(filename)
def test_from_file(self):
"""
Loading spectrum from file.
"""
commentsin = ['comment 1', 'comment 2']
filename = 'test.fs'
data = numpy.random.rand(3,3)
fsin = moments.Spectrum(data)
fsin.to_file(filename, comment_lines=commentsin)
# Read the file.
fsout,commentsout = moments.Spectrum.from_file(filename,
return_comments=True)
os.remove(filename)
# Ensure that fs was read correctly.
self.assert_(numpy.allclose(fsout.data, fsin.data))
self.assert_(numpy.all(fsout.mask == fsin.mask))
self.assertEqual(fsout.folded, fsin.folded)
# Ensure comments were read correctly.
for ii,line in enumerate(commentsin):
self.assertEqual(line, commentsout[ii])
# Test using old file format
fsin.to_file(filename, comment_lines=commentsin, foldmaskinfo=False)
# Read the file.
fsout,commentsout = moments.Spectrum.from_file(filename,
return_comments=True)
os.remove(filename)
# Ensure that fs was read correctly.
self.assert_(numpy.allclose(fsout.data, fsin.data))
self.assert_(numpy.all(fsout.mask == fsin.mask))
self.assertEqual(fsout.folded, fsin.folded)
# Ensure comments were read correctly.
for ii,line in enumerate(commentsin):
self.assertEqual(line, commentsout[ii])
#
# Now test a file with folding and masking
#
fsin = moments.Spectrum(data).fold()
fsin.mask[0,1] = True
fsin.to_file(filename)
fsout = moments.Spectrum.from_file(filename)
os.remove(filename)
# Ensure that fs was read correctly.
self.assert_(numpy.allclose(fsout.data, fsin.data))
self.assert_(numpy.all(fsout.mask == fsin.mask))
self.assertEqual(fsout.folded, fsin.folded)
def test_pickle(self):
"""
Saving spectrum to file.
"""
comments = ['comment 1', 'comment 2']
filename = 'test.p'
data = numpy.random.rand(3,3)
fs = moments.Spectrum(data)
pickle.dump(fs, open(filename, "wb" ))
os.remove(filename)
def test_unpickle(self):
"""
Loading spectrum from file.
"""
commentsin = ['comment 1', 'comment 2']
filename = 'test.p'
data = numpy.random.rand(3,3)
fsin = moments.Spectrum(data)
pickle.dump(fsin, open(filename, "wb" ))
# Read the file.
fsout = pickle.load(open(filename, "rb" ))
os.remove(filename)
# Ensure that fs was read correctly.
self.assert_(numpy.allclose(fsout.data, fsin.data))
self.assert_(numpy.all(fsout.mask == fsin.mask))
self.assertEqual(fsout.folded, fsin.folded)
#
# Now test a file with folding and masking
#
fsin = moments.Spectrum(data).fold()
fsin.mask[0,1] = True
pickle.dump(fsin, open(filename, "wb" ))
# Read the file.
fsout = pickle.load(open(filename, "rb" ))
os.remove(filename)
# Ensure that fs was read correctly.
self.assert_(numpy.allclose(fsout.data, fsin.data))
self.assert_(numpy.all(fsout.mask == fsin.mask))
self.assertEqual(fsout.folded, fsin.folded)
def test_folding(self):
"""
Folding a 2D spectrum.
"""
data = numpy.reshape(numpy.arange(12), (3,4))
fs = moments.Spectrum(data)
ff = fs.fold()
# Ensure no SNPs have gotten lost.
self.assertAlmostEqual(fs.sum(), ff.sum(), 6)
self.assertAlmostEqual(fs.data.sum(), ff.data.sum(), 6)
# Ensure that the empty entries are actually empty.
self.assert_(numpy.all(ff.data[::-1] == numpy.tril(ff.data[::-1])))
# This turns out to be the correct result.
correct = numpy.tri(4)[::-1][-3:]*11
self.assert_(numpy.allclose(correct, ff.data))
def test_ambiguous_folding(self):
"""
Test folding when the minor allele is ambiguous.
"""
data = numpy.zeros((4,4))
        # Both these entries correspond to an allele seen in 3 of 6 samples.
# So the minor allele is ambiguous. In this case, we average the two
# possible assignments.
data[0,3] = 1
data[3,0] = 3
fs = moments.Spectrum(data)
ff = fs.fold()
correct = numpy.zeros((4,4))
correct[0,3] = correct[3,0] = 2
self.assert_(numpy.allclose(correct, ff.data))
def test_masked_folding(self):
"""
Test folding when the minor allele is ambiguous.
"""
data = numpy.zeros((5,6))
fs = moments.Spectrum(data)
# This folds to an entry that will already be masked.
fs.mask[1,2] = True
# This folds to (1,1), which needs to be masked.
fs.mask[3,4] = True
ff = fs.fold()
# Ensure that all those are masked.
for entry in [(1,2), (3,4), (1,1)]:
self.assert_(ff.mask[entry])
def test_folded_slices(self):
ns = (3,4)
fs1 = moments.Spectrum(numpy.random.rand(*ns))
folded1 = fs1.fold()
self.assert_(fs1[:].folded == False)
self.assert_(folded1[:].folded == True)
self.assert_(fs1[0].folded == False)
self.assert_(folded1[1].folded == True)
self.assert_(fs1[:,0].folded == False)
self.assert_(folded1[:,1].folded == True)
def test_folded_arithmetic(self):
"""
        Test that arithmetic operations respect and propagate the .folded attribute.
"""
# Disable logging of warnings because arithmetic may generate Spectra
# with entries < 0, but we don't care at this point.
import logging
moments.Spectrum_mod.logger.setLevel(logging.ERROR)
ns = (3,4)
fs1 = moments.Spectrum(numpy.random.uniform(size=ns))
fs2 = moments.Spectrum(numpy.random.uniform(size=ns))
folded1 = fs1.fold()
folded2 = fs2.fold()
# We'll iterate through each of these arithmetic functions.
try:
from operator import add, sub, mul, div, truediv, floordiv, pow, abs, pos, neg
lst = [add, sub, mul, div, truediv, floordiv, pow]
except:
from operator import add, sub, mul, truediv, floordiv, pow, abs, pos, neg
lst = [add, sub, mul, truediv, floordiv, pow]
arr = numpy.random.uniform(size=ns)
marr = numpy.random.uniform(size=ns)
# I found some difficulties with multiplication by numpy.float64, so I
# want to explicitly test this case.
numpyfloat = numpy.float64(2.0)
for op in lst:
            # Check that binary operations propagate folding status.
# Need to check cases both on right-hand-side of operator and
# left-hand-side
# Note that numpy.power(2.0,fs2) does not properly propagate type
# or status. I'm not sure how to fix this.
result = op(fs1,fs2)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs1.mask))
result = op(fs1,2.0)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs1.mask))
result = op(2.0,fs2)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs2.mask))
result = op(fs1,numpyfloat)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs1.mask))
result = op(numpyfloat,fs2)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs2.mask))
result = op(fs1,arr)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs1.mask))
result = op(arr,fs2)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs2.mask))
result = op(fs1,marr)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs1.mask))
result = op(marr,fs2)
self.assertFalse(result.folded)
self.assert_(numpy.all(result.mask == fs2.mask))
# Now with folded Spectra
result = op(folded1,folded2)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded1.mask))
result = op(folded1,2.0)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded1.mask))
result = op(2.0,folded2)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded2.mask))
result = op(folded1,numpyfloat)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded1.mask))
result = op(numpyfloat,folded2)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded2.mask))
result = op(folded1,arr)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded1.mask))
result = op(arr,folded2)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded2.mask))
result = op(folded1,marr)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded1.mask))
result = op(marr,folded2)
self.assertTrue(result.folded)
self.assert_(numpy.all(result.mask == folded2.mask))
# Check that exceptions are properly raised when folding status
# differs
self.assertRaises(ValueError, op, fs1, folded2)
self.assertRaises(ValueError, op, folded1, fs2)
for op in [abs,pos,neg,scipy.special.gammaln]:
            # Check that unary operations propagate folding status.
result = op(fs1)
self.assertFalse(result.folded)
result = op(folded1)
self.assertTrue(result.folded)
try:
# The in-place methods aren't in operator in python 2.4...
from operator import iadd,isub,imul,idiv,itruediv,ifloordiv,ipow
for op in [iadd,isub,imul,idiv,itruediv,ifloordiv,ipow]:
fs1origmask = fs1.mask.copy()
# Check that in-place operations preserve folding status.
op(fs1,fs2)
self.assertFalse(fs1.folded)
self.assert_(numpy.all(fs1.mask == fs1origmask))
op(fs1,2.0)
self.assertFalse(fs1.folded)
self.assert_(numpy.all(fs1.mask == fs1origmask))
op(fs1,numpyfloat)
self.assertFalse(fs1.folded)
self.assert_(numpy.all(fs1.mask == fs1origmask))
op(fs1,arr)
self.assertFalse(fs1.folded)
self.assert_(numpy.all(fs1.mask == fs1origmask))
op(fs1,marr)
self.assertFalse(fs1.folded)
self.assert_(numpy.all(fs1.mask == fs1origmask))
# Now folded Spectra
folded1origmask = folded1.mask.copy()
op(folded1,folded2)
self.assertTrue(folded1.folded)
self.assert_(numpy.all(folded1.mask == folded1origmask))
op(folded1,2.0)
self.assertTrue(folded1.folded)
self.assert_(numpy.all(folded1.mask == folded1origmask))
op(folded1,numpyfloat)
self.assertTrue(folded1.folded)
self.assert_(numpy.all(folded1.mask == folded1origmask))
op(folded1,arr)
self.assertTrue(folded1.folded)
self.assert_(numpy.all(folded1.mask == folded1origmask))
op(folded1,marr)
self.assertTrue(folded1.folded)
self.assert_(numpy.all(folded1.mask == folded1origmask))
# Check that exceptions are properly raised.
self.assertRaises(ValueError, op, fs1, folded2)
self.assertRaises(ValueError, op, folded1, fs2)
except ImportError:
pass
# Restore logging of warnings
moments.Spectrum_mod.logger.setLevel(logging.WARNING)
def test_unfolding(self):
ns = (3,4)
# We add some unusual masking.
fs = moments.Spectrum(numpy.random.uniform(size=ns))
fs.mask[0,1] = fs.mask[1,1] = True
folded = fs.fold()
unfolded = folded.unfold()
# Check that it was properly recorded
self.assertFalse(unfolded.folded)
# Check that no data was lost
self.assertAlmostEqual(fs.data.sum(), folded.data.sum())
self.assertAlmostEqual(fs.data.sum(), unfolded.data.sum())
# Note that fs.sum() need not be equal to folded.sum(), if fs had
# some masked values.
self.assertAlmostEqual(folded.sum(), unfolded.sum())
# Check that the proper entries are masked.
self.assertTrue(unfolded.mask[0,1])
self.assertTrue(unfolded.mask[(ns[0]-1),(ns[1]-1)-1])
self.assertTrue(unfolded.mask[1,1])
self.assertTrue(unfolded.mask[(ns[0]-1)-1,(ns[1]-1)-1])
def test_marginalize(self):
ns = (7,8,6)
fs = moments.Spectrum(numpy.random.uniform(size=ns))
folded = fs.fold()
marg1 = fs.marginalize([1])
# Do manual marginalization.
manual = moments.Spectrum(fs.data.sum(axis=1))
# Check that these are equal in the unmasked entries.
self.assert_(numpy.allclose(numpy.where(marg1.mask, 0, marg1.data),
numpy.where(manual.mask, 0, manual.data)))
# Check folded Spectrum objects. I should get the same result if I
# marginalize then fold, as if I fold then marginalize.
mf1 = marg1.fold()
mf2 = folded.marginalize([1])
self.assert_(numpy.allclose(mf1,mf2))
def test_projection(self):
# Test that projecting a multi-dimensional Spectrum succeeds
ns = (7,8,6)
fs = moments.Spectrum(numpy.random.uniform(size=ns))
p = fs.project([3,4,5])
# Also that we don't lose any data
self.assertAlmostEqual(fs.data.sum(), p.data.sum())
# Check that when I project an equilibrium spectrum, I get back an
# equilibrium spectrum
fs = moments.Spectrum(1./numpy.arange(100))
p = fs.project([17])
self.assert_(numpy.allclose(p[1:-1], 1./numpy.arange(1,len(p)-1)))
# Check that masked values are propagated correctly.
fs = moments.Spectrum(1./numpy.arange(20))
# All values with 3 or fewer observed should be masked.
fs.mask[3] = True
p = fs.project([10])
self.assert_(numpy.all(p.mask[:4]))
# Check that masked values are propagated correctly.
        fs = moments.Spectrum(1./numpy.arange(20))  # target API: numpy.arange
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
import scipy
import pickle
from sklearn import preprocessing
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import os
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.csgraph import minimum_spanning_tree
import h5py
import math
import numpy.linalg as lg
from scipy import sparse
import scipy.linalg as slg
import torch
import sklearn
from torch.autograd import Variable
from sklearn.neighbors import kneighbors_graph
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
from sklearn.metrics.cluster import adjusted_rand_score as ri
from sklearn.metrics import accuracy_score
from sklearn.cluster import KMeans, SpectralClustering
import itertools
from scipy.sparse import csr_matrix
import scipy.io
import dgl  # needed by build_karate_club_graph below
def build_karate_club_graph():
# All 78 edges are stored in two numpy arrays. One for source endpoints
# while the other for destination endpoints.
src = np.array([1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 10, 10,
10, 11, 12, 12, 13, 13, 13, 13, 16, 16, 17, 17, 19, 19, 21, 21,
25, 25, 27, 27, 27, 28, 29, 29, 30, 30, 31, 31, 31, 31, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33])
dst = np.array([0, 0, 1, 0, 1, 2, 0, 0, 0, 4, 5, 0, 1, 2, 3, 0, 2, 2, 0, 4,
5, 0, 0, 3, 0, 1, 2, 3, 5, 6, 0, 1, 0, 1, 0, 1, 23, 24, 2, 23,
24, 2, 23, 26, 1, 8, 0, 24, 25, 28, 2, 8, 14, 15, 18, 20, 22, 23,
29, 30, 31, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30,
31, 32])
# Edges are directional in DGL; Make them bi-directional.
u = np.concatenate([src, dst])
v = np.concatenate([dst, src])
# Construct a DGLGraph
return dgl.DGLGraph((u, v))
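# Hedged usage sketch (not part of the original file): builds the karate club graph and
# reports its size. The numbers follow from the arrays above: 34 members and 78 undirected
# edges, stored as 156 directed edges after the concatenation.
def _karate_graph_example():
    G = build_karate_club_graph()
    return G.number_of_nodes(), G.number_of_edges()  # expected (34, 156)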
def purity_score(y_true, y_pred):
"""Purity score
Args:
y_true(np.ndarray): n*1 matrix Ground truth labels
y_pred(np.ndarray): n*1 matrix Predicted clusters
Returns:
float: Purity score
"""
# matrix which will hold the majority-voted labels
y_voted_labels = np.zeros(y_true.shape)
# Ordering labels
## Labels might be missing, e.g. with a label set like {0, 2} where 1 is missing
## First find the unique labels, then map the labels to an ordered set
## 0,2 should become 0,1
labels = np.unique(y_true)
ordered_labels = np.arange(labels.shape[0])
for k in range(labels.shape[0]):
y_true[y_true==labels[k]] = ordered_labels[k]
# Update unique labels
labels = np.unique(y_true)
# We append one extra bin edge (max label + 1) so that np.histogram
# counts the actual occurrence of each class between consecutive edges,
# the upper edge being excluded: [bin_i, bin_i+1)
bins = np.concatenate((labels, [np.max(labels)+1]), axis=0)
for cluster in np.unique(y_pred):
hist, _ = np.histogram(y_true[y_pred==cluster], bins=bins)
# Find the most present label in the cluster
winner = np.argmax(hist)
y_voted_labels[y_pred==cluster] = winner
return accuracy_score(y_true, y_voted_labels)
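# Hedged usage sketch (not part of the original file): purity_score on a tiny synthetic
# labelling. The arrays are illustrative only; y_true is copied because purity_score
# relabels it in place.
def _purity_score_example():
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([1, 1, 0, 0, 2, 2])  # cluster ids permuted relative to the labels
    return purity_score(y_true.copy(), y_pred)  # each cluster is pure, so this returns 1.0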
def accuracy_clustering(y_true, y_pred):
# Ordering labels
labels = np.unique(y_true)
ordered_labels = np.arange(labels.shape[0])
for k in range(labels.shape[0]):
y_true[y_true==labels[k]] = ordered_labels[k]
labels = np.unique(y_true)
scores = []
# Try all the possible permutations
permutations = list(itertools.permutations(labels))
for perm in permutations:
y_permuted = np.zeros_like(y_true)
import sys
import numpy as np
from gcodeBuddy import angle, Arc, centers_from_params
class Command:
"""
represents line of Marlin g-code
:param init_string: line of Marlin g-code
:type init_string: str
"""
def __init__(self, init_string):
"""
initialization method
"""
err_msg = "Error in marlin.gcode_command.__init__(): "
no_parameter_commands = ["M84"] # list of commands that don't require a value after the parameters
if len(init_string) == 0:
print(err_msg + "argument passed to 'init_string' can't be empty string")
sys.exit(1)
# removing extraneous spaces
command_string = init_string
while command_string[0] == " ":
command_string = command_string[1:]
while command_string[-1] == " ":
command_string = command_string[:-1]
ind = 0
while (ind + 1) < len(command_string):
if command_string[ind] == " " and command_string[ind + 1] == " ":
command_string = command_string[:ind] + command_string[(ind + 1):]
else:
ind += 1
# ensuring valid command
command_list = command_string.split(" ")
if command_list[0] in marlin_commands():
self.command = command_list[0]
command_list = command_list[1:]
else:
print(err_msg + "Unrecognized Marlin command passed in argument 'init_string'")
sys.exit(1)
self.params = dict() # a dictionary storing param - values pairs (ie. {"x": 0, ... }
for parameter_str in command_list:
if parameter_str[0].isalpha():
if self.command in no_parameter_commands:
self.params[parameter_str.upper()] = 0
else:
try:
float(parameter_str[1:])
except ValueError:
print(err_msg + "Marlin parameter passed in argument 'init_string' of non-int/non-float type")
sys.exit(1)
else:
self.params[parameter_str[0].upper()] = float(parameter_str[1:])
else:
print(err_msg + "Unrecognized Marlin parameter passed in argument 'init_string'")
sys.exit(1)
def get_command(self):
"""
:return: g-code command
:rtype: str
"""
return self.command
def has_param(self, param_char):
"""
:param param_char: parameter character to search for in g-code command
:type param_char: str
:return: whether the Command object has the given parameter
:rtype: bool
"""
err_msg = "Error in marlin.gcode_command.has_param(): "
# ensuring string passed
if isinstance(param_char, str):
return param_char.upper() in self.params
else:
print(err_msg + "Argument 'param_char' of non-string type")
sys.exit(1)
def get_param(self, param_char):
"""
:param param_char: parameter character to search for in g-code command
:type param_char: str
:return: value of parameter character stored in g-code command
:rtype: float
"""
err_msg = "Error in marlin.gcode_command.get_param(): "
# ensuring param_char is string, and is in self.params
if isinstance(param_char, str):
if param_char in self.params:
return self.params[param_char]
else:
print(err_msg + "Command does not contain Marlin parameter given in argument 'param_char'")
sys.exit(1)
else:
print(err_msg + "Argument 'param_char' of non-string type")
sys.exit(1)
def set_param(self, param_char, param_val):
"""
sets parameter value
:param param_char: parameter character to change value
:type param_char: str
:param param_val: parameter value to set
:type param_val: int, float
"""
err_msg = "Error in marlin.gcode_command.set_param(): "
# ensuring param_char is string and is in self.params and param_val is number
if isinstance(param_char, str):
if isinstance(param_val, (int, float)):
if param_char in self.params:
self.params[param_char] = param_val
else:
print(err_msg + "Command does not contain Marlin parameter given in argument 'param_char'")
sys.exit(1)
else:
print(err_msg + "Argument 'param_val' of non-int/non-float type")
sys.exit(1)
else:
print(err_msg + "Argument 'param_char' of non-string type")
sys.exit(1)
def get_string(self):
"""
:return: entire g-code command in line form
:rtype: string
"""
ret_val = self.command
for param_key in self.params:
ret_val += " " + param_key + str(self.params[param_key])
return ret_val
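# Hedged usage sketch (not part of the original module): the intended round trip through
# the Command class. "G1 X10 Y20 F1500" is an illustrative line of Marlin g-code.
def _command_example():
    cmd = Command("G1 X10 Y20 F1500")
    if cmd.has_param("X"):
        cmd.set_param("X", 15.0)  # retarget the X coordinate
    return cmd.get_command(), cmd.get_param("X"), cmd.get_string()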
def command_to_arc(curr_pos, command):
"""
converts G2/G3 Marlin g-code command to Arc object
:param curr_pos: position of toolhead before given command
:type curr_pos: list[int, float], tuple(int, float)
:param command: G2/G3 command
:type command: Command
:return: arc toolpath travel corresponding to given g-code command
:rtype: Arc
"""
err_msg = "Error in marlin.command_to_arc(): "
# error checking curr_pos
if isinstance(curr_pos, (list, tuple)):
if len(curr_pos) == 2:
valid_types = True
for coord in curr_pos:
if not isinstance(coord, (int, float)):
valid_types = False
if not valid_types:
print(err_msg + "Element in argument 'curr_pos' of non-int/non-float type")
sys.exit(1)
else:
print(err_msg + "Argument 'curr_pos' does not contain two elements")
sys.exit(1)
else:
print(err_msg + "Argument 'curr_pos' of non-list/non-tuple type")
sys.exit(1)
# error checking command - error checking done in Command.__init__(), just need to make sure command is passed
if not isinstance(command, Command):
print(err_msg + "Argument 'command' of non-Command type")
sys.exit(1)
if command.get_command() not in ("G2", "G3"):
print(err_msg + "Command must be 'G2' or 'G3' for arc conversion")
sys.exit(1)
# organizing parameters into list (for error checking)
param_list = []
for letter in "XYIJR":
if command.has_param(letter):
param_list.append(letter)
# setting direction
direction = "c"
if command.get_command() == "G3":
direction = "cc"
if ("I" in param_list) or ("J" in param_list): # I and J parameters
# more error checking
if "R" in param_list:
print(err_msg + "Command cannot mix parameter 'R' with parameters 'I' and 'J' for arc conversion")
sys.exit(1)
# if only given I, J, or I and J
if ("X" not in param_list) and ("Y" not in param_list):
if param_list == ["I"]: # I
I = command.get_param("I")
center = [curr_pos[0] + I, curr_pos[1]]
radius = I
start_angle = angle(center, curr_pos)
end_angle = angle(center, curr_pos)
return Arc(center=center,
radius=radius,
start_angle=start_angle,
end_angle=end_angle,
direction=direction)
elif param_list == ["J"]: # J
J = command.get_param("J")
center = [curr_pos[0], curr_pos[1] + J]
radius = J
start_angle = angle(center, curr_pos)
end_angle = angle(center, curr_pos)
return Arc(center=center,
radius=radius,
start_angle=start_angle,
end_angle=end_angle,
direction=direction)
else: # I J
I = command.get_param("I")
J = command.get_param("J")
center = [curr_pos[0] + I, curr_pos[1] + J]
radius = np.sqrt(I**2 + J**2)
import os
import glob
from PIL import Image
from resizeimage import resizeimage
import sys
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
import xml.etree.cElementTree as ET
from yattag import Doc, indent
import shutil
import pandas as pd
from google_images_download import google_images_download
from io import BytesIO
import numpy as np
import tensorflow as tf
import datetime
def size_and_name(root_dir,query,pypath):
i = 1
z = 1
main_dir = root_dir+'/'+'downloads'+'/'+query
for filename in glob.iglob(main_dir + '**/*.jpg', recursive=True):
print(filename)
im = Image.open(filename)
im = im.convert('RGB')
im.save(filename , 'JPEG', quality=90)
for filename in glob.iglob(main_dir + '**/*.png', recursive=True):
print(filename)
im = Image.open(filename)
im = im.convert('RGB')
im.save(filename , 'JPEG', quality=90)
for filename in os.listdir(main_dir):
tst =query + str(i) +'.jpg'
src =main_dir+'/'+filename
tst =main_dir+'/'+tst
os.rename(src, tst)
i = i+1
for filename in glob.iglob(main_dir + '**/*.jpg', recursive=True):
class DeepLabModel(object):
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
FROZEN_GRAPH_NAME = 'frozen_inference_graph'
def __init__(self, tarball_path):
self.graph = tf.Graph()
graph_def = None
graph_def = tf.GraphDef.FromString(open(pypath+"/PSCMR_Tensorflow_object_trainer/"+tarball_path + "/frozen_inference_graph.pb", "rb").read())
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
def run(self, image):
start = datetime.datetime.now()
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
end = datetime.datetime.now()
diff = end - start
print("Time taken to evaluate segmentation is : " + str(diff))
return resized_image, seg_map
def drawSegment(baseImg, matImg):
width, height = baseImg.size
dummyImg = np.zeros([height, width, 4], dtype=np.uint8)
#!/usr/bin/env python
# imports
import numpy as np
import scipy.linalg as spla
# load data
convergence_DM = np.loadtxt("../../data/convergence_DM.txt")
convergence_E = np.loadtxt("../../data/convergence_E.txt")
S = np.loadtxt("../../data/S.txt")
T = np.loadtxt("../../data/T.txt")
V = np.loadtxt("../../data/V.txt")
eri = np.loadtxt("../../data/eri.txt")
E_nuc = np.loadtxt("../../data/E_nuc.txt")
iteration_max = (int)(np.loadtxt("../../data/iteration_max.txt"))
num_ao = (int)(np.loadtxt("../../data/num_ao.txt"))
num_elec_alpha = (int)(np.loadtxt("../../data/num_elec_alpha.txt"))
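# Hedged sketch (not in the original script): with the matrices loaded above, a standard
# restricted Hartree-Fock setup would form the core Hamiltonian and the symmetric
# orthogonalizer from S. Shown only to illustrate how S, T and V are typically combined.
H_core = T + V  # core Hamiltonian: kinetic energy plus nuclear attraction
s_vals, s_vecs = spla.eigh(S)  # eigendecomposition of the overlap matrix
X_orth = s_vecs @ np.diag(s_vals ** -0.5) @ s_vecs.T  # S^(-1/2) orthogonalizer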
import time
import numpy as np
import torch
import torch.nn as nn
import open3d as o3d
import h5py
import math
import sklearn
import copy
from sklearn.neighbors import KDTree
from PIL import Image
import matplotlib.pyplot as plt
def show_point_cloud(src_, src_corr_, ref_, ref_corr_):
src = src_.copy()
src_corr = src_corr_.copy()
ref = ref_.copy()
ref_corr = ref_corr_.copy()
ref[:,1] = ref[:,1] + 2.5
ref_corr[:,1] = ref_corr[:,1] + 2.5
src_pcd = o3d.geometry.PointCloud()
src_corr_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
ref_corr_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(src)
ref_pcd.points = o3d.utility.Vector3dVector(ref)
src_corr_pcd.points = o3d.utility.Vector3dVector(src_corr)
ref_corr_pcd.points = o3d.utility.Vector3dVector(ref_corr )
ref_pcd.paint_uniform_color([1, 0, 0.651])  # blue
# src_corr_pcd.paint_uniform_color([1, 0.706, 0])  # yellow
src_pcd.paint_uniform_color([0, 0.651, 0.929])  # red
line_size = src_corr.shape[0]
line_src = np.arange(0, 2 * line_size, 2)  # all even indices
rand_idxs = np.random.choice(line_size, math.ceil(line_size / 3), replace=False)
# print('line_src',line_src)
line_src = line_src[rand_idxs].reshape(rand_idxs.shape[0], 1)
# print('line_src',line_src)
line_ref = line_src + 1
# print('line_ref',line_ref)
lines = np.concatenate([line_ref, line_src], -1).reshape(-1, 2)
# print('lines',lines)
colors = [[1, 0, 0]]
# triangle_points=np.concatenate([data['points_ref'][1, :, :3].detach().cpu().numpy()+1,data['points_src'][1, :, :3].detach().cpu().numpy()],-1)
triangle_points = np.concatenate([src_corr, ref_corr ], -1)
triangle_points = triangle_points.reshape(-1, 3)
# print('triangle_points',triangle_points.shape)
line_pcd = o3d.geometry.LineSet()
line_pcd.lines = o3d.utility.Vector2iVector(lines)
line_pcd.colors = o3d.utility.Vector3dVector(colors)
# line_pcd.paint_uniform_color([1, 0.706, 0])
line_pcd.points = o3d.utility.Vector3dVector(triangle_points)
o3d.visualization.draw_geometries([line_pcd, src_pcd, ref_pcd], window_name='line_pcd src_pcd src_corr_pcd')
# o3d.visualization.draw_geometries([src_corr_pcd, ref_pcd], window_name='src_corr_pcd ref_pcd')
# src_pcd.transform(transform)
# src_corr_pcd.points = o3d.utility.Vector3dVector(weighted_ref)
# o3d.visualization.draw_geometries([src_corr_pcd, src_pcd], window_name='src_corr_pcd src_pcd.transform(T)')
#
# ref_pcd.points = o3d.utility.Vector3dVector(ref)
# o3d.visualization.draw_geometries([src_pcd, ref_pcd], window_name='src_pcd.transform(T) ref_pcd')
def draw_registration_result(source, target, src_color, tgt_color):
src_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(source)
ref_pcd.points = o3d.utility.Vector3dVector(target)
src_pcd.colors = o3d.utility.Vector3dVector(src_color)
ref_pcd.colors = o3d.utility.Vector3dVector(tgt_color)
# src_pcd.paint_uniform_color([1, 0.706, 0])
# ref_pcd.paint_uniform_color([0, 0.651, 0.929])
o3d.visualization.draw_geometries([src_pcd, ref_pcd])
def draw_registration_result_no_blocking(source, target,vis):
vis.update_geometry(source)
vis.poll_events()
vis.update_renderer()
def get_npy_data(filename, index):
all_data = np.load(filename, allow_pickle=True)
# print(len(all_data))
# xyz_src = torch.from_numpy(all_data[index * 3])
# feat_src = torch.from_numpy(all_data[index * 3 + 2])
# xyz_ref = torch.from_numpy(all_data[index * 3 + 3])
# feat_ref = torch.from_numpy(all_data[index * 3 + 5])
xyz = all_data[index * 4]
normal = all_data[index * 4 + 1]
feat = all_data[index * 4 + 2]
color = all_data[index * 4 + 3]
return xyz, normal, feat, color
def calGrad(point,normal,feature,kdTree):
# n * 3; n * 3 ; n * d
N = point.shape[0]
d = feature.shape[1]
grads = np.zeros([N,3,d])
for i in range(N):
pt = point[i,:].reshape(1,-1)
nt = normal[i,:].reshape(1,-1)
ft = feature[i,:].reshape(1,-1)
_, idx = kdTree.query(pt, k=20, return_distance=True)
# idx_ = np.reshape(idx,(-1,1))
# neighbor_ = point[idx_, :]
# neighbor = np.reshape(neighbor_, (N,-1, 3))
neighbor_pt = point[idx, :].reshape(-1,3)
neighbor_ft = feature[idx,:].reshape(-1,d)
proj_pt = neighbor_pt - (neighbor_pt - pt) @ nt.T * nt
A = proj_pt - pt
b = neighbor_ft - ft
A = np.concatenate((A,nt),axis=0)
b = np.concatenate((b,np.zeros(d).reshape(1,d)))
x = np.linalg.inv(A.T@A)@A.T@b
grads[i,:,:] = x
return grads
def pt2plTrans(source,target,corr, weights):
ps = source.point[corr[:, 0], :]
pt = target.point[corr[:, 1], :]
nt = target.normal[corr[:, 1], :]
geo_A = np.concatenate((np.cross(ps, nt), nt), axis=1) * weights
geo_b = np.sum((ps-pt)*nt, axis=1,keepdims=True) * weights
Ja = geo_A
res = geo_b
vecTrans = -np.linalg.inv(Ja.T@Ja)@Ja.T@res
vecTrans = np.squeeze(vecTrans)
cx = np.cos(vecTrans[0])
cy = np.cos(vecTrans[1])
cz = np.cos(vecTrans[2])
sx = np.sin(vecTrans[0])
sy = np.sin(vecTrans[1])
sz = np.sin(vecTrans[2])
R = np.array([[cy*cz, sx*sy*cz-cx*sz, cx*sy*cz+sx*sz],
[cy*sz, cx*cz+sx*sy*sz, cx*sy*sz-sx*cz],
[-sy, sx*cy, cx*cy]])
t = vecTrans[3:]
transform = np.identity(4)
transform[0:3, 0:3] = R
transform[0:3, 3] = t
t = t.reshape(3, 1)
return R, t, transform
class PointCloud:
def __init__(self,point,normal,feature):
self.point = point
self.normal = normal
self.feature = feature
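# Hedged usage sketch (not part of the original file): a single point-to-plane step on
# synthetic data. Points, normals and the identity correspondences are illustrative only.
def _pt2pl_example(n_pts=100):
    rng = np.random.RandomState(0)
    tgt_pts = rng.rand(n_pts, 3)
    tgt_nrm = rng.rand(n_pts, 3) - 0.5
    tgt_nrm /= np.linalg.norm(tgt_nrm, axis=1, keepdims=True)
    src_pts = tgt_pts + np.array([0.01, -0.02, 0.03])  # small known offset
    src = PointCloud(src_pts, tgt_nrm, None)
    tgt = PointCloud(tgt_pts, tgt_nrm, None)
    corr = np.repeat(np.arange(n_pts).reshape(-1, 1), 2, axis=1)  # identity correspondences
    weights = np.full((n_pts, 1), 1.0 / n_pts)
    R, t, transform = pt2plTrans(src, tgt, corr, weights)
    return transform  # should approximately undo the small offset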
def file2matrix(filename):
fr = open(filename)
numberOfLines = len(fr.readlines()) #get the number of lines in the file
trans = np.eye(4) #prepare matrix to return
truth = [] #prepare labels return
fr = open(filename)
index = 0
for line in fr.readlines():
line = line.strip()
# listFromLine = line.split('\t')
listFromLine = line.split()
listFromLine = [float(x) for x in listFromLine]
if(index % 5 ==0):
index = 0
elif(index % 5 ==1):
trans[0, :] = np.array(listFromLine)
elif(index % 5 ==2):
trans[1,:] = np.array(listFromLine)
elif(index % 5 ==3):
trans[2,:] = np.array(listFromLine)
elif(index % 5 ==4):
trans[3,:] = np.array(listFromLine)
truth.append(trans.copy())  # without copy, every appended element would reference the same matrix
index += 1
return truth
if __name__ == '__main__':
root_path = '/Bill/DataSet/RedWood/'
dataset_names = ['loft', 'lobby', 'apartment','bedroom','boardroom']
root_save_path = '/sin_ours/src2ref'
dataset_numbers = [252,199,319,219,243]
for i in range(len(dataset_names)):
# for i in range(1):
file_path = root_path + dataset_names[i]
end = dataset_numbers[i]
save_path = dataset_names[i] + root_save_path
print(file_path)
groud_truth = file2matrix(file_path + '/reg_output.log')
voxel_size = 0.05 # means 5cm for this dataset
err_R = []
err_T = []
trans_all = []
fail_list = []
start = 0
for j in range(start, end):
print(
'j',j
)
# index_src = j + 1
# index_ref = j
index_src = j
index_ref = j + 1
source_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_src))
target_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_ref))
filename = file_path + '/xyz_nor_feat_color.npy'
xyz_src, normal_src, feat_src, color_src = get_npy_data(filename, index_src)
xyz_ref, normal_ref, feat_ref, color_ref = get_npy_data(filename, index_ref)
# draw_registration_result(xyz_src, xyz_ref, color_src, color_ref)
# print('feat_src', feat_src.shape, feat_ref.shape)
total_trans = np.eye(4)
# lambda_hybrid = 0.8
lambda_color_ge = 0
fail_flag = 0
for m in range(35):
lambda_hybrid = (np.sin(0.67 * 0.9 ** (m) * 1.68)) ** 2
src_hybrid_feature = np.concatenate(((lambda_hybrid) * feat_src,
((1 - lambda_hybrid) * lambda_color_ge) * color_src,
((1 - lambda_hybrid) * (1 - lambda_color_ge)) * xyz_src), 1)
ref_hybrid_feature = np.concatenate(((lambda_hybrid) * feat_ref,
((1 - lambda_hybrid) * lambda_color_ge) * color_ref,
((1 - lambda_hybrid) * (1 - lambda_color_ge)) * xyz_ref), 1)
# src_hybrid_feature = np.concatenate((np.sqrt(lambda_hybrid) * feat_src, np.sqrt((1-lambda_hybrid) * lambda_color_ge) * color_src, np.sqrt((1-lambda_hybrid) * (1-lambda_color_ge)) * xyz_src), 1)
# ref_hybrid_feature = np.concatenate((np.sqrt(lambda_hybrid) * feat_ref, np.sqrt((1-lambda_hybrid) * lambda_color_ge) * color_ref, np.sqrt((1-lambda_hybrid) * (1-lambda_color_ge)) * xyz_ref), 1)
feat_ref_tree = KDTree(ref_hybrid_feature)
dist_feat, corr = feat_ref_tree.query(src_hybrid_feature, k = 1, return_distance = True)  # for each src point, find its nearest neighbour in ref; returns indices into ref
# print('dist_feat',dist_feat.shape)
corr_xyz_ref = xyz_ref[corr].reshape(-1,3)
corr_xyz_src = xyz_src
distance_threshold = np.sqrt(lambda_hybrid ** 2 * 0.4 + ((1-lambda_hybrid) * lambda_color_ge) ** 2 * 0.3 + ((1 - lambda_hybrid) * (1-lambda_color_ge)) ** 2 * 0.3 )
ref_correct_corr = corr[dist_feat < distance_threshold]  # keep the correspondences that pass the distance threshold, giving the matched points in ref
ref_correct_xyz = xyz_ref[ref_correct_corr]
ref_correct_normal = normal_ref[ref_correct_corr]
ref_correct_color = color_ref[ref_correct_corr]
if ref_correct_xyz.shape[0] == 0:
fail_flag = 1
continue
src_correct_corr = np.where((np.array(dist_feat < distance_threshold) > 0 ).reshape(-1, 1))[0]  # src indices are simply 0..n, so we only need the positions of dist_feat that pass the threshold
src_correct_xyz = xyz_src[src_correct_corr]
src_correct_normal = normal_src[src_correct_corr]
src_correct_color = color_src[src_correct_corr]
source = PointCloud(src_correct_xyz, src_correct_normal, src_correct_color)
target = PointCloud(ref_correct_xyz, ref_correct_normal, ref_correct_color)
useful_dis = dist_feat[src_correct_corr]  # dist_feat stores the src-to-ref distances per src point, so index it with the accepted src indices
# show_point_cloud(corr_xyz_src, src_correct_xyz, xyz_ref, ref_correct_xyz)
# weights = np.ones(src_correct_xyz.shape[0]).reshape(-1,1)  # uniform weights over the accepted correspondences; np.sum(np.power((src_correct_color - ref_correct_color), 2), 1).reshape(-1,1) *
weights = np.exp(-useful_dis/0.1).reshape(-1,1)  # distance-based weights for the accepted correspondences
weights = weights/np.sum(weights)
# print('corr_xyz_ref',i , distance_threshold, ref_correct_corr.shape, xyz_src.shape, xyz_ref.shape, weights.shape,src_correct_corr.shape)
N = src_correct_xyz.shape[0]
corr_src = np.array(range(N)).reshape(N, 1)
corr = np.concatenate((corr_src, corr_src), axis=1)  # the valid points were already gathered, so the correspondence is the identity mapping
R, t, transform = pt2plTrans(source, target, corr, weights)# 1 - 0.002 * i
xyz_src = (R @ xyz_src.T + t).T
source_show.transform(transform)
lambda_hybrid = 0.9 * lambda_hybrid
total_trans = transform @ total_trans
if fail_flag == 1:
total_trans = np.eye(4)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 20 12:16:29 2021
@author: WANGH0M
"""
import numpy as np
from scipy import sparse
from constraints_basic import columnnew,\
con_edge,con_unit,con_constl,con_equal_length,\
con_constangle2,con_constangle,con_unit_vector,con_dependent_vector,\
con_planarity,con_osculating_tangent,con_diagonal,\
con_equal_opposite_angle,\
con_dot,con_cross_product2,con_bisecting_vector,\
con_normal_constraints, con_planarity_constraints,\
con_unit_tangentplane_normal
# -------------------------------------------------------------------------
# common used net-constraints:
# -------------------------------------------------------------------------
#--------------------------------------------------------------------------
# isogonals:
#--------------------------------------------------------------------------
def con_unit_edge(rregular=False,**kwargs):
""" unit_edge / unit_diag_edge
X += [l1,l2,l3,l4,ue1,ue2,ue3,ue4]
(vi-v) = li*ui, ui**2=1, (i=1,2,3,4)
"""
if kwargs.get('unit_diag_edge'):
w = kwargs.get('unit_diag_edge')
diag=True
elif kwargs.get('unit_edge'):
w = kwargs.get('unit_edge')
diag=False
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
V = mesh.V
if diag:
v,v1,v2,v3,v4 = mesh.rr_star_corner
elif rregular:
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
else:
#v,v1,v2,v3,v4 = mesh.ver_regular_star.T # default angle=90, non-orient
v,v1,v2,v3,v4 = mesh.ver_star_matrix.T # oriented
num = len(v)
c_v = columnnew(v,0,V)
c_v1 = columnnew(v1,0,V)
c_v2 = columnnew(v2,0,V)
c_v3 = columnnew(v3,0,V)
c_v4 = columnnew(v4,0,V)
arr = np.arange(num)
c_l1 = N5-16*num + arr
c_l2 = c_l1 + num
c_l3 = c_l2 + num
c_l4 = c_l3 + num
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_v1,c_v,c_l1,c_ue1,num,N)
H2,r2 = con_edge(X,c_v2,c_v,c_l2,c_ue2,num,N)
H3,r3 = con_edge(X,c_v3,c_v,c_l3,c_ue3,num,N)
H4,r4 = con_edge(X,c_v4,c_v,c_l4,c_ue4,num,N)
Hu1,ru1 = con_unit(X,c_ue1,num,N)
Hu2,ru2 = con_unit(X,c_ue2,num,N)
Hu3,ru3 = con_unit(X,c_ue3,num,N)
Hu4,ru4 = con_unit(X,c_ue4,num,N)
H = sparse.vstack((H1,H2,H3,H4,Hu1,Hu2,Hu3,Hu4))
r = np.r_[r1,r2,r3,r4,ru1,ru2,ru3,ru4]
return H*w,r*w
def con_orthogonal(diagmesh=False,**kwargs): # simplest one, for auxetic-cmc-case
"""(v1-v3)*(v2-v4)=0, no auxilary variables
"""
if kwargs.get('orthogonal'):
w = kwargs.get('orthogonal')
elif kwargs.get('orthogonal_diag'):
w = kwargs.get('orthogonal_diag')
diagmesh=True
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
V = mesh.V
if diagmesh:
"(v1-v3)*(v2-v4)=0"
v,v1,v2,v3,v4 = mesh.rr_star_corner
else:
v0, vj, l = mesh.vertex_ring_vertices_iterators(order=True,
return_lengths=True)
ind = np.in1d(v0, np.where(l == 4)[0])
v0 = v0[ind]
vj = vj[ind]
v = v0[::4]
v1,v2,v3,v4 = vj[::4],vj[1::4],vj[2::4],vj[3::4]
c_v1 = columnnew(v1,0,V)
c_v2 = columnnew(v2,0,V)
c_v3 = columnnew(v3,0,V)
c_v4 = columnnew(v4,0,V)
col = np.r_[c_v1,c_v2,c_v3,c_v4]
num = len(v)
row = np.tile(np.arange(num),12)
d1 = X[c_v2]-X[c_v4]
d2 = X[c_v1]-X[c_v3]
d3 = X[c_v4]-X[c_v2]
d4 = X[c_v3]-X[c_v1]
data = np.r_[d1,d2,d3,d4]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.einsum('ij,ij->i',d1.reshape(-1,3, order='F'),d2.reshape(-1,3, order='F'))
#self.add_iterative_constraint(H*w, r*w, name)
return H*w,r*w
def con_orthogonal_midline(**kwargs):
""" this method is almost the same as above, minor differences at boundary
control quadfaces: two middle line are orthogonal to each other
quadface: v1,v2,v3,v4
middle lins: e1 = (v1+v2)/2-(v3+v4)/2; e2 = (v2+v3)/2-(v4+v1)/2
<===> e1 * e2 = 0 <==> (v1-v3)^2=(v2-v4)^2
"""
w = kwargs.get('orthogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
num = mesh.num_quadface
v1,v2,v3,v4 = mesh.rr_quadface.T # in order
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
H,r = con_equal_length(X,c_v1,c_v2,c_v3,c_v4,num,N)
return H*w,r*w
def con_isogonal(cos0,assign=False,**kwargs):
"""
keep tangent crossing angle
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = mesh.num_regular
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal')
#print('err:isogonal:',np.sum(np.square(H*X-r)))
return H*w,r*w
def con_isogonal_diagnet(cos0,assign=False,**kwargs):
"""
keep tangent crossing angle, of diagnal directions
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = len(mesh.ind_rr_star_v4f4)
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_diagnet')
return H*w,r*w
def con_isogonal_checkerboard_based(cos0,assign=False,**kwargs):
"""
quadface: diagonal crossing angle
X += [ld1,ld2, ud1,ud2]
1. (v1-v3) = ld1*ud1, ud1**2=1
2. (v2-v4) = ld2*ud2, ud2**2=1
3. ud1*ud2 == cos0
"""
w = kwargs.get('isogonal_ck_based')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N10 = kwargs.get('N10')
V = mesh.V
num = mesh.num_quadface
numl = N10-8*num-1
numud = N10-6*num-1
arr = np.arange(num)
c_ld1 = numl+arr
c_ld2 = numl+num+arr
v1,v2,v3,v4 = mesh.rr_quadface.T # in order
c_v1 = np.r_[v1,V+v1,2*V+v1] # [x,y,z]
c_v2 = np.r_[v2,V+v2,2*V+v2] # [x,y,z]
c_v3 = np.r_[v3,V+v3,2*V+v3] # [x,y,z]
c_v4 = np.r_[v4,V+v4,2*V+v4] # [x,y,z]
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
He1,re1 = con_edge(X,c_v1,c_v3,c_ld1,c_ud1,num,N)
He2,re2 = con_edge(X,c_v2,c_v4,c_ld2,c_ud2,num,N)
Hu1,ru1 = con_unit(X,c_ud1,num,N)
Hu2,ru2 = con_unit(X,c_ud2,num,N)
Ha,ra = con_constangle2(X,c_ud1,c_ud2,N10-1,num,N)
H = sparse.vstack((He1,He2,Hu1,Hu2,Ha*10))
r = np.r_[re1,re2,ru1,ru2,ra*10]
if assign:
H0,r0 = con_constl(np.array([N10-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_ck_based')
return H*w,r*w
def con_isogonal_quadface_based(cos0,assign=False,halfdiag=True,**kwargs):
"""
quadface: midedge point edge vectors
X += [ld1,ld2, ud1,ud2]
1. (v2+v3-v1-v4) = 2* ld1*ud1, ud1**2=1
2. (v3+v4-v1-v2) = 2* ld2*ud2, ud2**2=1
3. ud1*ud2 == cos0
"""
w = kwargs.get('isogonal_face_based')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N10 = kwargs.get('N10')
V = mesh.V
if halfdiag:
ib,ir = mesh.vertex_check_ind
_,v1,v2,v3,v4 = mesh.rr_star.T
v1,v2,v3,v4 = v1[ib],v2[ib],v3[ib],v4[ib]
num = len(v1)
else:
num = mesh.num_quadface
v1,v2,v3,v4 = mesh.rr_quadface.T # in order
numl = N10-8*num-1
numud = N10-6*num-1
arr = np.arange(num)
c_ld1 = numl+arr
c_ld2 = numl+num+arr
c_v1 = np.r_[v1,V+v1,2*V+v1] # [x,y,z]
c_v2 = np.r_[v2,V+v2,2*V+v2] # [x,y,z]
c_v3 = np.r_[v3,V+v3,2*V+v3] # [x,y,z]
c_v4 = np.r_[v4,V+v4,2*V+v4] # [x,y,z]
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
def _edge(c_ld1,c_ud1,dddd):
"(v2+v3-v1-v4) = 2* ld1*ud1, ud1**2=1"
ld1 = X[c_ld1]
ud1 = X[c_ud1]
row = np.tile(np.arange(3*num),6)
col = np.r_[c_v1,c_v2,c_v3,c_v4,np.tile(c_ld1,3),c_ud1]
data = np.r_[dddd,-2*ud1,-2*np.tile(ld1,3)]
r = -2*np.tile(ld1,3)*ud1
H = sparse.coo_matrix((data,(row,col)), shape=(3*num, N))
return H,r
a3 = np.ones(3*num)
d1 = np.r_[-a3,a3,a3,-a3]
d2 = np.r_[-a3,-a3,a3,a3]
He1,re1 = _edge(c_ld1,c_ud1,d1)
He2,re2 = _edge(c_ld2,c_ud2,d2)
Hu1,ru1 = con_unit(X,c_ud1,num,N)
Hu2,ru2 = con_unit(X,c_ud2,num,N)
Ha,ra = con_constangle2(X,c_ud1,c_ud2,N10-1,num,N)
H = sparse.vstack((He1,He2,Hu1,Hu2,Ha))
r = np.r_[re1,re2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N10-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal_face_based')
return H*w,r*w
def con_unequal_two_neighbouring_edges(v012,eps,**kwargs):
""" oriented edge1,edge2 l1>=l2 <==> l1^2-l2^2*(1+eps)=s^2
(v1-v)^2-(v2-v)^2*(1+eps) = s^2
"""
w = kwargs.get('nonsymmetric')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nnonsym = kwargs.get('Nnonsym')
num = len(v012[0])
c_s = Nnonsym-num+np.arange(num)
c_v = columnnew(v012[0],0, mesh.V)
c_v1 = columnnew(v012[1],0, mesh.V)
c_v2 = columnnew(v012[2],0, mesh.V)
col = np.r_[c_v,c_v1,c_v2,c_s]
row = np.tile(np.arange(num),10)
X0,X1,X2,Xs = X[c_v],X[c_v1],X[c_v2],X[c_s]
data = np.r_[-2*(X1-X0)+2*(X2-X0)*(1+eps),2*(X1-X0),-2*(X2-X0)*(1+eps),-2*Xs]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
E1,E2 = (X1-X0).reshape(-1,3,order='F'),(X2-X0).reshape(-1,3,order='F')
r = np.linalg.norm(E1,axis=1)**2-np.linalg.norm(E2,axis=1)**2*(1+eps)
r -= Xs**2
return H*w,r*w
def con_nonsquare_quadface(v012,il12,eps,**kwargs):
""" oriented edge1,edge2 l1 > l2 or l1<l2.
<==> (l1-l2)^2 = s^2 + eps
l1**2 = (v1-v0)^2; l2**2 = (v2-v0)^2
v012 := [v0,v1,v2]
il12 := [il1, il2]
"""
w = kwargs.get('nonsymmetric')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nnonsym = kwargs.get('Nnonsym')
c_v = columnnew(v012[0],0, mesh.V)
c_v1 = columnnew(v012[1],0, mesh.V)
c_v2 = columnnew(v012[2],0, mesh.V)
num = len(il12[0])
c_l1 = Nnonsym-mesh.E-num + il12[0]
c_l2 = Nnonsym-mesh.E-num + il12[1]
c_s = Nnonsym-num + np.arange(num)
Xl1,Xl2,Xs = X[c_l1],X[c_l2],X[c_s]
def _ratio():
col = np.r_[c_l1,c_l2,c_s]
row = np.tile(np.arange(num),3)
data = np.r_[2*(Xl1-Xl2),-2*(Xl1-Xl2),-2*Xs]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = (Xl1-Xl2)**2-Xs**2 + np.ones(num)*eps
return H,r
def _edge(c_l1,c_v0,c_v1):
"l1**2 = (v1-v0)^2"
col = np.r_[c_v0,c_v1,c_l1]
row = np.tile(np.arange(num),7)
data = 2*np.r_[-X[c_v1]+X[c_v0],X[c_v1]-X[c_v0],-X[c_l1]]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.linalg.norm((X[c_v1]-X[c_v0]).reshape(-1,3,order='F'),axis=1)**2
r -= X[c_l1]**2
return H,r
H1,r1 = _ratio()
H2,r2 = _edge(c_l1,c_v,c_v1)
H3,r3 = _edge(c_l2,c_v,c_v2)
H = sparse.vstack((H1, H2, H3))
r = np.r_[r1,r2,r3]
return H*w,r*w
def con_ctrlnet_symmetric_1_diagpoly(another_poly_direction=False,**kwargs):
""" ctrl-quadmesh + 1diagonal form a web:
three families of polylines satisfy the symmetric condition:
ut1,ut2 (unit tangents of control polylines); ud1 (unit tangent of diagonal)
ut1 and ut2 symmetric to ud1
<==>
ud1 * (ut1-ut2) = 0;
(v1-v3) = l1 * ut1; (v2-v4) = l2 * ut2; (va-vc) = lac * ud1
ut1^2=1; ut2^2=1; ut1^2=1;
X = [lt1,lt2,ut1,ut2; lac,ud1] ##len=1+1+3+3+1+3
"""
w = kwargs.get('ctrlnet_symmetric_1diagpoly')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
num = len(mesh.ind_rr_star_v4f4)
arr,arr3 = np.arange(num), np.arange(3*num)
Ncds = kwargs.get('Ncds')-12*num
c_lt1,c_t1 = Ncds+arr, Ncds+2*num+arr3
c_lt2,c_t2 = c_lt1+num, c_t1+3*num
c_ld1,c_d1 = Ncds+8*num+arr,Ncds+9*num+arr3
_,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
_,va,vb,vc,vd = mesh.rr_star_corner# in diagonal direction
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
if another_poly_direction:
c_va = columnnew(vb,0,mesh.V)
c_vc = columnnew(vd,0,mesh.V)
else:
c_va = columnnew(va,0,mesh.V)
c_vc = columnnew(vc,0,mesh.V)
H1,r1 = con_edge(X,c_v1,c_v3,c_lt1,c_t1,num,N)
H2,r2 = con_edge(X,c_v2,c_v4,c_lt2,c_t2,num,N)
H3,r3 = con_edge(X,c_va,c_vc,c_ld1,c_d1,num,N)
Hu1,ru1 = con_unit(X,c_t1,num,N)
Hu2,ru2 = con_unit(X,c_t2,num,N)
Hu3,ru3 = con_unit(X,c_d1,num,N)
Hs,rs = con_planarity(X,c_t1,c_t2,c_d1,num,N)
H = sparse.vstack((H1, H2, H3, Hu1,Hu2,Hu3,Hs))
r = np.r_[r1,r2,r3,ru1,ru2,ru3,rs]
return H*w,r*w
def con_chebyshev(l0,assign=False,**kwargs):
"""
keeping all edge_length equal
(Vi-Vj)^2 = l^2
if assign:
l == l0
"""
w = kwargs.get('chebyshev')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N8 = kwargs.get('N8')
V = mesh.V
vi, vj = mesh.vertex_ring_vertices_iterators(order=True)
num = len(vi)
numl = N8-1
c_l = np.tile(numl, num)
c_vi = columnnew(vi,0,V)
c_vj = columnnew(vj,0,V)
data1 = X[c_vi]
data2 = X[c_vj]
col = np.r_[c_vi, c_vj, c_l]
data = 2*np.r_[data1-data2, data2-data1, -X[c_l]]
row = np.tile(np.arange(num),7)
r = np.einsum('ij,ij->i',(data1-data2).reshape(-1,3, order='F'),(data1-data2).reshape(-1,3, order='F')) - X[c_l]**2
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
if assign:
Hl,rl = con_constl(np.array([numl],dtype=int),np.array([l0]),1,N)
H = sparse.vstack((H, Hl))
r = np.r_[r,rl]
return H*w, r*w
#--------------------------------------------------------------------------
# A-net:
#--------------------------------------------------------------------------
def _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N):
"vn*(vi-v)=0; vn**2=1"
num = int(len(c_v)/3)
H1,r1 = con_planarity(X,c_v,c_v1,c_n,num,N)
H2,r2 = con_planarity(X,c_v,c_v2,c_n,num,N)
H3,r3 = con_planarity(X,c_v,c_v3,c_n,num,N)
H4,r4 = con_planarity(X,c_v,c_v4,c_n,num,N)
Hn,rn = con_unit(X,c_n,num,N)
H = sparse.vstack((H1,H2,H3,H4,Hn))
r = np.r_[r1,r2,r3,r4,rn]
return H*w, r*w
def con_anet(rregular=False,checker_weight=1,id_checker=None,pitch=1,**kwargs): #TODO
""" based on con_unit_edge()
X += [ni]
ni * (vij - vi) = 0
"""
w = kwargs.get('Anet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nanet = kwargs.get('Nanet')
if rregular:
v,v1,v2,v3,v4 = mesh.rr_star[mesh.ind_rr_star_v4f4].T
num=len(mesh.ind_rr_star_v4f4)
else:
num = mesh.num_regular
v,v1,v2,v3,v4 = mesh.ver_regular_star.T
c_n = Nanet-3*num+np.arange(3*num)
c_v = columnnew(v ,0,mesh.V)
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
if rregular and checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_anet(X,w,c_n[ib],c_v[ib],c_v1[ib],c_v2[ib],c_v3[ib],c_v4[ib],N)
Hr,rr = _con_anet(X,wr,c_n[ir],c_v[ir],c_v1[ir],c_v2[ir],c_v3[ir],c_v4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N)
if kwargs.get('normal_bar'):
Nbar = kwargs.get('Nbar')
if pitch<0:
c_nbar = Nbar-3*num+np.arange(3*num)-1
annnbar = [c_v,c_n,c_nbar,Nbar-1]
else:
c_nbar = Nbar-3*num+np.arange(3*num)
annnbar = [c_v,c_n,c_nbar]
return H,r, annnbar
return H,r
def con_anet_diagnet(checker_weight=1,id_checker=None,
assign_crpc_ratio=1,pitch=1,**kwargs):
"based on con_unit_edge(diag=True); X += [ni]; ni * (vij - vi) = 0"
w = kwargs.get('Anet_diagnet')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
Nanet = kwargs.get('Nanet')
#c_v,c_v1,c_v2,c_v3,c_v4 = mesh.get_vs_diagonal_v(index=False)
v,v1,v2,v3,v4 = mesh.rr_star_corner
c_v = columnnew(v ,0,mesh.V)
c_v1 = columnnew(v1,0,mesh.V)
c_v2 = columnnew(v2,0,mesh.V)
c_v3 = columnnew(v3,0,mesh.V)
c_v4 = columnnew(v4,0,mesh.V)
num = int(len(c_v)/3)
c_n = Nanet-3*num+np.arange(3*num)
if checker_weight<1:
"at red-rr-vs, smaller weight"
wr = checker_weight
iblue,ired = id_checker
ib = columnnew(iblue,0,len(mesh.ind_rr_star_v4f4))
ir = columnnew(ired,0,len(mesh.ind_rr_star_v4f4))
Hb,rb = _con_anet(X,w,c_n[ib],c_v[ib],c_v1[ib],c_v2[ib],c_v3[ib],c_v4[ib],N)
Hr,rr = _con_anet(X,wr,c_n[ir],c_v[ir],c_v1[ir],c_v2[ir],c_v3[ir],c_v4[ir],N)
H = sparse.vstack((Hb,Hr))
r = np.r_[rb,rr]
else:
"all rr-vs, same weight"
H,r = _con_anet(X,w,c_n,c_v,c_v1,c_v2,c_v3,c_v4,N)
annnbar = None
if kwargs.get('normal_bar'):
N10 = kwargs.get('N10')
Nbar = kwargs.get('Nbar')
if pitch<0:
c_nbar = Nbar-3*num+np.arange(3*num)-1
annnbar = [c_v,c_n,c_nbar,Nbar-1]
else:
c_nbar = Nbar-3*num+np.arange(3*num)
annnbar = [c_v,c_n,c_nbar]
return H*w,r*w,annnbar
if kwargs.get('CRPC'):
"""
quadface: diagonal crossing angle
no additional variables; related to e1, e2 and the given ratio a
a family of constraints:
(1-a) e1*e2 - a-1=0 <==> e1*e2 = (1+a) / (1-a) === cos0
"""
num = mesh.num_quadface
numud = N10-6*num-1
arr = np.arange(num)
c_ud1 = np.r_[numud+arr,numud+num+arr,numud+2*num+arr]
c_ud2 = c_ud1+3*num
col = np.r_[c_ud1,c_ud2]
row = np.tile(arr,6)
##########################################
# FUNCTIONS FOR SEARCHLIGHT RSA ANALYSES #
##########################################
# This code implements RSA within a moveable searchlight by adapting the nilearn searchlight class.
# This is extensively optimised using Numba and certain elements can be run in parallel using joblib.
# This implementation is NOT designed to be flexible however, for example it only implements Spearman
# correlation as a measure of similarity.
from numba import njit
import numpy as np
from nilearn._utils.niimg_conversions import check_niimg_4d, check_niimg_3d
from sklearn import neighbors
from nilearn.image.resampling import coord_transform
import joblib
from nilearn import image
import warnings
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from nilearn import masking
from nilearn.image.resampling import coord_transform
from nilearn._utils import check_niimg_4d
np.seterr(divide='ignore', invalid='ignore')
@njit
def get_tri(arr):
tri_idx = np.triu_indices_from(arr, k=1)
out = np.zeros(len(tri_idx[0]), arr.dtype)
for n, (i,j) in enumerate(zip(tri_idx[0], tri_idx[1])):
out[n] = arr[i,j]
return out
@njit
def scale_data(X):
return (X - np.nanmean(X)) / (np.nanstd(X) + 1e-20)
@njit
def ols(x, y, y_mask):
for i in range(y.shape[1]):
y_mask[np.isnan(y[:, i])] = True
y_mask[np.isnan(x[:, 0])] = True
x = x[~y_mask]
y = y[~y_mask]
coefs = np.dot(np.linalg.pinv(np.dot(x.T,x)),np.dot(x.T,y))
return coefs
@njit
def rankdata(a):
arr = np.ravel(np.asarray(a))
sorter = np.argsort(arr, kind='quicksort')
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
arr = arr[sorter]
obs = np.hstack((np.array([True]), arr[1:] != arr[:-1]))
dense = obs.cumsum()[inv]
# cumulative counts of each unique value
count = np.hstack((np.nonzero(obs)[0], np.array([len(obs)])))
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
@njit
def pearson_corr(data1, data2):
M = data1.size
sum1 = 0.
sum2 = 0.
for i in range(M):
sum1 += data1[i]
sum2 += data2[i]
mean1 = sum1 / M
mean2 = sum2 / M
var_sum1 = 0.
var_sum2 = 0.
cross_sum = 0.
for i in range(M):
var_sum1 += (data1[i] - mean1) ** 2
var_sum2 += (data2[i] - mean2) ** 2
cross_sum += (data1[i] * data2[i])
std1 = (var_sum1 / M) ** .5
std2 = (var_sum2 / M) ** .5
cross_mean = cross_sum / M
std1 = std1 + 1e-8
std2 = std2 + 1e-8
out = (cross_mean - mean1 * mean2) / (std1 * std2)
return out
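# Hedged helper (not part of the original module): Spearman correlation, the similarity
# measure named in the header, computed by ranking both vectors and passing the ranks
# to pearson_corr above.
@njit
def spearman_corr(data1, data2):
    return pearson_corr(rankdata(data1), rankdata(data2))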
@njit
def pairwise_corr(X):
n, m = X.shape
out = np.zeros((n, n))
"""
Filename: visualization.py
Purpose: Set of go-to plotting functions
Author: <NAME>
Date created: 28.11.2018
Possible problems:
1.
"""
import os
import numpy as np
from tractor.galaxy import ExpGalaxy
from tractor import EllipseE
from tractor.galaxy import ExpGalaxy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, SymLogNorm
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
from skimage.segmentation import find_boundaries
from astropy.visualization import hist
from scipy import stats
import config as conf
import matplotlib.cm as cm
import random
from time import time
from astropy.io import fits
import logging
logger = logging.getLogger('farmer.visualization')
# Random discrete color generator
colors = cm.rainbow(np.linspace(0, 1, 1000))
cidx = np.arange(0, 1000)
random.shuffle(cidx)
colors = colors[cidx]
def plot_background(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
vmin, vmax = brick.background_images[idx].min(), brick.background_images[idx].max()
vmin = -vmax
img = ax.imshow(brick.background_images[idx], cmap='RdGy', norm=SymLogNorm(linthresh=0.03))
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_background.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_mask(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
img = ax.imshow(brick.masks[idx])
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_mask.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_brick(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
backlevel, noisesigma = brick.backgrounds[idx]
vmin, vmax = np.max([backlevel + noisesigma, 1E-5]), brick.images[idx].max()
# vmin, vmax = brick.images[idx].min(), brick.images[idx].max()
if vmin > vmax:
logger.warning(f'{band} brick not plotted!')
return
vmin = -vmax
norm = SymLogNorm(linthresh=0.03)
img = ax.imshow(brick.images[idx], cmap='RdGy', origin='lower', norm=norm)
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_brick.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_blob(myblob, myfblob):
fig, ax = plt.subplots(ncols=4, nrows=1+myfblob.n_bands, figsize=(5 + 5*myfblob.n_bands, 10), sharex=True, sharey=True)
back = myblob.backgrounds[0]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myblob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
# img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
mmask = myblob.masks[0].copy()
mmask[mmask==1] = np.nan
ax[0, 0].imshow(myblob.images[0], **img_opt)
ax[0, 0].imshow(mmask, alpha=0.5, cmap='Greys')
ax[0, 1].imshow(myblob.solution_model_images[0] + noise, **img_opt)
ax[0, 2].imshow(myblob.images[0] - myblob.solution_model_images[0], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[0, 3].imshow(myblob.solution_chi_images[0], cmap='RdGy', vmin = -7, vmax = 7)
ax[0, 0].set_ylabel(f'Detection ({myblob.bands[0]})')
ax[0, 0].set_title('Data')
ax[0, 1].set_title('Model')
ax[0, 2].set_title('Data - Model')
ax[0, 3].set_title('$\chi$-map')
band = myblob.bands[0]
for j, src in enumerate(myblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myblob.solved_chisq[j]
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[0, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
objects = myblob.bcatalog[j]
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0, 0].add_artist(e)
try:
for i in np.arange(myfblob.n_bands):
back = myfblob.backgrounds[i]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myfblob.solution_tractor
# norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
# img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 0].imshow(myfblob.images[i], **img_opt)
ax[i+1, 1].imshow(myfblob.solution_model_images[i] + noise, **img_opt)
ax[i+1, 2].imshow(myfblob.images[i] - myfblob.solution_model_images[i], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 3].imshow(myfblob.solution_chi_images[i], cmap='RdGy', vmin = -7, vmax = 7)
ax[i+1, 0].set_ylabel(myfblob.bands[i])
band = myfblob.bands[i]
for j, src in enumerate(myfblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myfblob.solution_chisq[j, i]
Nres = myfblob.n_residual_sources[i]
topt = dict(color=colors[j], transform = ax[i+1, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[i+1, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
if Nres > 0:
ax[i+1, 3].text(1.05, ystart - 0.4, f'{Nres} residual sources found!', **topt)
res_x = myfblob.residual_catalog[i]['x']
res_y = myfblob.residual_catalog[i]['y']
for x, y in zip(res_x, res_y):
ax[i+1, 3].scatter(x, y, marker='+', color='r')
for s, src in enumerate(myfblob.solution_catalog):
x, y = src.pos
color = colors[s]
for i in np.arange(1 + myfblob.n_bands):
for j in np.arange(4):
ax[i,j].plot([x, x], [y - 10, y - 5], c=color)
ax[i,j].plot([x - 10, x - 5], [y, y], c=color)
except:
logger.warning('Could not plot multiwavelength diagnostic figures')
[[ax[i,j].set(xlim=(0,myfblob.dims[1]), ylim=(0,myfblob.dims[0])) for i in np.arange(myfblob.n_bands+1)] for j in np.arange(4)]
#fig.suptitle(f'Solution for {blob_id}')
fig.subplots_adjust(wspace=0.01, hspace=0, right=0.8)
if myblob._is_itemblob:
sid = myblob.bcatalog['source_id'][0]
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}_S{sid}.pdf'))
else:
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}.pdf'))
plt.close()
def plot_srcprofile(blob, src, sid, bands=None):
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
elif (len(bands) == 1) & (bands[0] == conf.MODELING_NICKNAME):
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_srcprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_srcprofile.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = conf.MODELING_NICKNAME
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
# zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
if band_name == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = band
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
rband = conf.MODELING_NICKNAME + '_' + band_name
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
rband = conf.MODELING_NICKNAME + '_' + band
# information
bid = blob.blob_id
bsrc = blob.bcatalog[blob.bcatalog['source_id'] == sid]
ra, dec = bsrc['RA'][0], bsrc['DEC'][0]
if nickname == conf.MODELING_NICKNAME:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1], bsrc['y_orig'][0] - blob.subvector[0]
else:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1] - blob.mosaic_origin[1] + conf.BRICK_BUFFER, bsrc['y_orig'][0] - blob.subvector[0] - blob.mosaic_origin[0] + conf.BRICK_BUFFER
xp, yp = src.pos[0], src.pos[1]
xps, yps = xp, yp
flux, flux_err = bsrc[f'FLUX_{band}'][0], bsrc[f'FLUXERR_{band}'][0]
mag, mag_err = bsrc[f'MAG_{band}'][0], bsrc[f'MAGERR_{band}'][0]
n_blob = bsrc['N_BLOB'][0]
chi2 = bsrc[f'CHISQ_{band}'][0]
snr = bsrc[f'SNR_{band}'][0]
is_resolved = False
if src.name not in ('PointSource', 'SimpleGalaxy'):
is_resolved = True
col = np.array(bsrc.colnames)[np.array([tcoln.startswith('REFF') for tcoln in bsrc.colnames])][0]
rband = col[len('REFF_'):]
reff, reff_err = np.exp(bsrc[f'REFF_{rband}'][0])*conf.PIXEL_SCALE, np.exp(bsrc[f'REFF_{rband}'][0])*bsrc[f'REFF_ERR_{rband}'][0]*2.303*conf.PIXEL_SCALE
ab, ab_err = bsrc[f'AB_{rband}'][0], bsrc[f'AB_ERR_{rband}'][0]
if ab == -99.0:
ab = -99
ab_err = -99
theta, theta_err = bsrc[f'THETA_{rband}'][0], bsrc[f'THETA_ERR_{rband}'][0]
if 'Sersic' in src.name:
nre, nre_err = bsrc[f'N_{rband}'][0], bsrc[f'N_ERR_{rband}'][0]
# images
img = blob.images[idx]
wgt = blob.weights[idx]
err = 1. / np.sqrt(wgt)
mask = blob.masks[idx]
seg = blob.segmap.copy()
seg[blob.segmap != sid] = 0
mod = blob.solution_model_images[idx]
chi = blob.solution_tractor.getChiImage(idx)
chi[blob.segmap != sid] = 0
res = img - mod
rms = np.median(blob.background_rms_images[idx])
xpix, ypix = np.nonzero(seg)
dx, dy = (np.max(xpix) - np.min(xpix)) / 2., (np.max(ypix) - np.min(ypix)) / 2.
buff = np.min([conf.BLOB_BUFFER, 10.])
xlim, ylim = np.array([-(dx + buff), (dx + buff)]) * conf.PIXEL_SCALE, np.array([-(dy + buff), (dy + buff)]) * conf.PIXEL_SCALE
h, w = np.shape(img)
dw, dh = w - xp - 1, h - yp - 1
extent = np.array([-xp, dw, -yp, dh]) * conf.PIXEL_SCALE
xp0, yp0 = (xp0 - xp) * conf.PIXEL_SCALE, (yp0 - yp) * conf.PIXEL_SCALE
xp, yp = 0., 0.
if is_resolved:
aeff = reff #* conf.PIXEL_SCALE
beff = reff / ab #* conf.PIXEL_SCALE
xa = xp + np.cos(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
ya = yp + np.sin(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
xb = xp + np.cos(np.deg2rad(theta)) * np.array([-1, 1]) * beff
yb = yp + np.sin(np.deg2rad(theta)) * np.array([1, -1]) * beff
# tests
res_seg = res[blob.segmap==sid].flatten()
try:
k2, p_norm = stats.normaltest(res_seg)
except:
k2, p_norm = -99, -99
chi_seg = chi[blob.segmap==sid].flatten()
chi_sig = np.std(chi_seg)
chi_mu = np.mean(chi_seg)
# plotting
fig, ax = plt.subplots(ncols=4, nrows=4, figsize=(15, 15))
# row 1 -- image, info
if rms > 0.95*np.nanmax(img):
normmin = 1.05*np.nanmin(abs(img))
else:
normmin = rms
normmax = 0.95*np.nanmax(img)
if normmin != normmax:
norm = LogNorm(normmin, normmax, clip='True')
else:
norm=None
ax[0,0].imshow(img, norm=norm, cmap='Greys', extent=extent)
ax[0,0].text(0.05, 1.03, band, transform=ax[0,0].transAxes)
ax[0,0].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[0,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[0,0].set(xlim=xlim, ylim=ylim)
ax[0,1].axis('off')
ax[0,2].axis('off')
ax[0,3].axis('off')
ax[0,1].text(0, 0.90,
s = f'Source: {sid} | Blob: {bid} | Brick: {blob.brick_id} | RA: {ra:6.6f}, Dec: {dec:6.6f}',
transform=ax[0,1].transAxes)
if is_resolved:
if 'Sersic' in src.name:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, n: {nre:3.3f}+/-{nre_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, and Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.50,
s = f'{band} | {flux:3.3f}+/-{flux_err:3.3f} uJy | {mag:3.3f}+/-{mag_err:3.3f} AB | S/N = {snr:3.3f}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.30,
s = f'Chi2/N: {chi2:3.3f} | N_blob: {n_blob} | '+r'$\mu(\chi)$'+f'={chi_mu:3.3f}, '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f} | K2-test: {k2:3.3f}',
transform=ax[0,1].transAxes)
# row 2 -- image, weights, mask, segment
ax[1,0].imshow(img, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[1,0].text(0.05, 1.03, 'Image', transform=ax[1,0].transAxes)
ax[1,0].set(xlim=xlim, ylim=ylim)
ax[1,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,1].imshow(err, cmap='Greys', extent=extent)
ax[1,1].text(0.05, 1.03, r'med($\sigma$)'+f'={rms*10**(-0.4 * (zpt - 23.9)):5.5f} uJy', transform=ax[1,1].transAxes)
ax[1,1].set(xlim=xlim, ylim=ylim)
ax[1,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,2].imshow(mask, cmap='Greys', extent=extent)
ax[1,2].text(0.05, 1.03, 'Blob', transform=ax[1,2].transAxes)
ax[1,2].set(xlim=xlim, ylim=ylim)
ax[1,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,3].imshow(~seg, cmap='Greys', extent=extent)
ax[1,3].text(0.05, 1.03, 'Segment', transform=ax[1,3].transAxes)
ax[1,3].set(xlim=xlim, ylim=ylim)
ax[1,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,0].scatter(xp, yp, c='royalblue', marker='x')
ax[1,2].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,3].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,2].plot()
# row 3 -- image, model, residual, chi
ax[2,0].imshow(img/err, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,0].text(0.05, 1.03, 'S/N', transform=ax[2,0].transAxes)
ax[2,0].set(xlim=xlim, ylim=ylim)
ax[2,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
# ax[2,1].imshow(mod, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,1].imshow(mod, norm=norm, cmap='Greys', extent=extent)
ax[2,1].text(0.05, 1.03, 'Model', transform=ax[2,1].transAxes)
ax[2,1].set(xlim=xlim, ylim=ylim)
ax[2,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,2].imshow(res, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,2].text(0.05, 1.03, 'Residual', transform=ax[2,2].transAxes)
ax[2,2].set(xlim=xlim, ylim=ylim)
ax[2,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,3].imshow(chi, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,3].text(0.05, 1.03, r'$\chi$', transform=ax[2,3].transAxes)
ax[2,3].set(xlim=xlim, ylim=ylim)
ax[2,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
if is_resolved:
ax[2,0].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,0].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,1].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,1].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,2].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,2].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,3].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,3].plot(xb, yb, c='royalblue', alpha=0.7)
else:
ax[2,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,1].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
# row 4 -- psf, x-slice, y-slice, hist
psfmodel = blob.psfimg[band]
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[3,0].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[3,0].axvline(0, ls='dotted', c='k')
ax[3,0].set(xlim=(-5, 5), yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
ax[3,0].text(0.05, 1.03, 'PSF', transform=ax[3,0].transAxes)
# x slice
imgx = blob.images[idx][:, int(xps)]
errx = 1./np.sqrt(blob.weights[idx][:, int(xps)])
modx = blob.solution_model_images[idx][:, int(xps)]
sign = 1
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modxlo = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modxhi = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(yps), :]
erry = 1./np.sqrt(blob.weights[idx][int(yps), :])
mody = blob.solution_model_images[idx][int(yps), :]
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modylo = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modyhi = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resy = imgy - mody
ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
xax = np.linspace(extent[2], extent[3]+conf.PIXEL_SCALE, len(imgx))
ax[3,2].errorbar(xax, imgx, yerr=errx, c='k')
ax[3,2].plot(xax, modx, c='r')
ax[3,2].fill_between(xax, modxlo, modxhi, color='r', alpha=0.3)
ax[3,2].plot(xax, resx, c='g')
ax[3,2].axvline(0, ls='dotted', c='k')
ax[3,2].set(ylim =ylim, xlabel='arcsec', xlim=xlim)
ax[3,2].text(0.05, 1.03, 'Y', transform=ax[3,2].transAxes)
yax = np.linspace(extent[0], extent[1]+conf.PIXEL_SCALE, len(imgy))
ax[3,1].errorbar(yax, imgy, yerr=erry, c='k')
ax[3,1].plot(yax, mody, c='r')
ax[3,1].fill_between(yax, modylo, modyhi, color='r', alpha=0.3)
ax[3,1].plot(yax, resy, c='g')
ax[3,1].axvline(0, ls='dotted', c='k')
ax[3,1].set(ylim=ylim, xlabel='arcsec', xlim=xlim)
ax[3,1].text(0.05, 1.03, 'X', transform=ax[3,1].transAxes)
hist(chi_seg, ax=ax[3,3], bins='freedman', histtype='step', density=True)
ax[3,3].axvline(0, ls='dotted', color='grey')
ax[3,3].text(0.05, 1.03, 'Residual '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f}', transform=ax[3,3].transAxes)
ax[3,3].set(xlim=(-10, 10), xlabel=r'$\chi$')
ax[3,3].axvline(chi_mu, c='royalblue', ls='dashed')
ax[3,3].axvline(0, c='grey', ls='dashed', alpha=0.3)
ax[3,3].axvline(chi_mu-chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(chi_mu+chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(-1, c='grey', ls='dotted', alpha=0.3)
ax[3,3].axvline(1, c='grey', ls='dotted', alpha=0.3)
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_apertures(blob, band=None):
pass
def plot_iterblob(blob, tr, iteration, bands=None):
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
cat = tr.getCatalog()
xp, yp = [src.pos[0] for src in cat], [src.pos[1] for src in cat]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
# image
img = blob.images[idx]
# model
mod = tr.getModelImage(idx)
# residual
res = img - mod
# chi2
chi2 = tr.getChiImage(idx)
fig, ax = plt.subplots(ncols=4)
ax[0].imshow(img, **img_opt)
ax[1].imshow(mod, **img_opt)
ax[2].imshow(res, **img_opt)
ax[3].imshow(chi2, cmap='RdGy', vmin=-5, vmax=5)
fig.suptitle(f'Blob {blob.blob_id} | {band} | iter: {iteration}')
[ax[i].scatter(xp, yp, marker='x', c='royalblue') for i in np.arange(4)]
[ax[i].set_title(title, fontsize=20) for i, title in enumerate(('Image', 'Model', 'Image-Model', '$\chi^{2}$'))]
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_modprofile(blob, band=None):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
psfmodel = blob.psfimg[band]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(mean + 3*rms, blob.images[idx].max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
xlim = (-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
fig, ax = plt.subplots(ncols = 5, nrows = 2, figsize=(20,10))
ax[1,0].imshow(blob.images[idx], **img_opt)
ax[1,1].imshow(blob.solution_model_images[idx], **img_opt)
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[1,2].imshow(residual, **img_opt)
xax = np.arange(-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
[ax[0,0].plot(xax * 0.15, blob.images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.images[idx])[0])]
ax[0,0].axvline(0, ls='dotted', c='k')
ax[0,0].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(blob.solution_model_images[idx])[1]/2, np.shape(blob.solution_model_images[idx])[1]/2)
[ax[0,1].plot(xax * 0.15, blob.solution_model_images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.solution_model_images[idx])[0])]
ax[0,1].axvline(0, ls='dotted', c='k')
ax[0,1].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(residual)[1]/2, np.shape(residual)[1]/2)
[ax[0,2].plot(xax * 0.15, residual[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(residual)[0])]
ax[0,2].axvline(0, ls='dotted', c='k')
ax[0,2].set(yscale='log', xlabel='arcsec')
norm = LogNorm(1e-5, 0.1*np.nanmax(psfmodel), clip='True')
img_opt = dict(cmap='Blues', norm=norm)
ax[1,3].imshow(psfmodel, norm=norm, extent=0.15 *np.array([-np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2, -np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2,]))
ax[1,3].set(xlim=xlim, ylim=xlim)
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[0,3].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[0,3].axvline(0, ls='dotted', c='k')
ax[0,3].set(xlim=xlim, yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
for j, src in enumerate(blob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = blob.solution_chisq[j, idx]
band = band.replace(' ', '_')
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
mag = zpt - 2.5 * np.log10(flux)
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.5
ax[0, 4].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 4].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.3, f' M({band}) = {mag:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.4, f' zpt({band}) = {zpt:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.5, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
ax[0, 4].axis('off')
ax[1, 4].axis('off')
for i in np.arange(3):
ax[0, i].set(xlim=(0.15*xlim[0], 0.15*xlim[1]), ylim=(np.nanmedian(blob.images[idx]), blob.images[idx].max()))
# ax[1, i].set(xlim=(-15, 15), ylim=(-15, 15))
ax[0, 3].set(xlim=(0.15*xlim[0], 0.15*xlim[1]))
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{band}_debugprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{band}_debugprofile.pdf')
logger.info(f'Saving figure: {outpath}')
fig.savefig(outpath)
plt.close()
def plot_xsection(blob, band, src, sid):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
fig, ax = plt.subplots(ncols=2)
posx, posy = src.pos[0], src.pos[1]
try:
# x slice
imgx = blob.images[idx][:, int(posx)]
errx = 1/np.sqrt(blob.weights[idx][:, int(posx)])
modx = blob.solution_model_images[idx][:, int(posx)]
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(posy), :]
erry = 1/np.sqrt(blob.weights[idx][int(posy), :])
mody = blob.solution_model_images[idx][int(posy), :]
resy = imgy - mody
except:
plt.close()
logger.warning('Could not make plot -- object may have escaped?')
return
# idea: show areas outside segment in grey
    ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
import librosa.display
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import fft
import soundfile as sf
from tqdm import tqdm
from nara_wpe.wpe import wpe
from nara_wpe.wpe import get_power
from nara_wpe.utils import stft, istft, get_stft_center_frequencies
from nara_wpe import project_root
plt.figure(dpi=600)  # raise the resolution of all displayed figures
matplotlib.rc("font", family='SimHei')  # use a font that can render Chinese characters
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
def displayWaveform(path):  # plot the speech signal waveform in the time domain
    """
    display the waveform of a given speech sample
    :param path: path to the speech file (loaded at a 16 kHz sample rate)
    :return:
    """
samples, sr = librosa.load(path, sr=16000)
# samples = samples[6000:16000]
print(len(samples), sr)
time = np.arange(0, len(samples)) * (1.0 / sr)
plt.plot(time, samples)
plt.title("Voice signal time domain waveform")
plt.xlabel("Duration (seconds)")
plt.ylabel("amplitude")
    # plt.savefig("your_dir/speech_signal_time_domain_waveform", dpi=600)
plt.show()
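# Example call (illustrative; the path is a placeholder for a local speech file,
# which librosa loads and resamples to 16 kHz):
#   displayWaveform('speech_sample.wav')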
def displaySpectrum(path):  # plot the speech signal spectrum in the frequency domain
x, sr = librosa.load(path, sr=16000)
print(len(x))
# ft = librosa.stft(x)
    # magnitude = np.abs(ft)  # take the modulus (absolute value) of the FFT result to get the magnitude
# frequency = np.angle(ft) # (0, 16000, 121632)
ft = fft(x)
print(len(ft), type(ft), np.max(ft), np.min(ft))
    magnitude = np.absolute(ft)  # take the modulus (absolute value) of the FFT result to get the magnitude
frequency = np.linspace(0, sr, len(magnitude)) # (0, 16000, 121632)
print(len(magnitude), type(magnitude), np.max(magnitude), np.min(magnitude))
    print(len(frequency), type(frequency), np.max(frequency), np.min(frequency))
import os
from glob import glob
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
fig = plt.figure(figsize=(7.2,6))
grid = plt.GridSpec(18, 10, wspace=4, hspace=15)
ax = fig.add_subplot(grid[:9, :5])
ax.text(0.025, 0.966, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
var_dir = '/home/atom/ongoing/work_worldwide/variance'
region_list = os.listdir(var_dir)
region_nmad = []
region_nsamp = []
for region in region_list:
list_fn_csv = [os.path.join(var_dir,region,f) for f in os.listdir(os.path.join(var_dir,region))]
list_nmad = []
list_nsamp = []
for fn_csv in list_fn_csv:
df = pd.read_csv(fn_csv)
list_nmad.append(df.nmad.values)
list_nsamp.append(df.nsamp.values)
nmad_all = np.stack(list_nmad,axis=1)
nsamp_all = np.stack(list_nsamp,axis=1)
nan_mask = np.all(np.logical_or(np.isnan(nmad_all),nmad_all==0),axis=1)
nmad_final = np.nansum(nmad_all * nsamp_all,axis=1) / np.nansum(nsamp_all,axis=1)
nsamp_final = np.nansum(nsamp_all,axis=1)
nmad_final[nan_mask] = np.nan
nsamp_final[nan_mask] = 0
region_nmad.append(nmad_final)
region_nsamp.append(nsamp_final)
# ax.figure(figsize=(16,9))
slope = df.bin_slope.values
corr = df.bin_corr.values
bin_slope = sorted(list(set(list(slope))))
bin_corr = sorted(list(set(list(corr))))
nb_slope = len(bin_slope)
nb_corr = len(bin_corr)
color_list = ['tab:orange','tab:blue','tab:olive','tab:cyan','tab:red','tab:purple','tab:brown','tab:pink','tab:gray','tab:olive']
ls_list = ['solid','dashed','dotted']
# model_var = np.sqrt(3**2 + (20 * np.tan(np.array(5) * np.pi / 180))**2) + (((100-np.array(bin_corr))/100)*20)**1.25
#
# for i in range(len(region_nmad)):
# i = 0
# for j in range(nb_slope-2):
#
# nmad = region_nmad[i]
#
# ax.plot(corr[1:nb_corr],nmad[j*nb_corr+1:j*nb_corr+nb_corr],label='Slope category: '+str(bin_slope[j]-5)+'-'+str(bin_slope[j]+5)+' degrees',color=color_list[j],linestyle=ls_list[i])
#
#
# # ax.plot(bin_corr,model_var,label='model',linewidth=2)
#
# ax.xlabel('Correlation (percent)')
# ax.ylabel('Stable terrain NMAD (m)')
# ax.ylim([0,50])
# ax.legend()
#
x_slope = np.arange(5,45,0.1)
model_var = np.sqrt(3**2 + (40 * np.tan(np.array(x_slope) * np.pi / 180))**2.5 + (((100-np.array(50))/100)*20)**2)
i=0
# for i in range(len(region_nmad)-1):
u=0
for j in np.arange(1,nb_corr,2):
nmad = region_nmad[i]
# ax.plot(bin_slope,nmad[np.arange(j,len(slope),nb_corr)],label='region: '+region_list[i]+', corr: '+str(bin_corr[j]),color=color_list[j],linestyle=ls_list[i])
ax.plot(bin_slope[:-2],nmad[np.arange(j,len(slope)-2*nb_corr,nb_corr)]**2,label='Empirical variance: $q$='+str(int(bin_corr[j]-5))+'-'+str(int(bin_corr[j]+5))+' %',color=color_list[u],linestyle=ls_list[i],marker='o',lw=0.5)
u+=1
model_var = np.sqrt(3**2 + ((20+(((100-np.array(100))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(95))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,label='Modelled: center of above\ncategories',linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(80))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(75))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(60))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(55))/100)*15)**2.5)  # last correlation value assumed, following the 100/95 and 80/75 pattern above
# -*- coding: UTF-8 -*-
"""
Correlation alignment.
Reference:
<NAME>., <NAME>., & <NAME>. (2016, March).
Return of frustratingly easy domain adaptation.
In Thirtieth AAAI Conference on Artificial Intelligence.
:author: <NAME> (2019)
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import numpy as np
import scipy as sp
from sklearn.base import BaseEstimator
from .base import BaseDetector
from ..utils.preprocessing import TransferScaler
# ----------------------------------------------------------------------------
# CORAL class
# ----------------------------------------------------------------------------
class CORAL(BaseEstimator, BaseDetector):
""" Correlation alignment algorithm.
Parameters
----------
scaling : str (default='standard')
Scale the source and target domain before transfer.
Standard scaling is indicated in the paper.
Attributes
----------
type_ : str
The type of transfer learning (e.g., domain adaptation).
    X_trans_ : np.array of shape (<= n_samples, n_features)
        The (transformed) source instances that are transferred.
    Ixs_trans_ : np.array of shape (<= n_samples,)
        The indices of the instances selected for transfer.
"""
def __init__(self,
scaling='standard',
tol=1e-8,
verbose=False):
super().__init__(
scaling=scaling,
tol=tol,
verbose=verbose)
# type
self.type_ = 'domain_adaptation'
def fit(self, Xs=None, Xt=None, ys=None, yt=None):
""" Fit the model on data X.
Parameters
----------
Xs : np.array of shape (n_samples, n_features), optional (default=None)
The source instances.
Xt : np.array of shape (n_samples, n_features), optional (default=None)
The target instances.
ys : np.array of shape (n_samples,), optional (default=None)
The ground truth of the source instances.
yt : np.array of shape (n_samples,), optional (default=None)
The ground truth of the target instances.
Returns
-------
self : object
"""
# check all inputs
Xs, Xt, ys, yt = self._check_all_inputs(Xs, Xt, ys, yt)
ns, nfs = Xs.shape
nt, nft = Xt.shape
# align means: feature normalization/standardization!
self.target_scaler_ = TransferScaler(self.scaling)
self.source_scaler_ = TransferScaler(self.scaling)
Xt = self.target_scaler_.fit_transform(Xt)
Xs = self.source_scaler_.fit_transform(Xs)
# align covariances: denoising - noising transformation
        Cs = np.cov(Xs.T)
"""This file contains all the utility functions required for
simulation
.. module:: utilities
    :synopsis: utility functions used in simulation
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pandas as pd
def get_cov(pos, rsq, eta, p, lmd):
"""Compute covariance from given parameters
Args:
pos (list): Position of relevant components
rsq (list): Coefficient of determination
eta (float): Decay factor of eigenvalues corresponding to response matrix
p (int): Number of predictor variables
        lmd (list): Decay factor of eigenvalues corresponding to the predictor matrix
Returns:
A covariance value with non-zero at position defined at
``pos`` and zero at other places
>>> len(get_cov([1, 2, 3], 0.8, 1, 5, [1. , 0.5 , 0.25, 0.12, 0.06]))
5
This always return an array of length equals to length of predictor
"""
pos = [x - 1 for x in pos]
out = np.zeros(p)
alpha_ = np.random.uniform(-1.0, 1.0, len(pos))
    alpha = np.sign(alpha_) * np.sqrt(rsq * np.abs(alpha_))
#
# BSD 3-Clause License
#
# Copyright (c) 2019, Analog Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import aditofpython as tof
import tof_calib.device as device
import numpy as np
import numpy.ma as ma
import pandas as pd
import os
import time
from natsort import natsorted, ns
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from shutil import copyfile
import re
import tof_calib.gen_delays as gd
import core.frame as frame
import itertools as it
import seaborn as sns
import logging
from scipy import stats
#temp lib add
import cv2
def get_TAL_values(dev): #Ret: [TAL1_R, TAL2_R, ... , TAL6_F, TAL7_F]
TAL_val = device.read_AFE_reg(dev, 0xc740, 14)
for ind, x in enumerate(TAL_val):
if (x >> 11) == 1:
#print(ind)
TAL_val[ind] = TAL_val[ind] | 0xF000
return TAL_val.astype('int16')
def get_pulse_count_regs(file_path):
regList = []
with open(file_path) as f:
for rep_data in f:
            hexaPattern = re.compile(r'\b[0-9a-fA-F]{4}\b')  # matches the first 4-digit (16-bit) hex address on a line
m = re.search(hexaPattern, rep_data)
if m:
regList.append(int(m.group(0), 16))
return regList
def generate_distance_list(target_distance, min_dist, max_dist, dist_step):
'''
Generate the list of distance steps given a target. min, max and step.
The distance has to pivot based on the target_distance.
e.g. target_distance = 10, min_dist = 3, max_dist = 15, dist_step = 3
then the returned list is: 4, 7, 10, 13
'''
dist_list = np.arange(target_distance, min_dist, -dist_step)[::-1]
dist_list = np.append(dist_list, np.arange(target_distance, max_dist, dist_step)[1::])
return dist_list
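# Worked example of the pivoting behaviour described in the docstring above
# (illustrative; mirrors the documented case):
#   generate_distance_list(10, 3, 15, 3)  ->  array([ 4,  7, 10, 13])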
def show_image(name, image):
'''
Useful for debug: stream image from camera
'''
plt.figure(name)
plt.clf()
plt.imshow(image, cmap='gray')
plt.draw()
plt.pause(0.00001)
def stream_init(image):
ax = plt.subplot(111)
im = ax.imshow(image)
plt.ion()
plt.show()
return im
def stream_update(im, image):
im.set_data(image)
def stream_off(im):
nothing = 0
def data_stats(curr_delay, curr_dist, depth_crop, ir_crop, frame_count, window, scale_factor, sw_gain, sw_offset, cam_handle):
# Mask Saturated/0 Pixels
depth_masked = ma.masked_equal(depth_crop, 0)
satCount = ma.count_masked(depth_masked)
if satCount == frame_count * window['X'] * window['Y']:
meas_depth = 0
else:
meas_depth = np.mean(depth_masked)
meas_depth_14b = meas_depth*scale_factor
expected_depth_14b = (curr_dist*scale_factor)
correction_offset_14b = (((curr_dist - sw_offset) / sw_gain)*4) - meas_depth_14b
meas_depth_adj = (np.median(depth_crop) * sw_gain) + sw_offset
meas_ir = np.median(ir_crop)
#Get TAL Values
TAL_values = get_TAL_values(cam_handle)
depth_std = np.std(depth_crop) * sw_gain
depth_noise = 100 * depth_std / meas_depth_adj
ir_std = np.std(ir_crop)
ir_noise = 100 * ir_std / meas_ir
error = meas_depth_adj - curr_dist
return [curr_delay, curr_dist, meas_depth, meas_depth_adj, error, meas_ir, \
satCount, depth_std, depth_noise, ir_std, ir_noise, \
TAL_values[0], TAL_values[7], TAL_values[1], TAL_values[8], TAL_values[2], TAL_values[9], TAL_values[3], TAL_values[10], TAL_values[4], TAL_values[11], \
expected_depth_14b, meas_depth_14b, correction_offset_14b]
def pulse_sweep(delay_dict, min_dist, max_dist, dist_step, target_distance, dist_interval, raw_frame_dict, frame_dict, frame_count, window, scale_factor, sw_gain, sw_offset, cam_handle):
logger = logging.getLogger(__name__)
logger.info('Running Delay Sweep')
min_delay = min(delay_dict.keys())
max_delay = max(delay_dict.keys())
    delay_list = np.arange(min_delay, max_delay)
"""combining models
This module is about chapter14.
AdaBoost, CARTRegressor, CARTClassifier, LinearMixture, LogisticMixture are implemented.
Todo:
In LogisticMixture, inverse matrix cannot be calculated
"""
import numpy as np
from prml.utils.util import _log,sigmoid
from prml.linear_classifier import Classifier,_logistic_regression_base
from prml.linear_regression import Regression
class AdaBoost(Classifier):
"""AdaBoost
    The weak learner is a decision stump.
    Attributes:
        M (int): number of weak learners
        weak_learner (list): list of dicts describing each fitted weak learner
"""
def __init__(self,M=5) -> None:
"""__init__
Args:
            M (int): number of weak learners
"""
super(AdaBoost,self).__init__()
self.M = M
def fit(self,X,y):
"""fit
only accept N_dim = 2 data
Args:
X (2-D array): shape = (N_samples,2),
            y (1-D array or 2-D array): if 1-D, y should be label-encoded; if 2-D, one-hot-encoded. Must be 2-class data.
"""
y = self._onehot_to_label(y)
y[y == 0.0] = -1.0
y = y.astype("int")
N = len(X)
sort_idx = np.argsort(X,axis=0)
weight = np.ones(N)/N
weak_learner = [None]*self.M
for i in range(self.M):
x_border,x_more_or_less,x_score = self._weak_learn(X[:,0],sort_idx[:,0],y,weight)
y_border,y_more_or_less,y_score = self._weak_learn(X[:,1],sort_idx[:,1],y,weight)
if x_score < y_score:
ax = "x"
border,more_or_less = x_border,x_more_or_less
else:
ax = "y"
border,more_or_less = y_border,y_more_or_less
miss = self._miss_idx(X,y,ax,border,more_or_less)
eps = np.sum(miss*weight)/np.sum(weight)
alpha = _log((1 - eps)/eps)
weight *= np.exp(alpha*miss)
weak_learner[i] = {
"ax":ax,
"border":border,
"more_or_less":more_or_less,
"alpha":alpha
}
self.weak_learner = weak_learner
def _weak_learn(self,X,sort_idx,y,weight):
weight_sum = weight.sum()
        more_score = weight[y != 1].sum() # score when all data is assigned 1
border,more_or_less,score = X[sort_idx[0]]-1,"more",more_score
for i in range(len(X)):
if y[sort_idx[i]] == 1:
more_score += weight[sort_idx[i]]
else:
more_score -= weight[sort_idx[i]]
less_score = weight_sum - more_score
if more_score < score:
border,more_or_less,score = X[sort_idx[i]],"more",more_score
if less_score < score:
border,more_or_less,score = X[sort_idx[i]],"less",less_score
return border,more_or_less,score
def _miss_idx(self,X,y,ax,border,more_or_less):
y_pred = self._predict(X,ax,border,more_or_less)
return (y_pred != y).astype("int")
def _predict(self,X,ax,border,more_or_less):
if more_or_less == "more":
if ax == "x":
class1 = X[:,0] > border
elif ax == "y":
class1 = X[:,1] > border
elif more_or_less == "less":
if ax == "x":
class1 = X[:,0] <= border
elif ax == 'y':
class1 = X[:,1] <= border
pred = np.zeros(len(X)) - 1
pred[class1] = 1
return pred
def predict(self,X):
"""predict
Args:
X (2-D array): explanatory variable, shape = (N_samples,2)
Returns:
            1-D array or 2-D array: label-encoded if y was 1-D at fit time, one-hot-encoded if it was 2-D.
"""
y_pred = np.zeros(len(X))
for i in range(self.M):
pred = self._predict(
X,
self.weak_learner[i]["ax"],
self.weak_learner[i]["border"],
self.weak_learner[i]["more_or_less"],
)
y_pred += self.weak_learner[i]["alpha"]*pred
y_pred = np.sign(y_pred)
return self._inverse_transform(y_pred)
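# Minimal usage sketch for AdaBoost (illustrative only; the helper name below is
# not part of the original module, and it assumes the prml base Classifier
# accepts plain 0/1 integer labels as documented in fit()).
def _adaboost_demo():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 2))             # 2-D toy features
    y = (X[:, 0] + X[:, 1] > 0).astype(int)   # linearly separable 0/1 labels
    clf = AdaBoost(M=10)
    clf.fit(X, y)
    return clf.predict(X)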
class CARTRegressor():
"""CARTRegressor
Attributes:
        lamda (float): regularization parameter
        tree (list): list of node dicts describing the fitted tree
"""
def __init__(self,lamda=1e-2):
"""__init__
Args:
            lamda (float): regularization parameter
"""
self.lamda = lamda
def fit(self,X,y):
"""fit
Args:
X (2-D array) : explanatory variable,shape = (N_samples,N_dim)
y (1-D array) : target variable, shape = (N_samples)
"""
N = len(X)
leaves = np.zeros(N)
num_nodes = 1
num_leaves = 1
tree = []
while True:
if num_leaves == 0:
break
for leaf in range(num_nodes-num_leaves,num_nodes):
idx = np.arange(N)[leaf == leaves]
if len(idx) == 1:
num_leaves -= 1
tree.append({
"border": None,
"target": y[idx][0]
}) # has no child
continue
ax,border,score,more_index,less_index = -1,None,1e20,None,None
for m in range(X.shape[1]):
now_border,now_score,now_more_index,now_less_index = self._find_boundry(idx,X[idx,m],y[idx])
if now_score < score:
ax,border,score,more_index,less_index = m,now_border,now_score,now_more_index,now_less_index
if border is None:
num_leaves -= 1
tree.append({
"border": None,
"target": y[idx].mean()
}) # has no child
continue
tree.append({
"left_index": num_nodes,
"right_index": num_nodes+1,
"border": border,
"ax": ax
})
leaves[less_index] = num_nodes
leaves[more_index] = num_nodes+1
num_nodes += 2
num_leaves += 1
self.tree = tree
def _find_boundry(self,idx,X,y):
n = len(idx)
sort_idx = np.argsort(X)
all_sum = np.sum(y)
right_sum = all_sum
# when all data is in one leaf
score_now = self._error_function(y,right_sum/n) + self.lamda
border_index,score = None,score_now
pred = np.zeros(n)
for i in range(n-1):
right_sum -= y[sort_idx[i]]
left_sum = all_sum - right_sum
pred[sort_idx[i+1:]] = right_sum/(n-i-1)
pred[sort_idx[:i+1]] = left_sum/(i+1)
score_now = self._error_function(y,pred) + self.lamda*2
if score_now < score:
border_index,score = i,score_now
if border_index is None: # no division
return None,1e20,None,None
border = X[sort_idx[border_index]]
more_index = idx[sort_idx[border_index+1:]]
less_index = idx[sort_idx[:border_index+1]]
return border,score,more_index,less_index
def _error_function(self,y,pred):
return np.mean((y-pred)**2)
def _predict(self,X,p_id=0):
if self.tree[p_id]["border"] is None:
return np.zeros(len(X)) + self.tree[p_id]["target"]
ax = self.tree[p_id]["ax"]
border = self.tree[p_id]["border"]
y = np.zeros(len(X))
y[X[:,ax] > border] = self._predict(X[X[:,ax] > border],p_id=self.tree[p_id]["right_index"])
y[X[:,ax] <= border] = self._predict(X[X[:,ax] <= border],p_id=self.tree[p_id]["left_index"])
return y
def predict(self,X):
"""predict
Args:
X (2-D array) : explanatory variable, shape = (N_samples,N_dim)
Returns:
1-D array: predictive value
"""
y = self._predict(X)
return y
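# Minimal usage sketch for CARTRegressor (illustrative; the helper name is not
# part of the original module).
def _cart_regressor_demo():
    rng = np.random.default_rng(0)
    X = rng.uniform(-1, 1, size=(200, 2))                  # 2-D features
    y = np.sin(3 * X[:, 0]) + 0.1 * rng.normal(size=200)   # noisy target
    model = CARTRegressor(lamda=1e-2)
    model.fit(X, y)
    return model.predict(X)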
class CARTClassifier(Classifier):
"""CARTClassifier
Attributes:
        lamda (float): regularization parameter
error_function (str): "gini" or "error_rate" or "cross_entropy"
"""
def __init__(self,lamda=1e-2,error_function="gini"):
"""__init__
        Args:
            lamda (float): regularization parameter
error_function (str): "gini" or "error_rate" or "cross_entropy"
"""
super(CARTClassifier,self).__init__()
self.lamda = lamda
self.error_function = error_function
def fit(self,X,y):
"""fit
Args:
X (2-D array): shape = (N_samples,2),
            y (1-D array or 2-D array): if 1-D, y should be label-encoded; if 2-D, one-hot-encoded. Must be 2-class data.
"""
N = len(X)
y = self._onehot_to_label(y).ravel()
leaves = np.zeros(N)
num_nodes = 1
num_leaves = 1
tree = []
while True:
if num_leaves == 0:
break
for leaf in range(num_nodes-num_leaves,num_nodes):
idx = np.arange(N)[leaf == leaves]
if len(np.unique(y[idx])) == 1:
num_leaves -= 1
tree.append({
"border": None,
"target": y[idx][0]
}) # has no child
continue
ax,border,score,more_index,less_index = -1,None,1e20,None,None
for m in range(X.shape[1]):
now_border,now_score,now_more_index,now_less_index = self._find_boundry(idx,X[idx,m],y[idx])
if now_score < score:
ax,border,score,more_index,less_index = m,now_border,now_score,now_more_index,now_less_index
if border is None:
num_leaves -= 1
tree.append({
"border": None,
"target": round(y[idx].mean())
}) # has no child
continue
tree.append({
"left_index": num_nodes,
"right_index": num_nodes+1,
"border": border,
"ax": ax
})
leaves[less_index] = num_nodes
leaves[more_index] = num_nodes+1
num_nodes += 2
num_leaves += 1
self.tree = tree
def _find_boundry(self,idx,X,y):
n = len(idx)
sort_idx = np.argsort(X)
all_sum = np.sum(y)
right_sum = all_sum
# when all data is in one leaf,
# score_now = self._error_function(y,round(right_sum/n)) + self.lamda
# border_index,score = None,score_now
border_index,score = None,1e20
pred = np.zeros(n)
for i in range(n-1):
right_sum -= y[sort_idx[i]]
left_sum = all_sum - right_sum
pred[sort_idx[i+1:]] = round(right_sum/(n-i-1))
pred[sort_idx[:i+1]] = round(left_sum/(i+1))
score_now = self._error_function(y,pred) + self.lamda*2
if score_now < score:
border_index,score = i,score_now
if border_index is None: # no division
return None,1e20,None,None
border = X[sort_idx[border_index]]
more_index = idx[sort_idx[border_index+1:]]
less_index = idx[sort_idx[:border_index+1]]
return border,score,more_index,less_index
def _error_function(self,y,pred):
if self.error_function == "error_rate":
return (y != pred).astype("int").sum()/len(y)
elif self.error_function == "gini":
u = np.unique(pred)
err = 0
for cl in u:
_,count = np.unique(y[pred == cl],return_counts=True)
class_rate = count/count.sum()
err += np.sum(class_rate*(1 - class_rate))
return err
elif self.error_function == "cross_entropy":
u = np.unique(pred)
err = 0
for cl in u:
_,count = np.unique(y[pred == cl],return_counts=True)
class_rate = count/count.sum()
err -= np.sum(class_rate*np.log(class_rate))
return err
else:
raise ValueError(f"there is no error function whose name is {self.error_function}")
def _predict(self,X,p_id=0):
if self.tree[p_id]["border"] is None:
return np.zeros(len(X)) + self.tree[p_id]["target"]
ax = self.tree[p_id]["ax"]
border = self.tree[p_id]["border"]
y = np.zeros(len(X))
y[X[:,ax] > border] = self._predict(X[X[:,ax] > border],p_id=self.tree[p_id]["right_index"])
y[X[:,ax] <= border] = self._predict(X[X[:,ax] <= border],p_id=self.tree[p_id]["left_index"])
return y
def predict(self,X):
"""predict
Args:
X (2-D array) : explanatory variable, shape = (N_samples,2)
Returns:
            1-D array or 2-D array: label-encoded if y was 1-D at fit time, one-hot-encoded if it was 2-D.
"""
y = self._predict(X)
return self._inverse_transform(y)
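# Minimal usage sketch for CARTClassifier (illustrative; the helper name is not
# part of the original module, and 0/1 integer labels are assumed as in fit()).
def _cart_classifier_demo():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 2))
    y = (X[:, 0] * X[:, 1] > 0).astype(int)   # XOR-like 0/1 labels
    model = CARTClassifier(lamda=1e-2, error_function="gini")
    model.fit(X, y)
    return model.predict(X)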
class LinearMixture(Regression):
"""LinearMixture
Attributes:
        K (int): number of mixture models
max_iter (int): max iteration
threshold (float): threshold for EM algorithm
        pi (1-D array): mixing coefficients (probability of each component being chosen)
        weight (2-D array): shape = (K,M), M is the dimension of the feature space; regression weights
beta (float): precision parameter
"""
def __init__(self,K=3,max_iter=100,threshold=1e-3,basis_function="gauss",mu=None,s=None,deg=None):
super(LinearMixture,self).__init__(basis_function,mu,s,deg)
self.K = K
self.max_iter = max_iter
self.threshold = threshold
def _gauss(self,x,mu,beta):
        return (beta/(2*np.pi))**0.5 * np.exp(-beta/2*(x-mu)**2)
import numpy as np
from matplotlib import pyplot as plt
from sdca4crf.parameters.sparse_weights import SparsePrimalDirection
from sdca4crf.parameters.weights import WeightsWithoutEmission
from sdca4crf.utils import letters2wordimage
class DenseWeights(WeightsWithoutEmission):
"""Implement the weights of the model.
Support all the operations necessary for the CRF and the optimization.
Is also used to store the primal direction for dense data.
"""
def __init__(self, emission=None, bias=None, transition=None,
nb_labels=0, nb_features=0, is_dataset_sparse=False):
super().__init__(bias=bias, transition=transition, nb_labels=nb_labels)
self.is_dataset_sparse = is_dataset_sparse
self.emission = np.zeros([nb_labels, nb_features]) if emission is None else emission
# BUILD THE WEIGHTS FROM DATA
def add_datapoint(self, points_sequence, labels_sequence):
super().add_datapoint(points_sequence, labels_sequence)
if self.is_dataset_sparse:
for point, label in zip(points_sequence, labels_sequence):
self.emission[label, point[point >= 0]] += 1
else:
for point, label in zip(points_sequence, labels_sequence):
self.emission[label] += point
def add_centroid(self, points_sequence, marginals):
if marginals.islog:
marginals = marginals.exp()
super().add_centroid(points_sequence, marginals)
if self.is_dataset_sparse: # slow?
for point, unimarginal in zip(points_sequence, marginals.unary):
self.emission[:, point[point >= 0]] += unimarginal[:, np.newaxis]
else:
            self.emission += np.dot(marginals.unary.T, points_sequence)
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 13:51:29 2021
@author: <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, \
confusion_matrix, ConfusionMatrixDisplay
df = pd.read_excel('classification_features.xlsx')
# Get names of indexes to drop
indexNames = df[df['Class'] == 'Banana'].index
# Delete these row indexes from dataFrame
df.drop(indexNames, inplace=True)
# Replacing Mango and Orange with 1 and 0
df = df.replace(['Mango','Orange'], [1,0])
# splitting the test and training data set
mango_df_train = df[df['Class']==1].head(18)
mango_df_test = df[df['Class']==1].tail(17)
orange_df_train = df[df['Class']==0].head(18)
orange_df_test = df[df['Class']==0].tail(17)
df_train = pd.concat([mango_df_train, orange_df_train], axis=0)
df_test = pd.concat([mango_df_test, orange_df_test], axis=0)
train_samples = df_train.shape[0] # no. of samples of train set
test_samples = df_test.shape[0] # no. of samples of test set
# input features + bias
x1_train = df_train[['Normalized Hue','NormRound']].values
x0_train = np.ones((train_samples,1))
x_train = np.concatenate((x0_train,x1_train), axis=1)
x1_test = df_test[['Normalized Hue','NormRound']].values
x0_test = np.ones((test_samples,1))
#!/usr/bin/env python
#
# Copyright (C) 2014 <NAME>, <NAME>,
# Regents of the University of California
#
# This implementation utilizes code and methods from Riccardo Vianello
# as well as code structure and inspiration from geoalchemy2
import base64
import contextlib
import functools
import numbers
import operator
import types
import numpy as np
from sqlalchemy import event, Table, bindparam, func
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression, functions, type_coerce, elements, operators
from sqlalchemy.types import (
UserDefinedType,
_Binary,
TypeDecorator,
BINARY,
Float,
Integer,
String,
Text,
)
from sqlalchemy.dialects.postgresql.base import (
DOUBLE_PRECISION,
ischema_names,
)
from rdkit import Chem, DataStructs
from rdkit.Chem import Descriptors, rdchem
from rdkit.Chem.Draw import MolToImage
from rdkit.Chem.rdDistGeom import EmbedMolecule
# _all_bytes = str.maketrans('', '')
class ChemistryError(ValueError):
pass
class CustomEqualityBinaryExpression_HACK(elements.BinaryExpression):
custom_opstrings = ()
def __bool__(self):
try:
return super(CustomEqualityBinaryExpression_HACK, self).__bool__()
except TypeError:
if self.operator.opstring in self.custom_opstrings:
return operators.eq(hash(self._orig[0]), hash(self._orig[1]))
else:
raise
__nonzero__ = __bool__
@classmethod
def _override_expr(cls, source, opstrings):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. Its also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = cls.__new__(cls)
c.__dict__ = source.__dict__.copy()
elements.ClauseElement._cloned_set._reset(c)
elements.ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = source
c.custom_opstrings = opstrings
return c
def _remove_control_characters(data):
if not isinstance(data, str):
raise ValueError("Data must be a string")
else:
data = str(data)
# return data.translate(_all_bytes, _all_bytes[:32])
# return data.translate(_all_bytes)
    return data.encode('ascii', errors='ignore').decode('ascii')
## Datatype Conversions
## TODO: This should be reorganized... do we even want
## automatic conversion from ctab to mol?
## Break these out into separate files for each type
## Mol conversions ##################################################
def ensure_mol(mol, sanitize=True):
if not isinstance(mol, (Chem.Mol, rdchem.Mol)):
raise ValueError("Not already an instance of rdkit.Chem.Mol")
if sanitize:
try:
Chem.SanitizeMol(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
def extract_mol_element(mol, sanitize=True):
if hasattr(mol, 'as_mol'):
mol = mol.as_mol
elif hasattr(mol, 'mol'):
mol = mol.mol
else:
raise ValueError("Not an instance of RawMolElement or compatible")
if sanitize:
try:
Chem.SanitizeMol(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
def smiles_to_mol(smiles, sanitize=True):
smiles = _remove_control_characters(smiles)
mol = Chem.MolFromSmiles(smiles, sanitize=False)
if mol is None:
raise ValueError("Failed to parse SMILES: `{0}`".format(smiles))
if sanitize:
try:
Chem.SanitizeMol(mol)
Chem.AssignStereochemistry(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
def smarts_to_mol(smarts, sanitize=True):
    smarts = _remove_control_characters(smarts)
mol = Chem.MolFromSmarts(smarts, mergeHs=True)
if mol is None:
raise ValueError("Failed to parse SMARTS: `{0}`".format(smarts))
if sanitize:
Chem.SanitizeMol(mol, catchErrors=True)
Chem.AssignStereochemistry(mol)
return mol
def binary_to_mol(data, sanitize=True):
try:
mol = Chem.Mol(data)
except Exception: # This is a proxy for Boost.Python.ArgumentError
raise ValueError("Invalid binary mol data: `{0}`".format(data))
if sanitize:
try:
Chem.SanitizeMol(mol)
Chem.AssignStereochemistry(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
def ctab_to_mol(data, sanitize=True):
    data = _remove_control_characters(data)
mol = Chem.MolFromMolBlock(data, sanitize=sanitize, removeHs=sanitize)
if mol is None:
raise ValueError("Failed to parse CTAB")
if sanitize:
try:
Chem.SanitizeMol(mol)
Chem.AssignStereochemistry(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
def inchi_to_mol(inchi, sanitize=True):
    inchi = _remove_control_characters(inchi)
mol = Chem.MolFromInchi(inchi, sanitize=sanitize, removeHs=sanitize)
if mol is None:
raise ValueError("Failed to parse InChI: `{0}`".format(inchi))
if sanitize:
try:
Chem.SanitizeMol(mol)
Chem.AssignStereochemistry(mol)
except ValueError as e:
raise ChemistryError(str(e))
return mol
# Want to maintain order
MOL_PARSERS = [
('mol', ensure_mol),
('element', extract_mol_element),
('binary', binary_to_mol),
('smiles', smiles_to_mol),
('smarts', smarts_to_mol),
('ctab', ctab_to_mol),
('inchi', inchi_to_mol),
]
def attempt_mol_coersion(data, sanitize=True, exclude=()):
# RDKit doesn't like Unicode
if isinstance(data, str):
data = str(data)
# Record all parsing errors
errors = []
# Try all known mol parsers
for fmt, parser in MOL_PARSERS:
if fmt in exclude:
errors.append("Explicitly skipping {}".format(fmt))
continue
try:
mol = parser(data, sanitize=sanitize)
return fmt, mol
except ChemistryError as error:
errors.append(str(error))
break
except ValueError as error:
errors.append(str(error))
raise ValueError("Failed to convert `{0}` to mol. Errors were: {1}".format(data, ", ".join(errors)))
def coerce_to_mol(data, sanitize=True, exclude=()):
fmt, mol = attempt_mol_coersion(data, sanitize=sanitize, exclude=exclude)
return mol
def infer_mol_format(data, sanitize=True, exclude=()):
fmt, mol = attempt_mol_coersion(data, sanitize=sanitize, exclude=exclude)
return fmt
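# Illustrative behaviour of the coercion helpers above (assumes a working RDKit
# install; the exact Mol objects depend on the installed RDKit version):
#   infer_mol_format('c1ccccc1')                  -> 'smiles'
#   infer_mol_format(Chem.MolFromSmiles('CCO'))   -> 'mol'
#   coerce_to_mol('c1ccccc1')                     -> sanitized rdkit.Chem.Mol (benzene)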
## BFP Conversions ##################################################
def chunks(xs, k):
n = len(xs)
for i in range(0, n, k):
yield xs[i:i+k]
def byte_from_hex(value):
return int(value, 16)
def byte_to_hex(value):
return "{:02x}".format(value)
def ensure_bfp(value, size=None):
if not isinstance(value, DataStructs.ExplicitBitVect):
raise ValueError("Not already a bfp (rdkit.DataStructs.ExplicitBitVect)")
if size is not None and size != value.GetNumBits():
raise Exception("BFP size does not match expected {0}".format(size))
return value
def extract_bfp_element(value, size=None):
if hasattr(value, 'as_bfp'):
value = value.as_bfp
else:
raise ValueError("Not already a bfp element (or compatable)")
if size is not None and size != value.GetNumBits():
raise ValueError("BFP size does not match expected {0}".format(size))
return value
def bfp_from_raw_binary_text(raw, size=None):
vect = DataStructs.CreateFromBinaryText(raw)
    if size is not None and vect.GetNumBits() != size:
raise ValueError("BFP size does not match expected {0}".format(size))
return vect
def bfp_to_raw_binary_text(bfp):
return bfp.ToBinary()
def bytes_from_binary_text(binary_text):
if not isinstance(binary_text, str):
raise ValueError("Binary text must be a string")
if binary_text.startswith(r'\x'):
binary_text = binary_text[2:]
else:
raise ValueError("Binary text must be hex-encoded and prefixed with '\\x'")
byte_chunks = list(chunks(binary_text, 2))
byte_values = list(map(byte_from_hex, byte_chunks))
values = np.array(byte_values, dtype=np.uint8)
return values
def bytes_to_binary_text(byte_values):
hex_chars = list(map(byte_to_hex, byte_values))
binary_text = r'\x' + ''.join(hex_chars)
return binary_text
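# Round-trip example for the two helpers above (illustrative):
#   bytes_to_binary_text([0, 255, 16])    -> '\\x00ff10' (literal backslash-x prefix)
#   bytes_from_binary_text(r'\x00ff10')   -> array([  0, 255,  16], dtype=uint8)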
def bytes_from_chars(chars):
byte_values = list(map(ord, chars))
    values = np.array(byte_values, dtype=np.uint8)
    return values
import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings
from numpy import newaxis
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
from nltk.stem.wordnet import WordNetLemmatizer
from tensorflow.python.layers.core import Dense
from nltk.corpus import stopwords
from multiprocessing import Pool
from collections import Counter
from pprint import pprint
from keras.models import Model
from keras.layers import *
from keras.optimizers import *
from keras.models import model_from_json
from keras.models import load_model
from keras.callbacks import *
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from copy import deepcopy
#eager.enable_eager_execution()
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
train_original = "train.en"
train_translated = "train.vi"
test_original = "tst2013.en"
test_translated = "tst2013.vi"
word_number_mapping_file = "word_mappings.txt"
processed_original = "translated_numeric.txt"
processed_translated = "original_numeric.txt"
modelDir = './model/'
modelFileName = 'Eldin_Sahbaz_Model.ckpt'
# this commented-out filtering code splits the text at the character level rather than the word level
'''
def filter_symbols(original_input, translated_input):
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', ' ', zero(text))
two = lambda text: list(one(text)) #[character for character in list(one(text)) if (character not in ['-', '\\', '/', '.', '—', '…', '...', '?', ',', '<', '>', '\"', ';', ':', '[', ']', '{', '}', '|', '=', '+', '_', '*', '&', '^', '%', '$', '#', '@', '!', '`', '~'])]
return (two(original_input), two(translated_input))
except:
return None
def filter_symbols_test(input_text):
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', ' ', zero(text))
two = lambda text: list(one(text)) #[character for character in list(one(text)) if (character not in ['-', '\\', '/', '.', '—', '…', '...', '?', ',', '<', '>', '\"', ';', ':', '[', ']', '{', '}', '|', '=', '+', '_', '*', '&', '^', '%', '$', '#', '@', '!', '`', '~'])]
return two(input_text)
except:
return None
'''
#In this function, I convert all contractions (converting can't to cannot and so on), tokenize the sentences at the word
#level, lemmatize the words and remove the stop words from the input text but not from the translated text
def filter_symbols(original_input, translated_input):
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
table = str.maketrans({key: None for key in string.punctuation})
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', '.', zero(text))
two = lambda text: nltk.word_tokenize(one(text))
three_1 = lambda text: list(filter(lambda x: x, [lemmatizer.lemmatize(word1.translate(table)) for word1 in two(text) if ((word1 not in stop_words))]))
three_2 = lambda text: list(filter(lambda x: x, [lemmatizer.lemmatize(word1.translate(table)) for word1 in two(text)]))
return (three_1(original_input), three_2(translated_input))
except:
return None
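# Quick sanity check (illustrative; the exact tokens depend on the installed
# NLTK stopword list and WordNet lemmatizer data):
#   src, tgt = filter_symbols("I can't see the two cats...", "toi thay hai con meo")
#   # src: lemmatized English tokens with stop words and punctuation removed
#   # tgt: lemmatized target tokens with punctuation removed but stop words kept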
#This is the same as the function above except for it only takes the input text that is used for testing
def filter_symbols_test(input_text):
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
table = str.maketrans({key: None for key in string.punctuation})
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', '.', zero(text))
two = lambda text: nltk.word_tokenize(one(text))
three = lambda text: list(filter(lambda x: x, [lemmatizer.lemmatize(word1.translate(table)) for word1 in two(text) if ((word1 not in stop_words))]))
return(three(input_text))
except:
return None
#Here load the data files, use multiprocessing to apply filtering to all the data, then store the filtered files
def clean_data(original, translated):
cleaned = None
with open(original, 'r', encoding="utf8") as file:
original_data = file.read().split('\n')
with open(translated, 'r', encoding="utf8") as file:
translated_data = file.read().split('\n')
data = list(zip(original_data, translated_data))
pool = Pool()
cleaned = pool.starmap(filter_symbols, data)
pool.close()
pool.join()
    cleaned = [pair for pair in cleaned if pair and pair[0] and pair[1]]  # drop failed or empty sentence pairs, keeping alignment
    original_text, translated_text = map(list, zip(*cleaned))
with open("filtered_data", 'wb') as file: pickle.dump(cleaned, file)
return (original_text, translated_text)
def convert_text(original_text, translated_text, cutoff):
original_DNS = {'forward':{'<PAD>':0, '<UNK>':1, '<EOS>':2, '<GO>':3}, 'backward':{0:'<PAD>', 1:'<UNK>', 2:'<EOS>', 3:'<GO>'}}
translated_DNS = {'forward': {'<PAD>': 0, '<UNK>': 1, '<EOS>': 2, '<GO>': 3}, 'backward': {0: '<PAD>', 1: '<UNK>', 2: '<EOS>', 3: '<GO>'}}
original_words = list()
translated_words = list()
stop_words = set(stopwords.words('english'))
converted_original, converted_translated = list(), list()
#aggregate all the words into a list
for sentence in original_text: original_words.extend(sentence)
for sentence in translated_text: translated_words.extend(sentence)
original_word_frequencies = [x for x in sorted(Counter(original_words).items(), key=lambda x: x[1], reverse=True) if ((x[1] >= cutoff) and (x[0] not in stop_words))]
translated_word_frequencies = [x for x in sorted(Counter(translated_words).items(), key=lambda x: x[1], reverse=True) if (x[1] >= cutoff)]
# create mapping for word -> int and for int -> word for the first language
if original_word_frequencies:
words, freqs = list(zip(*original_word_frequencies))
original_DNS['forward'].update(dict(zip(words, list(range(len(original_DNS['forward']), len(words)+len(original_DNS['forward']))))))
original_DNS['backward'].update({v: k for k, v in original_DNS['forward'].items()})
# create mapping for word -> int and for int -> word for the second language
if translated_word_frequencies:
words, freqs = list(zip(*translated_word_frequencies))
translated_DNS['forward'].update(dict(zip(words, list(range(len(translated_DNS['forward']), len(words)+len(translated_DNS['forward']))))))
translated_DNS['backward'].update({v: k for k, v in translated_DNS['forward'].items()})
#Compute the translation to int for the full text
for sentence in original_text:
temp_sentence = list()
temp_sentence.append(original_DNS['forward']['<GO>'])
for word in sentence:
try: temp_sentence.append(original_DNS['forward'][word])
except : temp_sentence.append(original_DNS['forward']['<UNK>'])
temp_sentence.append(original_DNS['forward']['<EOS>'])
converted_original.append(temp_sentence)
for sentence in translated_text:
temp_sentence = list()
temp_sentence.append(translated_DNS['forward']['<GO>'])
for word in sentence:
try: temp_sentence.append(translated_DNS['forward'][word])
except : temp_sentence.append(translated_DNS['forward']['<UNK>'])
temp_sentence.append(translated_DNS['forward']['<EOS>'])
converted_translated.append(temp_sentence)
#These lines of code get some statistics about the dataset
original_text_lengths, translated_text_lengths, original_unk_counts, translated_unk_counts = list(), list(), list(), list()
#90th percentile of original text lengths
for sentence in converted_original: original_text_lengths.append(len(sentence))
original_text_pd = pd.DataFrame(original_text_lengths, columns=['counts'])
max_original_length = int(np.percentile(original_text_pd.counts, 90))
#90th percentile of translated text lengths
for sentence in converted_translated: translated_text_lengths.append(len(sentence))
translated_text_pd = pd.DataFrame(translated_text_lengths, columns=['counts'])
max_translated_length = int(np.percentile(translated_text_pd.counts, 90))
#5th percentile for minimum text length
data_pd = pd.DataFrame(original_text_lengths + translated_text_lengths, columns=['counts'])
min_length = int(np.percentile(data_pd.counts, 5))
#5th percentile for minimum unknown token limit in original text
for sentence in converted_original: original_unk_counts.append(Counter(sentence)[original_DNS['forward']['<UNK>']])
original_pd = pd.DataFrame(original_unk_counts, columns=['counts'])
unk_original_limit = int(np.percentile(original_pd.counts, 5))
#5th percentile for minimum unknown token limit in translated text
for sentence in converted_translated: translated_unk_counts.append(Counter(sentence)[translated_DNS['forward']['<UNK>']])
translated_pd = pd.DataFrame(translated_unk_counts, columns=['counts'])
unk_translated_limit = int(np.percentile(translated_pd.counts, 5))
#truncate all the text and pad them with 0s
truncated_original_text, truncated_translated_text = list(), list()
#padding here is done in the front because the string is reversed
for sentence in converted_original:
temp = sentence[:max_original_length]
temp[-1] = original_DNS['forward']['<EOS>']
temp = list(reversed(temp))
if len(temp) < max_original_length: temp[0:0] = [original_DNS['forward']['<PAD>']]*(max_original_length-len(temp))
truncated_original_text.append(temp)
#padding here is done at the end
for sentence in converted_translated:
temp = sentence[:max_translated_length]
temp[-1] = translated_DNS['forward']['<EOS>']
if len(temp) < max_translated_length: temp[len(temp):len(temp)] = [translated_DNS['forward']['<PAD>']]*(max_translated_length-len(temp))
truncated_translated_text.append(temp)
#remove samples that have too many unknown tokens
cleaned_truncated_original, cleaned_truncated_translated = list(), list()
for original, translated in list(zip(truncated_original_text, truncated_translated_text)):
original_count, translated_count = Counter(original), Counter(translated)
if ((original_count[original_DNS['forward']['<UNK>']] <= unk_original_limit) and (translated_count[translated_DNS['forward']['<UNK>']] <= unk_translated_limit) and (len(original) >= min_length) and (len(translated) >= min_length)):
cleaned_truncated_original.append(original)
cleaned_truncated_translated.append(translated)
return (original_DNS, translated_DNS, np.array(cleaned_truncated_original), np.array(cleaned_truncated_translated), max_original_length, max_translated_length, min_length, unk_original_limit, unk_translated_limit)
def convert_text_test(original_text, translated_text, original_DNS, translated_DNS, max_original_length, max_translated_length):
converted_original, converted_translated = list(), list()
# Compute the translation to int for the full text
for sentence in original_text:
temp_sentence = list()
temp_sentence.append(original_DNS['forward']['<GO>'])
for word in sentence:
try: temp_sentence.append(original_DNS['forward'][word])
            except KeyError: temp_sentence.append(original_DNS['forward']['<UNK>'])
temp_sentence.append(original_DNS['forward']['<EOS>'])
converted_original.append(temp_sentence)
for sentence in translated_text:
temp_sentence = list()
temp_sentence.append(translated_DNS['forward']['<GO>'])
for word in sentence:
try: temp_sentence.append(translated_DNS['forward'][word])
            except KeyError: temp_sentence.append(translated_DNS['forward']['<UNK>'])
temp_sentence.append(translated_DNS['forward']['<EOS>'])
converted_translated.append(temp_sentence)
# Compute the truncated version of the texts above
truncated_original_text, truncated_translated_text = list(), list()
for sentence in converted_original:
temp = sentence[:max_original_length]
temp[-1] = original_DNS['forward']['<EOS>']
temp = list(reversed(temp))
if len(temp) < max_original_length: temp[0:0] = [original_DNS['forward']['<PAD>']] * (max_original_length - len(temp))
truncated_original_text.append(temp)
for sentence in converted_translated:
temp = sentence[:max_translated_length]
temp[-1] = translated_DNS['forward']['<EOS>']
if len(temp) < max_translated_length: temp[len(temp):len(temp)] = [translated_DNS['forward']['<PAD>']] * (max_translated_length - len(temp))
truncated_translated_text.append(temp)
    return (np.array(truncated_original_text), np.array(truncated_translated_text))
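
# Usage sketch for convert_text_test (the variable names below are illustrative
# assumptions, not names taken from the surrounding pipeline):
#   test_original_ints, test_translated_ints = convert_text_test(
#       test_original_sentences, test_translated_sentences,
#       original_DNS, translated_DNS, max_original_length, max_translated_length)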
"""PyFstat search & follow-up classes using MCMC-based methods
The general approach is described in
Ashton & Prix (PRD 97, 103020, 2018):
https://arxiv.org/abs/1802.05450
and we use the `ptemcee` sampler
described in Vousden et al. (MNRAS 455, 1919-1937, 2016):
https://arxiv.org/abs/1501.05823
and based on Foreman-Mackey et al. (PASP 125, 306, 2013):
https://arxiv.org/abs/1202.3665
Defining the prior
##################
The MCMC based searches (i.e. `pyfstat.MCMC*`) require a prior specification for each model parameter,
implemented via a `python dictionary <https://docs.python.org/tutorial/datastructures.html#dictionaries>`_.
This is best explained through a simple example, here is the prior for a *directed* search with a *uniform*
prior on the frequency and a *normal* prior on the frequency derivative:
.. code-block:: python
theta_prior = {'F0': {'type': 'unif',
'lower': 29.9,
'upper': 30.1},
'F1': {'type': 'norm',
'loc': 0,
'scale': 1e-10},
'F2': 0,
'Alpha': 2.3,
'Delta': 1.8
}
For the sky positions ``Alpha`` and ``Delta``, we give the fixed values (i.e. they are considered *known* by
the MCMC simulation); the same is true for ``F2``, the second frequency derivative, which we fix at ``0``.
Meanwhile, for the frequency ``F0`` and first frequency derivative ``F1`` we give a dictionary specifying their
prior distribution. This dictionary must contain three arguments: the ``type`` (in this case either ``unif`` or
``norm``) which specifies the type of distribution, then two shape arguments. The shape parameters will depend
on the ``type`` of distribution, but here we use ``lower`` and ``upper``, required for the ``unif`` prior while
``loc`` and ``scale`` are required for the ``norm`` prior.
Currently, two other types of prior are implemented: ``halfnorm``, ``neghalfnorm`` (both of which require ``loc``
and ``scale`` shape parameters). Further priors can be added by modifying ``pyfstat.MCMCSearch._generic_lnprior``.
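
Such a dictionary is then passed as the first argument to one of the search classes.
As a purely illustrative sketch (the reference time, file pattern and other values below
are made up, not defaults):

.. code-block:: python

    mcmc = pyfstat.MCMCSearch(theta_prior=theta_prior,
                              tref=1000000000,
                              label='directed_search',
                              outdir='data',
                              sftfilepattern='data/*.sft')
    mcmc.run()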
"""
import sys
import os
import copy
import logging
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from ptemcee import Sampler as PTSampler
import corner
import dill as pickle
from scipy.stats import lognorm
import pyfstat.core as core
from pyfstat.core import BaseSearchClass, tqdm, args
import pyfstat.optimal_setup_functions as optimal_setup_functions
import pyfstat.helper_functions as helper_functions
class MCMCSearch(BaseSearchClass):
"""
MCMC search using ComputeFstat.
Evaluates the coherent F-statistic across a parameter space region
corresponding to an isolated/binary-modulated CW signal.
"""
symbol_dictionary = dict(
F0=r"$f$",
F1=r"$\dot{f}$",
F2=r"$\ddot{f}$",
Alpha=r"$\alpha$",
Delta=r"$\delta$",
asini=r"asini",
period=r"P",
ecc=r"ecc",
tp=r"tp",
argp=r"argp",
)
"""
Key, val pairs of the parameters (`F0`, `F1`, ...), to LaTeX math
symbols for plots
"""
unit_dictionary = dict(
F0=r"Hz",
F1=r"Hz/s",
F2=r"Hz/s$^2$",
Alpha=r"rad",
Delta=r"rad",
asini="",
period=r"s",
ecc="",
tp=r"s",
argp="",
)
"""
Key, val pairs of the parameters (i.e. `F0`, `F1`), and the
units (i.e. `Hz`)
"""
transform_dictionary = {}
"""
    Key, val pairs of the parameters (i.e. `F0`, `F1`), where the value is
    itself a dictionary which can contain the items `multiplier`, `subtractor`, or
    `unit` by which to transform the parameter and update its units.
"""
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
RngMedWindow=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
transientWindowType=None,
tCWFstatMapVersion="lal",
earth_ephem=None,
sun_ephem=None,
allowedMismatchFromSFTLength=None,
):
"""
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
For each parameters (key of the dict), if it is to be held fixed
the value should be the constant float, if it is be searched, the
value should be a dictionary of the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
            is required, minStartTime and maxStartTime default to None, in which
case all available data is used.
label, outdir: str
A label and output directory (optional, default is `data`) to
name files
sftfilepattern: str, optional
Pattern to match SFTs using wildcards (`*?`) and ranges [0-9];
            multiple patterns can be given, separated by colons.
detectors: str, optional
Two character reference to the detectors to use, specify None for no
            constraint, and comma-separated strings for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
            The number of walkers and temperatures to use in the parallel
tempered PTSampler.
log10beta_min: float < 0, optional
The log_10(beta) value. If given, the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
            A dictionary of distributions from which to draw the initial
            walker positions.
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary orbital parameters.
BSGL: bool, optional
If true, use the BSGL statistic.
        SSBprec: int, optional
SSBPrec (SSB precision) to use when calling ComputeFstat. See `core.ComputeFstat`.
RngMedWindow: int, optional
Running-Median window size (number of bins) for ComputeFstat. See `core.ComputeFstat`.
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput. See `core.ComputeFstat`.
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search. See `core.ComputeFstat`.
assumeSqrtSX: float or list or str
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}.
See `core.ComputeFstat`.
transientWindowType: str
If 'rect' or 'exp',
compute atoms so that a transient (t0,tau) map can later be computed.
('none' instead of None explicitly calls the transient-window function,
but with the full range, for debugging). See `core.ComputeFstat`.
Currently only supported for nsegs=1.
tCWFstatMapVersion: str
Choose between standard 'lal' implementation,
'pycuda' for gpu, and some others for devel/debug.
allowedMismatchFromSFTLength: float
Maximum allowed mismatch from SFTs being too long
[Default: what's hardcoded in XLALFstatMaximumSFTLength].
"""
self._set_init_params_dict(locals())
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.RngMedWindow = RngMedWindow
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.transientWindowType = transientWindowType
self.tCWFstatMapVersion = tCWFstatMapVersion
self.set_ephemeris_files(earth_ephem, sun_ephem)
self.allowedMismatchFromSFTLength = allowedMismatchFromSFTLength
os.makedirs(outdir, exist_ok=True)
self.output_file_header = self.get_output_file_header()
self._add_log_file(self.output_file_header)
logging.info("Set-up MCMC search for model {}".format(self.label))
if sftfilepattern:
logging.info("Using data {}".format(self.sftfilepattern))
else:
logging.info("No sftfilepattern given")
if injectSources:
logging.info("Inject sources: {}".format(injectSources))
self.pickle_path = os.path.join(self.outdir, self.label + "_saved_data.p")
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
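            # np.logspace(0, log10beta_min, ntemps) yields inverse temperatures
            # descending from 1 down to 10**log10beta_min, as expected by ptemcee.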
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._set_likelihoodcoef()
self._log_input()
def _set_likelihoodcoef(self):
"""Additional constant terms to turn a detection statistic into a likelihood.
In general, the (log-)likelihood can be obtained from the signal-to-noise
(log-)Bayes factor
(omitting the overall Gaussian-noise normalization term)
but the detection statistic may only be a monotonic function of the
Bayes factor, not the full thing.
E.g. this is the case for the standard CW F-statistic!
"""
if self.BSGL:
# In this case, the corresponding term is already included
# in the detection statistic itself.
# See Eq. (36) in Keitel et al (PRD 89, 064023, 2014):
# https://arxiv.org/abs/1311.5738
# where Fstar0 = ln(cstar) = ln(rhohatmax**4/70).
# We just need to switch to natural log basis.
self.likelihooddetstatmultiplier = np.log(10)
self.likelihoodcoef = 0
else:
# If assuming only Gaussian noise + signal,
# the likelihood is essentially the F-statistic,
# but with an extra constant term depending on the amplitude prior.
# See Eq. (9) of Ashton & Prix (PRD 97, 103020, 2018):
# https://arxiv.org/abs/1802.05450
# Also need to go from twoF to F.
self.likelihooddetstatmultiplier = 0.5
self.likelihoodcoef = np.log(70.0 / self.rhohatmax ** 4)
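        # Either way, _logl() below evaluates
        #   logL = detstat * likelihooddetstatmultiplier + likelihoodcoef,
        # i.e. 0.5 * twoF + ln(70 / rhohatmax**4) in the plain F-statistic case.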
def _log_input(self):
logging.info("theta_prior = {}".format(self.theta_prior))
logging.info("nwalkers={}".format(self.nwalkers))
logging.info("nsteps = {}".format(self.nsteps))
logging.info("ntemps = {}".format(self.ntemps))
logging.info("log10beta_min = {}".format(self.log10beta_min))
def _get_search_ranges(self):
"""take prior widths as proxy "search ranges" to allow covering band estimate"""
if (self.minCoverFreq is None) or (self.maxCoverFreq is None):
normal_stds = 3 # this might not always be enough
prior_bounds, norm_trunc_warn = self._get_prior_bounds(normal_stds)
if norm_trunc_warn:
logging.warning(
"Gaussian priors (normal / half-normal) have been truncated"
" at {:f} standard deviations for estimating the coverage"
" frequency band. If sampling fails at any point, please"
" consider manually setting [minCoverFreq,maxCoverFreq] to"
" more generous values.".format(normal_stds)
)
# first start with parameters that have non-delta prior ranges
search_ranges = {
key: [bound["lower"], bound["upper"]]
for key, bound in prior_bounds.items()
}
# then add fixed-point (delta prior) parameters
for key in self.theta_prior:
if key not in self.theta_keys:
search_ranges[key] = [self.theta_prior[key]]
return search_ranges
else:
return None
def _initiate_search_object(self):
logging.info("Setting up search object")
search_ranges = self._get_search_ranges()
self.search = core.ComputeFstat(
tref=self.tref,
sftfilepattern=self.sftfilepattern,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
search_ranges=search_ranges,
detectors=self.detectors,
BSGL=self.BSGL,
transientWindowType=self.transientWindowType,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
binary=self.binary,
injectSources=self.injectSources,
assumeSqrtSX=self.assumeSqrtSX,
SSBprec=self.SSBprec,
RngMedWindow=self.RngMedWindow,
tCWFstatMapVersion=self.tCWFstatMapVersion,
earth_ephem=self.earth_ephem,
sun_ephem=self.sun_ephem,
allowedMismatchFromSFTLength=self.allowedMismatchFromSFTLength,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def _logp(self, theta_vals, theta_prior, theta_keys, search):
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def _set_point_for_evaluation(self, theta):
"""Combines fixed and variable parameters to form a valid evaluation point.
Parameters
----------
theta: list or np.ndarray
The sampled (variable) parameters.
Returns
-------
p: list
The full parameter space point as a list.
"""
p = copy.copy(self.fixed_theta)
for j, theta_i in enumerate(self.theta_idxs):
p[theta_i] = theta[j]
return p
def _logl(self, theta, search):
in_theta = self._set_point_for_evaluation(theta)
detstat = search.get_det_stat(*in_theta)
return detstat * self.likelihooddetstatmultiplier + self.likelihoodcoef
def _unpack_input_theta(self):
self.full_theta_keys = ["F0", "F1", "F2", "Alpha", "Delta"]
if self.binary:
self.full_theta_keys += ["asini", "period", "ecc", "tp", "argp"]
full_theta_keys_copy = copy.copy(self.full_theta_keys)
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in self.full_theta_keys]
self.theta_idxs = [self.full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [self.symbol_dictionary[k] for k in self.theta_keys]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
self.output_keys = self.theta_keys.copy()
self.output_keys.append("twoF")
if self.BSGL:
self.output_keys.append("log10BSGL")
def _evaluate_logpost(self, p0vec):
init_logp = np.array(
[
self._logp(p, self.theta_prior, self.theta_keys, self.search)
for p in p0vec
]
)
init_logl = np.array([self._logl(p, self.search) for p in p0vec])
return init_logl + init_logp
def _check_initial_points(self, p0):
for nt in range(self.ntemps):
logging.info("Checking temperature {} chains".format(nt))
num = sum(self._evaluate_logpost(p0[nt]) == -np.inf)
if num > 0:
logging.warning(
"Of {} initial values, {} are -np.inf due to the prior".format(
len(p0[0]), num
)
)
p0 = self._generate_new_p0_to_fix_initial_points(p0, nt)
def _generate_new_p0_to_fix_initial_points(self, p0, nt):
logging.info("Attempting to correct intial values")
init_logpost = self._evaluate_logpost(p0[nt])
idxs = np.arange(self.nwalkers)[init_logpost == -np.inf]
count = 0
while sum(init_logpost == -np.inf) > 0 and count < 100:
for j in idxs:
p0[nt][j] = p0[nt][np.random.randint(0, self.nwalkers)] * (
1 + np.random.normal(0, 1e-10, self.ndim)
)
init_logpost = self._evaluate_logpost(p0[nt])
count += 1
if sum(init_logpost == -np.inf) > 0:
logging.info("Failed to fix initial priors")
else:
logging.info("Suceeded to fix initial priors")
return p0
def setup_initialisation(self, nburn0, scatter_val=1e-10):
"""Add an initialisation step to the MCMC run
If called prior to `run()`, adds an intial step in which the MCMC
simulation is run for `nburn0` steps. After this, the MCMC simulation
continues in the usual manner (i.e. for nburn and nprod steps), but the
walkers are reset scattered around the maximum likelihood position
of the initialisation step.
Parameters
----------
nburn0: int
Number of initialisation steps to take.
scatter_val: float
Relative number to scatter walkers around the maximum likelihood
position after the initialisation step. If the maximum likelihood
point is located at `p`, the new walkers are randomly drawn from a
multivariate gaussian distribution centered at `p` with standard
deviation `diag(scatter_val * p)`.
"""
logging.info(
"Setting up initialisation with nburn0={}, scatter_val={}".format(
nburn0, scatter_val
)
)
self.nsteps = [nburn0] + self.nsteps
self.scatter_val = scatter_val
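        # e.g. nsteps=[100, 100] with nburn0=50 becomes nsteps=[50, 100, 100];
        # run() then treats all but the last two entries as initialisation stages.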
def _run_sampler(self, p0, nprod=0, nburn=0, window=50):
for result in tqdm(
self.sampler.sample(p0, iterations=nburn + nprod), total=nburn + nprod
):
pass
self.mean_acceptance_fraction = np.mean(
self.sampler.acceptance_fraction, axis=1
)
logging.info(
"Mean acceptance fraction: {}".format(self.mean_acceptance_fraction)
)
if self.ntemps > 1:
self.tswap_acceptance_fraction = self.sampler.tswap_acceptance_fraction
logging.info(
"Tswap acceptance fraction: {}".format(
self.sampler.tswap_acceptance_fraction
)
)
self.autocorr_time = self.sampler.get_autocorr_time(window=window)
logging.info("Autocorrelation length: {}".format(self.autocorr_time))
def _estimate_run_time(self):
"""Print the estimated run time
Uses timing coefficients based on a Lenovo T460p Intel(R)
Core(TM) i5-6300HQ CPU @ 2.30GHz.
"""
# Todo: add option to time on a machine, and move coefficients to
# ~/.pyfstat.conf
if (
type(self.theta_prior["Alpha"]) == dict
or type(self.theta_prior["Delta"]) == dict
):
tau0LD = 5.2e-7
tau0T = 1.5e-8
tau0S = 1.2e-4
tau0C = 5.8e-6
else:
tau0LD = 1.3e-7
tau0T = 1.5e-8
tau0S = 9.1e-5
tau0C = 5.5e-6
Nsfts = (self.maxStartTime - self.minStartTime) / 1800.0
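        # number of SFTs, assuming the standard SFT duration of 1800 s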
if hasattr(self, "run_setup"):
ts = []
for row in self.run_setup:
nsteps = row[0]
nsegs = row[1]
numb_evals = np.sum(nsteps) * self.nwalkers * self.ntemps
t = (tau0S + tau0LD * Nsfts) * numb_evals
if nsegs > 1:
t += (tau0C + tau0T * Nsfts) * nsegs * numb_evals
ts.append(t)
time = np.sum(ts)
else:
numb_evals = np.sum(self.nsteps) * self.nwalkers * self.ntemps
time = (tau0S + tau0LD * Nsfts) * numb_evals
if getattr(self, "nsegs", 1) > 1:
time += (tau0C + tau0T * Nsfts) * self.nsegs * numb_evals
logging.info(
"Estimated run-time = {} s = {:1.0f}:{:1.0f} m".format(
time, *divmod(time, 60)
)
)
def run(
self,
proposal_scale_factor=2,
save_pickle=True,
export_samples=True,
save_loudest=True,
plot_walkers=True,
walker_plot_args=None,
window=50,
):
"""Run the MCMC simulatation
Parameters
----------
proposal_scale_factor: float
The proposal scale factor `a > 1` used by the sampler.
            See Goodman & Weare (Comm App Math Comp Sci, Vol 5, No. 1, 2010): 10.2140/camcos.2010.5.65.
The bigger the value, the wider the range to draw proposals from.
If the acceptance fraction is too low, you can raise it by
decreasing the `a` parameter; and if it is too high, you can reduce
it by increasing the `a` parameter.
            See Foreman-Mackey et al. (PASP 125 306, 2013): https://arxiv.org/abs/1202.3665.
save_pickle: bool
If true, save a pickle file of the full sampler state.
export_samples: bool
If true, save ASCII samples file to disk. See `MCMCSearch.export_samples_to_disk`.
save_loudest: bool
If true, save a CFSv2 .loudest file to disk. See `MCMCSearch.generate_loudest`.
plot_walkers: bool
If true, save trace plots of the walkers.
walker_plot_args:
Dictionary passed as kwargs to _plot_walkers to control the plotting.
Histogram of sampled detection statistic values can be retrieved setting "plot_det_stat" to `True`.
Parameters corresponding to an injected signal can be passed through "injection_parameters"
as a dictionary containing the parameters of said signal. All parameters being searched for must
be present, otherwise this option is ignored.
If both "fig" and "axes" entries are set, the plot is not saved to disk
directly, but (fig, axes) are returned.
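            For instance, a hypothetical call could use
            ``walker_plot_args={"plot_det_stat": True, "injection_parameters": {"F0": 30.0, "F1": -1e-10}}``
            (values purely illustrative, assuming only ``F0`` and ``F1`` are searched).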
window: int
The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see
            ptemcee.Sampler.get_autocorr_time for further details).
"""
self._initiate_search_object()
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
if self.old_data_is_okay_to_use is True:
logging.warning("Using saved data from {}".format(self.pickle_path))
d = self.get_saved_data_dictionary()
self.samples = d["samples"]
self.lnprobs = d["lnprobs"]
self.lnlikes = d["lnlikes"]
self.all_lnlikelihood = d["all_lnlikelihood"]
self.chain = d["chain"]
return
self._estimate_run_time()
walker_plot_args = walker_plot_args or {}
self.sampler = PTSampler(
ntemps=self.ntemps,
nwalkers=self.nwalkers,
dim=self.ndim,
logl=self._logl,
logp=self._logp,
logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,),
betas=self.betas,
a=proposal_scale_factor,
)
p0 = self._generate_initial_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
# Run initialisation steps if required
ninit_steps = len(self.nsteps) - 2
for j, n in enumerate(self.nsteps[:-2]):
logging.info(
"Running {}/{} initialisation with {} steps".format(j, ninit_steps, n)
)
self._run_sampler(p0, nburn=n, window=window)
if plot_walkers:
try:
walker_fig, walker_axes = self._plot_walkers(**walker_plot_args)
walker_fig.tight_layout()
walker_fig.savefig(
os.path.join(
self.outdir, "{}_init_{}_walkers.png".format(self.label, j)
)
)
plt.close(walker_fig)
except Exception as e:
logging.warning(
"Failed to plot initialisation walkers due to Error {}".format(
e
)
)
p0 = self._get_new_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
self.sampler.reset()
if len(self.nsteps) > 1:
nburn = self.nsteps[-2]
else:
nburn = 0
nprod = self.nsteps[-1]
logging.info("Running final burn and prod with {} steps".format(nburn + nprod))
self._run_sampler(p0, nburn=nburn, nprod=nprod)
samples = self.sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = self.sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = self.sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = self.sampler.loglikelihood[:, :, nburn:]
self.samples = samples
self.chain = self.sampler.chain
self.lnprobs = lnprobs
self.lnlikes = lnlikes
self.all_lnlikelihood = all_lnlikelihood
if save_pickle:
self._pickle_data(samples, lnprobs, lnlikes, all_lnlikelihood)
if export_samples:
self.export_samples_to_disk()
if save_loudest:
self.generate_loudest()
if plot_walkers:
try:
walkers_fig, walkers_axes = self._plot_walkers(
nprod=nprod, **walker_plot_args
)
walkers_fig.tight_layout()
except Exception as e:
logging.warning("Failed to plot walkers due to Error {}".format(e))
if (walker_plot_args.get("fig") is not None) and (
walker_plot_args.get("axes") is not None
):
self.walker_fig = walkers_fig
self.walker_axes = walkers_axes
else:
try:
walkers_fig.savefig(
os.path.join(self.outdir, self.label + "_walkers.png")
)
plt.close(walkers_fig)
except Exception as e:
logging.warning(
"Failed to save walker plots due to Error {}".format(e)
)
def _get_rescale_multiplier_for_key(self, key):
"""Get the rescale multiplier from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent,
        in which case 1 is returned.
"""
if key not in self.transform_dictionary:
return 1
if "multiplier" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["multiplier"]
if type(val) == str:
if hasattr(self, val):
multiplier = getattr(
self, self.transform_dictionary[key]["multiplier"]
)
else:
raise ValueError("multiplier {} not a class attribute".format(val))
else:
multiplier = val
else:
multiplier = 1
return multiplier
def _get_rescale_subtractor_for_key(self, key):
"""Get the rescale subtractor from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent,
        in which case 0 is returned.
"""
if key not in self.transform_dictionary:
return 0
if "subtractor" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["subtractor"]
if type(val) == str:
if hasattr(self, val):
subtractor = getattr(
self, self.transform_dictionary[key]["subtractor"]
)
else:
raise ValueError("subtractor {} not a class attribute".format(val))
else:
subtractor = val
else:
subtractor = 0
return subtractor
def _scale_samples(self, samples, theta_keys):
"""Scale the samples using the transform_dictionary"""
for key in theta_keys:
if key in self.transform_dictionary:
idx = theta_keys.index(key)
s = samples[:, idx]
subtractor = self._get_rescale_subtractor_for_key(key)
s = s - subtractor
multiplier = self._get_rescale_multiplier_for_key(key)
s *= multiplier
samples[:, idx] = s
return samples
def _get_labels(self, newline_units=False):
"""Combine the units, symbols and rescaling to give labels"""
labels = []
for key in self.theta_keys:
values = self.transform_dictionary.get(key, {})
s, label, u = [
values.get(slu_key, None) for slu_key in ["symbol", "label", "unit"]
]
if label is None:
s = s or self.symbol_dictionary[key].replace(
"_{glitch}", r"_\mathrm{glitch}"
)
u = u or self.unit_dictionary[key]
label = (
f"{s}"
+ ("\n" if newline_units else " ")
+ (f"[{u}]" if u != "" else "")
)
labels.append(label)
return labels
def plot_corner(
self,
figsize=(10, 10),
add_prior=False,
nstds=None,
label_offset=0.4,
dpi=300,
rc_context={},
tglitch_ratio=False,
fig_and_axes=None,
save_fig=True,
**kwargs,
):
"""Generate a corner plot of the posterior
Using the `corner` package (https://pypi.python.org/pypi/corner/),
generate estimates of the posterior from the production samples.
Parameters
----------
        figsize: tuple (10, 10)
Figure size in inches (passed to plt.subplots)
add_prior: bool, str
If true, plot the prior as a red line. If 'full' then for uniform
priors plot the full extent of the prior.
nstds: float
The number of standard deviations to plot centered on the median.
Standard deviation is computed from the samples using `numpy.std`.
label_offset: float
Offset the labels from the plot: useful to prevent overlapping the
tick labels with the axis labels. This option is passed to `ax.[x|y]axis.set_label_coords`.
dpi: int
Passed to plt.savefig.
rc_context: dict
Dictionary of rc values to set while generating the figure (see
matplotlib rc for more details).
tglitch_ratio: bool
If true, and tglitch is a parameter, plot posteriors as the
fractional time at which the glitch occurs instead of the actual
time.
fig_and_axes: tuple
(fig, axes) tuple to plot on. The axes must be of the right shape,
namely (ndim, ndim)
save_fig: bool
If true, save the figure, else return the fig, axes.
**kwargs:
Passed to corner.corner. Use "truths" to plot the true parameters of a signal.
Returns
-------
fig, axes:
The matplotlib figure and axes, only returned if save_fig = False.
"""
if "truths" in kwargs:
if not isinstance(kwargs["truths"], dict):
raise ValueError("'truths' must be a dictionary.")
missing_keys = set(self.theta_keys) - kwargs["truths"].keys()
if missing_keys:
logging.warning(
f"plot_corner(): Missing keys {missing_keys} in 'truths' dictionary,"
" argument will be ignored."
)
kwargs["truths"] = None
else:
kwargs["truths"] = [kwargs["truths"][key] for key in self.theta_keys]
kwargs["truths"] = self._scale_samples(
np.reshape(kwargs["truths"], (1, -1)), self.theta_keys
).ravel()
if "truth_color" not in kwargs:
kwargs["truth_color"] = "black"
if self.ndim < 2:
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig, ax = fig_and_axes
ax.hist(self.samples, bins=50, histtype="stepfilled")
ax.set_xlabel(self.theta_symbols[0])
fig.savefig(os.path.join(self.outdir, self.label + "_corner.png"), dpi=dpi)
plt.close(fig)
return
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, axes = plt.subplots(self.ndim, self.ndim, figsize=figsize)
else:
fig, axes = fig_and_axes
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=False)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if tglitch_ratio:
for j, k in enumerate(self.theta_keys):
if k == "tglitch":
s = samples_plt[:, j]
samples_plt[:, j] = (s - self.minStartTime) / (
self.maxStartTime - self.minStartTime
)
labels[j] = r"$R_{\mathrm{glitch}}$"
if type(nstds) is int and "range" not in kwargs:
_range = []
for j, s in enumerate(samples_plt.T):
median = np.median(s)
std = np.std(s)
_range.append((median - nstds * std, median + nstds * std))
elif "range" in kwargs:
_range = kwargs.pop("range")
else:
_range = None
hist_kwargs = kwargs.pop("hist_kwargs", dict())
if "density" not in hist_kwargs:
hist_kwargs["density"] = True
fig_triangle = corner.corner(
samples_plt,
labels=labels,
fig=fig,
bins=50,
max_n_ticks=4,
plot_contours=True,
plot_datapoints=True,
label_kwargs={"fontsize": 12},
data_kwargs={"alpha": 0.1, "ms": 0.5},
range=_range,
hist_kwargs=hist_kwargs,
show_titles=True,
fill_contours=True,
quantiles=[0.05, 0.95]
if "quantiles" not in kwargs
else kwargs.pop("quantiles"),
verbose=True if "verbose" not in kwargs else kwargs.pop("verbose"),
**kwargs,
)
axes_list = fig_triangle.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
for tick in ax.xaxis.get_major_ticks():
# tick.label1.set_fontsize(8)
tick.label1.set_rotation(30)
for tick in ax.yaxis.get_major_ticks():
# tick.label1.set_fontsize(8)
tick.label1.set_rotation(30)
plt.tight_layout()
fig.subplots_adjust(hspace=0.1, wspace=0.1)
if add_prior:
self._add_prior_to_corner(axes, self.samples, add_prior)
if save_fig:
fig_triangle.savefig(
os.path.join(self.outdir, self.label + "_corner.png"), dpi=dpi
)
plt.close(fig_triangle)
else:
return fig, axes
def plot_chainconsumer(self, save_fig=True, label_offset=0.25, dpi=300, **kwargs):
"""Generate a corner plot of the posterior using the `chaniconsumer` package.
`chainconsumer` is an optional dependency of PyFstat. See https://samreay.github.io/ChainConsumer/.
Parameters are akin to the ones described in MCMCSearch.plot_corner.
Only the differing parameters are explicitly described.
Parameters
----------
**kwargs:
Passed to chainconsumer.plotter.plot. Use "truths" to plot the true parameters of a signal.
"""
try:
import chainconsumer
except ImportError:
logging.warning(
"Could not import 'chainconsumer' package, please install it to use this method."
)
return
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if "truth" in kwargs:
if not isinstance(kwargs["truth"], dict):
raise ValueError("'truth' must be a dictionary.")
missing_keys = np.setdiff1d(self.theta_keys, list(kwargs["truth"].keys()))
if len(missing_keys) > 0:
logging.warning(
"plot_chainconsumer(): Missing keys {} in 'truth' dictionary,"
" argument will be ignored.".format(missing_keys)
)
kwargs["truth"] = None
else:
parameters_in_order = np.array(
[kwargs["truth"][key] for key in self.theta_keys]
).reshape((1, -1))
kwargs["truth"] = self._scale_samples(
parameters_in_order, self.theta_keys
).ravel()
c = chainconsumer.ChainConsumer()
c.add_chain(samples_plt, parameters=labels)
# We set usetex=False to avoid dependency on 'kpsewhich' TeX tool
c.configure(smooth=0, summary=False, sigma2d=True, usetex=False)
fig = c.plotter.plot(**kwargs)
axes_list = fig.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if save_fig:
fig.savefig(
os.path.join(self.outdir, self.label + "_chainconsumer_corner.png"),
dpi=dpi,
)
plt.close(fig)
else:
return fig, axes
def _add_prior_to_corner(self, axes, samples, add_prior):
for i, key in enumerate(self.theta_keys):
ax = axes[i][i]
s = samples[:, i]
lnprior = self._generic_lnprior(**self.theta_prior[key])
if add_prior == "full" and self.theta_prior[key]["type"] == "unif":
lower = self.theta_prior[key]["lower"]
upper = self.theta_prior[key]["upper"]
r = upper - lower
xlim = [lower - 0.05 * r, upper + 0.05 * r]
x = np.linspace(xlim[0], xlim[1], 1000)
else:
xlim = ax.get_xlim()
x = np.linspace(s.min(), s.max(), 1000)
multiplier = self._get_rescale_multiplier_for_key(key)
subtractor = self._get_rescale_subtractor_for_key(key)
ax.plot(
(x - subtractor) * multiplier,
[np.exp(lnprior(xi)) for xi in x],
"-C3",
label="prior",
)
for j in range(i, self.ndim):
axes[j][i].set_xlim(xlim[0], xlim[1])
for k in range(0, i):
axes[i][k].set_ylim(xlim[0], xlim[1])
def _get_prior_bounds(self, normal_stds=2):
"""Get the lower/upper bounds of all priors
Parameters
----------
normal_stds: float
Number of standard deviations to cut normal (Gaussian) or half-norm
distributions at.
Returns
-------
prior_bounds: dict
Dictionary of ["lower","upper"] pairs for each parameter
norm_warning: bool
A flag that is true if any parameter has a norm or half-norm prior.
Caller functions may wish to warn the user that the prior has
been truncated at normal_stds.
"""
prior_bounds = {}
norm_trunc_warning = False
for key in self.theta_keys:
prior_bounds[key] = {}
prior_dict = self.theta_prior[key]
norm_trunc_warning = "norm" in prior_dict["type"] or norm_trunc_warning
if prior_dict["type"] == "unif":
prior_bounds[key]["lower"] = prior_dict["lower"]
prior_bounds[key]["upper"] = prior_dict["upper"]
elif prior_dict["type"] == "log10unif":
prior_bounds[key]["lower"] = 10 ** prior_dict["log10lower"]
prior_bounds[key]["upper"] = 10 ** prior_dict["log10upper"]
elif prior_dict["type"] == "norm":
prior_bounds[key]["lower"] = (
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
prior_bounds[key]["upper"] = (
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "halfnorm":
prior_bounds[key]["lower"] = prior_dict["loc"]
prior_bounds[key]["upper"] = (
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "neghalfnorm":
prior_bounds[key]["upper"] = prior_dict["loc"]
prior_bounds[key]["lower"] = (
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "lognorm":
prior_bounds[key]["lower"] = np.exp(
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
prior_bounds[key]["upper"] = np.exp(
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
else:
raise ValueError(
"Not implemented for prior type {}".format(prior_dict["type"])
)
return prior_bounds, norm_trunc_warning
def plot_prior_posterior(
self,
normal_stds=2,
injection_parameters=None,
fig_and_axes=None,
save_fig=True,
):
"""Plot the prior and posterior probability distributions in the same figure
Parameters
----------
normal_stds: int
Bounds of priors in terms of their standard deviation. Only used if
`norm`, `halfnorm`, `neghalfnorm` or `lognorm` priors are given, otherwise ignored.
injection_parameters: dict
Dictionary containing the parameters of a signal. All parameters being searched must be
present as dictionary keys, otherwise this option is ignored.
fig_and_axes: tuple
(fig, axes) tuple to plot on.
save_fig: bool
If true, save the figure, else return the fig, axes.
Returns
-------
(fig, ax): (matplotlib.pyplot.figure, matplotlib.pyplot.axes)
If `save_fig` evaluates to `False`, return figure and axes.
"""
# Check injection parameters first
injection_parameters = injection_parameters or {}
missing_keys = set(self.theta_keys) - injection_parameters.keys()
if missing_keys:
logging.warning(
f"plot_prior_posterior(): Missing keys {missing_keys} in 'injection_parameters',"
" no injection parameters will be highlighted."
)
injection_parameters = None
if fig_and_axes is None:
fig, axes = plt.subplots(nrows=self.ndim, figsize=(8, 4 * self.ndim))
else:
fig, ax = fig_and_axes
if self.ndim == 1:
axes = [axes]
N = 1000
from scipy.stats import gaussian_kde
prior_bounds, _ = self._get_prior_bounds(normal_stds)
for i, (ax, key) in enumerate(zip(axes, self.theta_keys)):
prior_dict = self.theta_prior[key]
ln_prior_func = self._generic_lnprior(**prior_dict)
x = np.linspace(prior_bounds[key]["lower"], prior_bounds[key]["upper"], N)
prior = np.exp([ln_prior_func(xi) for xi in x]) # may not be vectorized
priorln = ax.plot(x, prior, "C3", label="prior")
ax.set(xlabel=self.theta_symbols[i], yticks=[])
s = self.samples[:, i]
while len(s) > 10 ** 4:
# random downsample to avoid slow calculation of kde
s = np.random.choice(s, size=int(len(s) / 2.0))
kde = gaussian_kde(s)
ax2 = ax.twinx()
postln = ax2.plot(x, kde.pdf(x), "k", label="posterior")
ax2.set(yticks=[], yticklabels=[])
if injection_parameters is not None:
injection = ax.axvline(
injection_parameters[key],
label="Injection",
color="purple",
ls="--",
)
plotlines = priorln + postln
labs = [plotline.get_label() for plotline in plotlines]
if injection_parameters is not None:
plotlines.append(injection)
labs.append("injection")
axes[0].legend(plotlines, labs, loc=1, framealpha=0.8)
if save_fig:
fig.savefig(os.path.join(self.outdir, self.label + "_prior_posterior.png"))
plt.close(fig)
else:
return fig, axes
def plot_cumulative_max(self, **kwargs):
"""Plot the cumulative twoF for the maximum posterior estimate.
This method accepts the same arguments as `pyfstat.core.ComputeFstat.plot_twoF_cumulative`,
except for `CFS_input`, which is taken from the loudest candidate; and `label` and `outdir`,
which are taken from the instance of this class.
        For example, one can pass signal arguments to predict_twoF_cumulative through `PFS_kwargs`, or
set the number of segments using `num_segments_(CFS|PFS)`. The same applies for other options
        such as `tstart`, `tend` or `savefig`. Each of these arguments will be passed to
`pyfstat.core.ComputeFstat.plot_twoF_cumulative` as they are, using their default argument
otherwise.
See `pyfstat.core.ComputeFstat.plot_twoF_cumulative` for a comprehensive list of accepted
arguments and their default values.
Unlike the core function, here savefig=True is the default,
for consistency with other MCMC plotting functions.
"""
logging.info("Getting cumulative 2F")
d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.items():
if key not in d:
d[key] = val
if kwargs.get("savefig") is None:
kwargs["savefig"] = True
self.search.plot_twoF_cumulative(
CFS_input=d, label=self.label, outdir=self.outdir, **kwargs
)
def _generic_lnprior(self, **kwargs):
"""Return a lambda function of the pdf
Parameters
----------
**kwargs:
A dictionary containing 'type' of pdf and shape parameters
"""
def log_of_unif(x, a, b):
above = x < b
below = x > a
if type(above) is not np.ndarray:
if above and below:
return -np.log(b - a)
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(b - a)
return p
def log_of_log10unif(x, log10lower, log10upper):
log10x = np.log10(x)
above = log10x < log10upper
below = log10x > log10lower
if type(above) is not np.ndarray:
if above and below:
return -np.log(x * np.log(10) * (log10upper - log10lower))
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(x * np.log(10) * (log10upper - log10lower))
return p
def log_of_halfnorm(x, loc, scale):
if x < loc:
return -np.inf
else:
return -0.5 * (
(x - loc) ** 2 / scale ** 2 + np.log(0.5 * np.pi * scale ** 2)
)
def cauchy(x, x0, gamma):
return 1.0 / (np.pi * gamma * (1 + ((x - x0) / gamma) ** 2))
def exp(x, x0, gamma):
if x > x0:
return np.log(gamma) - gamma * (x - x0)
else:
return -np.inf
if kwargs["type"] == "unif":
return lambda x: log_of_unif(x, kwargs["lower"], kwargs["upper"])
if kwargs["type"] == "log10unif":
return lambda x: log_of_log10unif(
x, kwargs["log10lower"], kwargs["log10upper"]
)
elif kwargs["type"] == "halfnorm":
return lambda x: log_of_halfnorm(x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "neghalfnorm":
return lambda x: log_of_halfnorm(-x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "norm":
return lambda x: -0.5 * (
(x - kwargs["loc"]) ** 2 / kwargs["scale"] ** 2
+ np.log(2 * np.pi * kwargs["scale"] ** 2)
)
elif kwargs["type"] == "lognorm":
# as of scipy 1.4.1 and numpy 1.18.1 the following parametrisation
# should be consistent with np.random.lognormal in _generate_rv()
            return lambda x: lognorm.logpdf(
                x, s=kwargs["scale"], scale=np.exp(kwargs["loc"])
            )
else:
logging.info("kwargs:", kwargs)
raise ValueError("Prior pdf type {:s} unknown.".format(kwargs["type"]))
def _generate_rv(self, **kwargs):
dist_type = kwargs.pop("type")
if dist_type == "unif":
return np.random.uniform(low=kwargs["lower"], high=kwargs["upper"])
if dist_type == "log10unif":
return 10 ** (
np.random.uniform(low=kwargs["log10lower"], high=kwargs["log10upper"])
)
if dist_type == "norm":
return np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
if dist_type == "halfnorm":
return np.abs(np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"]))
if dist_type == "neghalfnorm":
return -1 * np.abs(
np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
)
if dist_type == "lognorm":
return np.random.lognormal(mean=kwargs["loc"], sigma=kwargs["scale"])
else:
raise ValueError("dist_type {} unknown".format(dist_type))
def _plot_walkers(
self,
symbols=None,
alpha=0.8,
color="k",
temp=0,
lw=0.1,
nprod=0,
add_det_stat_burnin=False,
fig=None,
axes=None,
xoffset=0,
injection_parameters=None,
plot_det_stat=False,
context="ggplot",
labelpad=5,
):
"""Plot all the chains from a sampler"""
if injection_parameters is not None:
if not isinstance(injection_parameters, dict):
raise ValueError("injection_parameters is not a dictionary")
missing_keys = set(self.theta_keys) - injection_parameters.keys()
if missing_keys:
logging.warning(
f"plot_walkers(): Missing keys {missing_keys} in 'injection_parameters',"
" argument will be ignored."
)
injection_parameters = None
else:
scaled_injection_parameters = {
key: (
injection_parameters[key]
- self._get_rescale_subtractor_for_key(key)
)
* self._get_rescale_multiplier_for_key(key)
for key in injection_parameters.keys()
}
if symbols is None:
symbols = self._get_labels()
if context not in plt.style.available:
raise ValueError(
(
"The requested context {} is not available; please select a"
" context from `plt.style.available`"
).format(context)
)
if np.ndim(axes) > 1:
axes = axes.flatten()
shape = self.sampler.chain.shape
if len(shape) == 3:
nwalkers, nsteps, ndim = shape
chain = self.sampler.chain[:, :, :].copy()
if len(shape) == 4:
ntemps, nwalkers, nsteps, ndim = shape
if temp < ntemps:
logging.info("Plotting temperature {} chains".format(temp))
else:
raise ValueError(
("Requested temperature {} outside of" "available range").format(
temp
)
)
chain = self.sampler.chain[temp, :, :, :].copy()
samples = chain.reshape((nwalkers * nsteps, ndim))
samples = self._scale_samples(samples, self.theta_keys)
chain = chain.reshape((nwalkers, nsteps, ndim))
if plot_det_stat:
extra_subplots = 1
else:
extra_subplots = 0
with plt.style.context((context)):
if fig is None and axes is None:
fig = plt.figure(figsize=(4, 3.0 * ndim))
ax = fig.add_subplot(ndim + extra_subplots, 1, 1)
axes = [ax] + [
fig.add_subplot(ndim + extra_subplots, 1, i)
for i in range(2, ndim + 1)
]
idxs = np.arange(chain.shape[1])
burnin_idx = chain.shape[1] - nprod
last_idx = burnin_idx
if ndim > 1:
for i in range(ndim):
axes[i].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, i].T
if burnin_idx > 0:
axes[i].plot(
xoffset + idxs[: last_idx + 1],
cs[: last_idx + 1],
color="C3",
alpha=alpha,
lw=lw,
)
axes[i].axvline(xoffset + last_idx, color="k", ls="--", lw=0.5)
axes[i].plot(
xoffset + idxs[burnin_idx:],
cs[burnin_idx:],
color="k",
alpha=alpha,
lw=lw,
)
if injection_parameters is not None:
axes[i].axhline(
scaled_injection_parameters[self.theta_keys[i]],
ls="--",
lw=2.0,
color="orange",
)
axes[i].set_xlim(0, xoffset + idxs[-1])
if symbols:
axes[i].set_ylabel(symbols[i], labelpad=labelpad)
else:
axes[0].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, temp].T
if burnin_idx:
axes[0].plot(
idxs[:burnin_idx],
cs[:burnin_idx],
color="C3",
alpha=alpha,
lw=lw,
)
axes[0].plot(
idxs[burnin_idx:], cs[burnin_idx:], color="k", alpha=alpha, lw=lw
)
if injection_parameters is not None:
axes[0].axhline(
scaled_injection_parameters[self.theta_keys[0]],
ls="--",
lw=5.0,
color="orange",
)
if symbols:
axes[0].set_ylabel(symbols[0], labelpad=labelpad)
axes[-1].set_xlabel(r"Number of steps", labelpad=0.2)
if plot_det_stat:
if len(axes) == ndim:
axes.append(fig.add_subplot(ndim + 1, 1, ndim + 1))
lnl = self.sampler.loglikelihood[temp, :, :]
if burnin_idx and add_det_stat_burnin:
burn_in_vals = lnl[:, :burnin_idx].flatten()
try:
detstat_burnin = (
                            burn_in_vals[~np.isnan(burn_in_vals)]
import unittest
import numpy as np
from math import pi
from airfoilprep import Polar, Airfoil, AirfoilAnalysis
class TestBlend(unittest.TestCase):
def setUp(self):
alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18,
14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07, 25]
cl = [-0.071, 0.044, 0.144, 0.241, 0.338, 0.435, 0.535, 0.632,
0.728, 0.813, 0.883, 0.946, 1.001, 1.054, 1.056, 1.095,
1.138, 1.114, 1.073, 1.008, 0.95, 0.902, 0.795, 0.797, 0.8]
cd = [0.0122, 0.0106, 0.0114, 0.0134, 0.0136, 0.014, 0.0147,
0.0156, 0.0162, 0.0173, 0.0191, 0.0215, 0.0248, 0.0339,
0.0544, 0.0452, 0.0445, 0.067, 0.0748, 0.1028, 0.1473,
0.2819, 0.2819, 0.2819, 0.3]
cm = [-0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
-0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
-0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
-0.1068, -0.0981, -0.0894, -0.0807]
Re = 1
self.polar1 = Polar(Re, alpha, cl, cd, cm)
alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18, 14.18,
15.189, 16.17, 17.14, 18.06, 19.06, 20.07, 21.08, 22.09,
23.1, 25]
cl = [-0.0852, 0.0528, 0.1728, 0.2892, 0.4056, 0.522, 0.642, 0.7584,
0.8736, 0.9756, 1.0596, 1.1352, 1.2012, 1.2648, 1.2672, 1.314,
1.3656, 1.3368, 1.2876, 1.2096, 1.14, 1.0824, 0.954, 0.9564, 1,
1.2, 1.4, 1.6]
cd = [0.01464, 0.01272, 0.01368, 0.01608, 0.01632, 0.0168, 0.01764,
0.01872, 0.01944, 0.02076, 0.02292, 0.0258, 0.02976, 0.04068,
0.06528, 0.05424, 0.0534, 0.0804, 0.08976, 0.12336, 0.17676,
0.33828, 0.33828, 0.33828, 0.35, 0.4, 0.45, 0.5]
cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
-0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
-0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
-0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633]
self.polar2 = Polar(Re, alpha, cl, cd, cm)
def test_blend1(self):
polar3 = self.polar1.blend(self.polar2, 0.5)
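        # with a blend weight of 0.5, the expected values below are simply the
        # arithmetic mean of the two input polars at each angle of attack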
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07,
25]
cl_blend = [-0.078, 0.048, 0.158, 0.265, 0.372, 0.479, 0.589, 0.695,
0.801, 0.894, 0.971, 1.041, 1.101, 1.159, 1.162, 1.205,
1.252, 1.225, 1.181, 1.109, 1.045, 0.992, 0.875, 0.877,
1.200]
cd_blend = [0.0134, 0.0117, 0.0125, 0.0147, 0.0150, 0.0154, 0.0162,
0.0172, 0.0178, 0.0190, 0.0210, 0.0237, 0.0273, 0.0373,
0.0598, 0.0497, 0.0490, 0.0737, 0.0822, 0.1131, 0.1620,
0.3101, 0.3101, 0.3101, 0.4000]
cm_blend = [-0.00405, -0.00475, -0.00165, -0.0099, -0.0249, -0.0314,
-0.03755, -0.043, -0.0481, -0.04555, -0.03625, -0.0301,
-0.02825, -0.0303, -0.03415, -0.0362, -0.0378, -0.03955,
-0.06905, -0.11125, -0.11985, -0.11115,-0.10245, -0.09375,
-0.072]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend1_w_airfoil(self):
af1 = Airfoil([self.polar1])
af2 = Airfoil([self.polar2])
af3 = af1.blend(af2, 0.5)
polar3 = af3.polars[0] # kind of bad practice for me to be accessing this
alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06, 20.07,
25]
cl_blend = [-0.078, 0.048, 0.158, 0.265, 0.372, 0.479, 0.589, 0.695,
0.801, 0.894, 0.971, 1.041, 1.101, 1.159, 1.162, 1.205,
1.252, 1.225, 1.181, 1.109, 1.045, 0.992, 0.875, 0.877,
1.200]
cd_blend = [0.0134, 0.0117, 0.0125, 0.0147, 0.0150, 0.0154, 0.0162,
0.0172, 0.0178, 0.0190, 0.0210, 0.0237, 0.0273, 0.0373,
0.0598, 0.0497, 0.0490, 0.0737, 0.0822, 0.1131, 0.1620,
0.3101, 0.3101, 0.3101, 0.4000]
cm_blend = [-0.00405, -0.00475, -0.00165, -0.0099, -0.0249, -0.0314,
-0.03755, -0.043, -0.0481, -0.04555, -0.03625, -0.0301,
-0.02825, -0.0303, -0.03415, -0.0362, -0.0378, -0.03955,
-0.06905, -0.11125, -0.11985, -0.11115,-0.10245, -0.09375,
-0.072]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
        cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
import torch
import os
from datetime import datetime
from time import time
import numpy as np
from mpi4py import MPI
from mpi_utils.mpi_utils import sync_networks, sync_grads
from rl_modules.replay_buffer import replay_buffer
from rl_modules.models import actor, actor_bilinear, critic, critic_bilinear, critic_sum,\
actor_large, critic_large
from rl_modules.renn_models import actor_ReNN, critic_ReNN
from rl_modules.attn_models import actor_attn, critic_attn
from rl_modules.biattn_models import critic_biattn, actor_biattn
from rl_modules.ma_models import actor_shared, actor_separated, actor_dropout, actor_multihead
from mpi_utils.normalizer import normalizer
from her_modules.her import her_sampler
import wandb
from tqdm import tqdm
"""
ddpg with HER (MPI-version)
"""
class ddpg_agent:
def __init__(self, args, env, env_params):
self.args = args
self.env = env
self.env_params = env_params
# MPI
self.comm = MPI.COMM_WORLD
self.nprocs = self.comm.Get_size()
# create the network and target network
if args.actor_shared:
self.actor_network = actor_shared(env_params)
self.actor_target_network = actor_shared(env_params)
self.critic_network = critic(env_params)
self.critic_target_network = critic(env_params)
elif args.actor_separated:
self.actor_network = actor_separated(env_params)
self.actor_target_network = actor_separated(env_params)
self.critic_network = critic(env_params)
self.critic_target_network = critic(env_params)
elif args.actor_dropout:
self.actor_network = actor_dropout(env_params)
self.actor_target_network = actor_dropout(env_params)
self.critic_network = critic(env_params)
self.critic_target_network = critic(env_params)
elif args.actor_multihead:
self.actor_network = actor_multihead(env_params)
self.actor_target_network = actor_multihead(env_params)
self.critic_network = critic(env_params)
self.critic_target_network = critic(env_params)
elif args.use_renn:
self.actor_network = actor_ReNN(env_params)
self.actor_target_network = actor_ReNN(env_params)
self.critic_network = critic_ReNN(env_params)
self.critic_target_network = critic_ReNN(env_params)
elif args.use_bilinear:
self.actor_network = actor_bilinear(env_params)
self.actor_target_network = actor_bilinear(env_params)
self.critic_network = critic_bilinear(env_params)
self.critic_target_network = critic_bilinear(env_params)
elif args.use_critic_sum:
self.actor_network = actor(env_params)
self.actor_target_network = actor(env_params)
self.critic_network = critic_sum(env_params)
self.critic_target_network = critic_sum(env_params)
elif args.use_attn:
self.actor_network = actor_attn(env_params)
self.actor_target_network = actor_attn(env_params)
self.critic_network = critic_attn(env_params)
self.critic_target_network = critic_attn(env_params)
elif args.use_biattn:
self.actor_network = actor_attn(env_params)
self.actor_target_network = actor_attn(env_params)
self.critic_network = critic_biattn(env_params)
self.critic_target_network = critic_biattn(env_params)
elif args.actor_large:
self.actor_network = actor_large(env_params)
self.actor_target_network = actor_large(env_params)
self.critic_network = critic_large(env_params)
self.critic_target_network = critic_large(env_params)
else:
self.actor_network = actor(env_params)
self.actor_target_network = actor(env_params)
self.critic_network = critic(env_params)
self.critic_target_network = critic(env_params)
if self.args.learn_from_expert:
            assert args.resume, 'expert needs a pretrained model!'
self.new_actor_loss = []
self.expert_network = actor(env_params).eval()
# load paramters
if args.resume:
if self.args.model_path == None:
path = os.path.join(self.args.save_dir, self.args.env_name, self.args.name, 'model.pt')
else:
path = self.args.model_path
try:
o_dict, g_dict, actor_model, critic_model = torch.load(path, map_location=lambda storage, loc: storage)
# OLD Version o_mean, o_std, g_mean, g_std, actor_model, critic_model = torch.load(path, map_location=lambda storage, loc: storage)
except:
                print('failed to load the model!')
exit()
            print('model loaded!')
if self.args.learn_from_expert:
self.expert_network.load_state_dict(actor_model)
else:
self.actor_network.load_state_dict(actor_model)
self.critic_network.load_state_dict(critic_model)
# sync the networks across the cpus
sync_networks(self.actor_network)
sync_networks(self.critic_network)
# load the weights into the target networks
self.actor_target_network.load_state_dict(self.actor_network.state_dict())
self.critic_target_network.load_state_dict(self.critic_network.state_dict())
# if use gpu
if self.args.cuda:
self.actor_network.cuda()
self.critic_network.cuda()
self.actor_target_network.cuda()
self.critic_target_network.cuda()
# create the optimizer
self.actor_optim = torch.optim.Adam(self.actor_network.parameters(), lr=self.args.lr_actor)
self.critic_optim = torch.optim.Adam(self.critic_network.parameters(), lr=self.args.lr_critic)
# her sampler
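        # HER relabelling: transitions are re-labelled with achieved goals according
        # to replay_strategy (e.g. 'future') at a rate set by replay_k, with optional
        # special handling of goals whose objects did not move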
self.her_module = her_sampler(self.args.replay_strategy, self.args.replay_k, self.env.compute_reward, random_unmoved = self.args.random_unmoved, not_relabel_unmoved = self.args.not_relabel_unmoved)
# create the replay buffer
self.buffer = replay_buffer(self.env_params, self.args.buffer_size, self.her_module.sample_her_transitions)
# create the normalizer
self.o_norm = normalizer(size=env_params['obs'], default_clip_range=self.args.clip_range)
self.g_norm = normalizer(size=env_params['goal'], default_clip_range=self.args.clip_range)
if args.resume:
            # Note: if using an object-number curriculum, the normalizer needs to be extended
self.o_norm.load(o_dict)
self.g_norm.load(g_dict)
# OLD VERSION self.o_norm.mean = o_mean
# self.o_norm.std = o_std
# self.g_norm.mean = g_mean
# self.g_norm.std = g_std
        # create the directory for storing the model
if MPI.COMM_WORLD.Get_rank() == 0:
# if not os.path.exists(self.args.save_dir):
# os.mkdir(self.args.save_dir, exist_ok=True)
# path to save the model
self.model_path = os.path.join(self.args.save_dir, self.args.env_name, self.args.name)
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
# start wandb to log
if self.args.wandb:
wandb.init(
project = self.args.project,
group = self.args.group,
tags = self.args.tags,
name = self.args.name,
notes = f'Env:{self.args.env_name},Note:{self.args.note}'
)
def learn(self):
"""
train the network
"""
# warm up
if self.args.warmup:
self.warmup(100)
# start to collect samples
start_time = time()
collect_per_epoch = self.args.n_cycles * self.args.num_rollouts_per_mpi * self.env_params['max_timesteps']
self.global_relabel_rate = 0.3
curriculum_param = self.args.curriculum_init
curri_indicator = 0
for epoch in range(self.args.n_epochs):
# start curriculum
if self.args.curriculum and curri_indicator > self.args.curriculum_bar:
if curriculum_param < self.args.curriculum_end:
curriculum_param += self.args.curriculum_step
self.env.change(curriculum_param)
observation = self.env.reset()
# extend normalizer to new observation
o_size = len(observation['observation'])
g_size = len(observation['desired_goal'])
self.o_norm.change_size(new_size = o_size)
self.g_norm.change_size(new_size = g_size)
# extend buffer to new observation
self.buffer.change_size(max_timesteps=self.env._max_episode_steps,\
obs_size=o_size, goal_size=g_size)
            num_useless_rollout = 0  # record the number of useless rollouts (achieved goal did not change)
for _ in tqdm(range(self.args.n_cycles)):
mb_obs, mb_ag, mb_g, mb_info, mb_actions = [], [], [], [], []
for _ in range(self.args.num_rollouts_per_mpi):
                    # retry until a successful experience is collected
for j in range(self.args.max_trail_time):
# reset the rollouts
ep_obs, ep_ag, ep_g, ep_info, ep_actions = [], [], [], [], []
# reset the environment
observation = self.env.reset()
obs = observation['observation']
ag = observation['achieved_goal']
g = observation['desired_goal']
info = observation.get('info') # if no info, return None
# start to collect samples
ag_origin = ag
for t in range(self.env._max_episode_steps):
with torch.no_grad():
input_tensor = self._preproc_inputs(obs, g)
if self.args.collect_from_expert:
pi = self.expert_network(input_tensor)
else:
pi = self.actor_network(input_tensor)
action = self._select_actions(pi)
# feed the actions into the environment
observation_new, _, _, info = self.env.step(action)
# self.env.render()
obs_new = observation_new['observation']
ag_new = observation_new['achieved_goal']
# append rollouts
ep_obs.append(obs.copy())
ep_ag.append(ag.copy())
ep_g.append(g.copy())
ep_info.append(info.copy())
ep_actions.append(action.copy())
# re-assign the observation
obs = obs_new
ag = ag_new
                    # check whether to keep this rollout
if_moved = np.linalg.norm(ag.reshape(-1,self.args.dim) - ag_origin.reshape(-1,self.args.dim), axis=-1) > 0.005
if self.args.trail_mode == 'all':
if_moved = if_moved.all()
elif self.args.trail_mode == 'any':
if_moved = if_moved.any()
else:
raise NotImplementedError
if if_moved:
break
else:
num_useless_rollout += 1
ep_obs.append(obs.copy())
ep_ag.append(ag.copy())
mb_obs.append(ep_obs)
mb_ag.append(ep_ag)
mb_info.append(ep_info)
mb_g.append(ep_g)
mb_actions.append(ep_actions)
# convert them into arrays
mb_obs = np.array(mb_obs)
mb_ag = np.array(mb_ag)
mb_g = np.array(mb_g)
mb_info = np.array(mb_info)
mb_actions = np.array(mb_actions)
# store the episodes
self.buffer.store_episode([mb_obs, mb_ag, mb_g, mb_info, mb_actions])
self._update_normalizer([mb_obs, mb_ag, mb_g, mb_info, mb_actions])
# train the network
self._update_network()
# soft update
self._soft_update_target_network(self.actor_target_network, self.actor_network)
self._soft_update_target_network(self.critic_target_network, self.critic_network)
# start to do the evaluation
data = self._eval_agent(render = ((epoch%10)==0 and self.args.render))
if self.args.curriculum_reward:
curri_indicator = data['reward']
else:
curri_indicator = data['success_rate']
# record relabel rate
local_relabel_rate = self.her_module.relabel_num/self.her_module.total_sample_num
local_random_relabel_rate = self.her_module.random_num/self.her_module.total_sample_num
local_not_relabel_rate = self.her_module.nochange_num/self.her_module.total_sample_num
            local_data = np.array([local_relabel_rate, local_random_relabel_rate, local_not_relabel_rate])
"""Tests for the atmos_flux_inversion package.
Includes tests using random data, analytic solutions, and checks that
different methods agree for simple problems.
"""
from __future__ import print_function, division
import fractions
import itertools
import operator
import os.path
import atexit
import pickle
import math
import sys
try:
from functools import reduce
except ImportError:
# reduce used to be a builtin
pass
import numpy as np
import numpy.linalg as np_la
import numpy.linalg as la
import numpy.testing as np_tst
import scipy.linalg
import scipy.sparse
import scipy.optimize
# Import from scipy.linalg if not using dask
from scipy.linalg import cholesky
from scipy.sparse.linalg.interface import LinearOperator, MatrixLinearOperator
import unittest2
import pyfftw
import pandas as pd
import xarray
try:
import sparse
HAVE_SPARSE = True
except ImportError:
HAVE_SPARSE = False
import atmos_flux_inversion.optimal_interpolation
import atmos_flux_inversion.correlations
import atmos_flux_inversion.covariances
import atmos_flux_inversion.variational
import atmos_flux_inversion.remapper
import atmos_flux_inversion.wrapper
import atmos_flux_inversion.linalg
import atmos_flux_inversion.noise
import atmos_flux_inversion.psas
import atmos_flux_inversion.util
from atmos_flux_inversion.linalg import tolinearoperator
if os.path.exists(".pyfftw.pickle"):
with open(".pyfftw.pickle", "rb") as wis_in:
WISDOM = pickle.load(wis_in)
if isinstance(WISDOM[0], str):
WISDOM = [wis.encode("ascii")
for wis in WISDOM]
pyfftw.import_wisdom(WISDOM)
del WISDOM, wis_in
def save_wisdom():
"""Save accumulated pyfftw wisdom.
Saves in hidden file in current directory.
Should help speed up subsequent test runs.
"""
with open(".pyfftw.pickle", "wb") as wis_out:
pickle.dump(pyfftw.export_wisdom(), wis_out, 2)
atexit.register(save_wisdom)
del save_wisdom
# If adding other inexact methods to the list tested, be sure to add
# those to the `if "var" in name or "psas" in name` and
# `if "psas" in name` tests as applicable.
ALL_METHODS = (
atmos_flux_inversion.optimal_interpolation.simple,
atmos_flux_inversion.optimal_interpolation.fold_common,
atmos_flux_inversion.optimal_interpolation.save_sum,
atmos_flux_inversion.optimal_interpolation.scipy_chol,
atmos_flux_inversion.variational.simple,
atmos_flux_inversion.variational.incremental,
atmos_flux_inversion.variational.incr_chol,
atmos_flux_inversion.psas.simple,
atmos_flux_inversion.psas.fold_common,
)
ITERATIVE_METHOD_START = 4
"""Where the iterative methods start in the above list.
Used to test failure modes for these solvers.
"""
PRECISE_DTYPE = np.float128
"""The dtype used to represent analytic results.
These are initialized as :class:`fractions.Fraction` then converted to
this dtype for the comparison.
"""
ITERATIVE_STATE_TOLERANCE = 1e-3
ITERATIVE_COVARIANCE_TOLERANCE = 1e-1
EXACT_TOLERANCE = 1e-7
DTYPE = np.float64
"""Default dtype for certain tests."""
def getname(method):
"""Descriptive name for the function.
A name combining the function name and module.
Parameters
----------
method: callable
Returns
-------
name: str
"""
module = method.__module__
group = module.split(".")[-1]
variant = method.__name__
return "{group:s} ({variant:s})".format(group=group,
variant=variant)
def expectFailureIf(condition):
"""Mark a test as XFAIL based on condition.
Wrapper to make :func:`unittest2.expectedFailure` conditional.
Parameters
----------
condition: bool
Returns
-------
decorator: func
"""
if condition:
return unittest2.expectedFailure
return lambda fun: fun
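# Illustrative usage sketch (hypothetical test and condition, not part of this
# suite):
#
#     @expectFailureIf(sys.version_info < (3, 0))
#     def test_unicode_handling(self):
#         ...
#
# When the condition is false the decorator is a no-op and the test runs
# normally; when it is true the test is marked as an expected failure.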
class TestInversionSimple(unittest2.TestCase):
"""Test inversions using simple cases."""
def test_scalar_equal_variance(self):
"""Test a direct measurement of a scalar state."""
bg = np.atleast_1d(2.)
bg_cov = np.atleast_2d(1.)
obs = np.atleast_1d(3.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, 2.5)
np_tst.assert_allclose(post_cov, .5)
def test_scalar_unequal_variance(self):
"""Test assimilation of a direct measurement fo a scalar state.
Variances not equal.
"""
bg = np.atleast_1d(15.)
bg_cov = np.atleast_2d(2.)
obs = np.atleast_1d(14.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
with self.subTest(method=getname(method)):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post, PRECISE_DTYPE(14 + fractions.Fraction(1, 3)))
np_tst.assert_allclose(
post_cov, PRECISE_DTYPE(fractions.Fraction(2, 3)))
def test_multiple_priors(self):
"""Test doing multiple assimilations at once.
Simple test.
"""
bg = np.array([[2., 3.]])
bg_cov = np.atleast_2d(1.)
obs = np.array([[3., 4.]])
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS[:ITERATIVE_METHOD_START]:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, [[2.5, 3.5]])
np_tst.assert_allclose(post_cov, .5)
def test_homework_one(self):
"""Verify that this can reproduce the answers to HW1.
Make sure the answers here are within roundoff of the analytic
solutions.
"""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
# Setup for expected degradation of solutions
name = getname(method)
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
# Also tested above in scalar_unequal_variance
with self.subTest(problem=3):
state_college_index = 1
post, post_cov = method(
bg[state_college_index],
bg_cov[state_college_index, state_college_index],
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, state_college_index])
np_tst.assert_allclose(
post, np.asanyarray(14 + fractions.Fraction(1, 3),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
np_tst.assert_allclose(
post_cov, np.asanyarray(fractions.Fraction(2, 3),
dtype=PRECISE_DTYPE),
rtol=cov_rtol)
with self.subTest(problem=4):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(2, 3),
14 + fractions.Fraction(1, 3),
21 + fractions.Fraction(2, 3)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=5):
pittsburgh_index = 0
post, post_cov = method(
bg, bg_cov,
obs[pittsburgh_index],
obs_cov[pittsburgh_index, pittsburgh_index],
obs_op[pittsburgh_index, :])
np_tst.assert_allclose(
post,
np.asanyarray((18 + fractions.Fraction(2, 3),
15 + fractions.Fraction(1, 3),
22 + fractions.Fraction(1, 6)),
PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=7):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
4 * obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(5, 6),
14 + fractions.Fraction(2, 3),
21 + fractions.Fraction(5, 6)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=8):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
# background correlations make this problem not
# strictly linear, at least without doing
# sequential inversions. Have not verified by hand
np_tst.assert_allclose(
post, np.asanyarray(
(18 + fractions.Fraction(1, 2),
14 + fractions.Fraction(1, 2),
21 + fractions.Fraction(3, 4)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
def test_sequential_assimilations(self):
"""Make sure this follows Bayes' rule."""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
name = getname(method)
if "var" in name.lower() or "psas" in name.lower():
state_rtol = ITERATIVE_STATE_TOLERANCE
cov_rtol = ITERATIVE_COVARIANCE_TOLERANCE
else:
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
inter1, inter_cov1 = method(
bg, bg_cov, obs[0], obs_cov[0, 0],
obs_op[0, :])
post1, post_cov1 = method(
inter1, inter_cov1, obs[1], obs_cov[1, 1],
obs_op[1, :])
post2, post_cov2 = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post1, post2, rtol=state_rtol)
if "psas" in name.lower():
# The second covariance isn't positive definite (one
# positive entry) and no entry shares the order of
# magnitude between the two.
raise unittest2.SkipTest("Known Failure: PSAS Covariances")
np_tst.assert_allclose(
post_cov1, post_cov2, rtol=cov_rtol)
def test_iterative_failures(self):
"""Test failure modes of iterative solvers."""
bg_stds = np.logspace(-8, 1, 10)
bg_corr = scipy.linalg.toeplitz(
np.arange(1, .9, -.01))
bg_cov = np.diag(bg_stds).dot(bg_corr).dot(np.diag(bg_stds))
bg_vals = np.arange(10)
obs_op = np.eye(3, 10)
obs_vals = 10 - np.arange(3)
obs_cov = np.diag((10, 1e-3, 1e-6)) / 8
for method in ALL_METHODS[ITERATIVE_METHOD_START:]:
name = getname(method)
with self.subTest(method=name):
with self.assertRaises(
atmos_flux_inversion.ConvergenceError) as cxt_mgr:
method(bg_vals, bg_cov, obs_vals, obs_cov, obs_op)
conv_err = cxt_mgr.exception
self.assertTrue(hasattr(conv_err, "guess"))
self.assertTrue(hasattr(conv_err, "result"))
self.assertIsInstance(conv_err.result,
scipy.optimize.OptimizeResult)
self.assertTrue(hasattr(conv_err, "hess_inv"))
class TestGaussianNoise(unittest2.TestCase):
"""Test the properties of the gaussian noise."""
def test_ident_cov(self):
"""Test generation with identity as covariance."""
sample_shape = 3
cov = np.eye(sample_shape)
noise = atmos_flux_inversion.noise.gaussian_noise(cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros((sample_shape,)),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), cov,
rtol=1e-2, atol=1e-2)
def test_shape(self):
"""Make sure the returned shapes are correct."""
sample_shape = (3,)
sample_cov = np.eye(sample_shape[0])
for shape in ((), (6,), (2, 3)):
with self.subTest(shape=shape):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, shape)
self.assertEqual(res.shape, shape + sample_shape)
with self.subTest(shape=5):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, 5)
self.assertEqual(res.shape, (5,) + sample_shape)
with self.subTest(shape=None):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, None)
self.assertEqual(res.shape, sample_shape)
def test_operator(self):
"""Test that the code works with operator covariances."""
diagonal = (1, .5, .3, .2, .1)
sample_cov = atmos_flux_inversion.covariances.DiagonalOperator(
diagonal)
sample_shape = (len(diagonal),)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), np.diag(diagonal),
rtol=1e-2, atol=1e-2)
def test_kron_op(self):
"""Test that large kronecker operators don't break the handling."""
op1 = scipy.linalg.toeplitz(.6 ** np.arange(15))
diag = (1, .9, .8, .7, .6, .5, .4, .3, .2, .1)
op2 = atmos_flux_inversion.covariances.DiagonalOperator(diag)
combined = atmos_flux_inversion.util.kronecker_product(op1, op2)
noise = atmos_flux_inversion.noise.gaussian_noise(combined, int(1e5))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(combined.shape[0]),
rtol=1.1e-2, atol=1.1e-2)
np_tst.assert_allclose(np.cov(noise.T),
scipy.linalg.kron(op1, np.diag(diag)),
rtol=3e-2, atol=3e-2)
def test_off_diagonal(self):
"""Test that the code works with off-diagonal elements."""
sample_cov = scipy.linalg.toeplitz((1, .5, .25, .125))
sample_shape = (4,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_slow_decay(self):
"""Test that the code handles slowly-decaying covariances."""
sample_cov = scipy.linalg.toeplitz(.8 ** np.arange(10))
sample_shape = (10,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_fails(self):
"""Test that construction fails on invalid input."""
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.ones(10))
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.eye(3, 2))
class TestCorrelations(unittest2.TestCase):
"""Test the generation of correlation matrices."""
def test_far_correl(self):
"""Test the correlation between points far apart.
Should be zero.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e-8)
corr = corr_fun(1e8)
self.assertAlmostEqual(corr, 0)
def test_near_correl(self):
"""Test 2D correlation between near points.
Should be one.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e8)
corr = corr_fun(1e-8)
self.assertAlmostEqual(corr, 1)
def test_2d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (int(15), int(20))
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
                # test positive definite
try:
chol_upper = cholesky(corr_mat)
except la.LinAlgError:
self.fail("corr_mat not positive definite")
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_2d_make_matrix(self):
"""Test make_matrix for 2D correlations.
Checks against original value.
This test is really slow.
"""
# 30x25 Gaussian 10 not close
test_nx = 30
test_ny = 20
test_points = test_ny * test_nx
# TODO: speed up
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 15):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, (test_ny, test_nx))
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index,
(test_ny, test_nx, test_ny, test_nx)
).reshape((test_points, test_points)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=1e-5, atol=1e-6)
# check if it actually is positive definite
cholesky(corr_mat)
def test_1d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (200,)
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
# This fails with a correlation length of 5
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
                # test positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_1d_make_matrix(self):
"""Test make_matrix for 1D correlations.
Checks against original value.
"""
test_nt = 200
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 30):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun,
test_nt
)
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index, (test_nt, test_nt)
).reshape((test_nt, test_nt)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=2e-7, atol=5e-7
)
# check if it actually is positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_fft_correlation_structure(self):
"""Ensure the FFT-based operators satisfy conditions of correlation matrices.
Checks for symmetry and ones on the diagonal.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for test_shape in ((300,), (20, 30)):
test_size = int(np.prod(test_shape, dtype=int))
for dist in (1, 3, 10, 30):
for is_cyclic in (True, False):
corr_fun = corr_class(dist)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, is_cyclic))
# This is the fastest way to get column-major
# order from da.eye.
corr_mat = corr_op.dot(np.eye(test_size).T)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="symmetry"):
np_tst.assert_allclose(corr_mat, corr_mat.T,
rtol=1e-14, atol=1e-15)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="self-correlation"):
np_tst.assert_allclose(np.diag(corr_mat), 1)
def test_1d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec)[noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1.5e-3)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
elif ((corr_class is atmos_flux_inversion.correlations.
BalgovindCorrelation and
dist == 10)):
# This one distance is problematic
# Roughly 3% of the points disagree
# for the last half of the tests
# I have no idea why
raise unittest2.SkipTest(
"Balgovind(10) correlations weird")
np_tst.assert_allclose(
corr_op.solve(
test_vec)[noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=2e-3
)
def test_1d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec),
corr_mat.dot(test_vec),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_2d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
np_tst.assert_allclose(
corr_op.solve(
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
def test_2d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape),
corr_mat.dot(test_vec).reshape(test_shape),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_homogeneous_from_array_cyclic(self):
"""Make sure cyclic from_array can be roundtripped.
Also tests that odd state sizes work.
"""
test_size = 25
corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation
for dist in (1, 3, 5):
with self.subTest(dist=dist):
corr_fun = corr_class(dist)
corr_op1 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_size, True))
first_column = corr_op1.dot(np.eye(test_size, 1)[:, 0])
corr_op2 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(first_column))
np_tst.assert_allclose(
corr_op1.dot(np.eye(test_size)),
corr_op2.dot(np.eye(test_size)))
def test_kron_composition(self):
"""Test that `kron` works similar to composition of the domains."""
HomogeneousIsotropicCorrelation = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation
)
corr_class = atmos_flux_inversion.correlations.GaussianCorrelation
corr_fun = corr_class(5)
shape1 = (5,)
shape2 = (7,)
corr_op1 = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape1))
corr_op2 = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape2))
kron_corr = corr_op1.kron(corr_op2)
direct_corr = (HomogeneousIsotropicCorrelation.
from_function(corr_fun, shape1 + shape2))
self.assertEqual(kron_corr.shape, direct_corr.shape)
self.assertEqual(kron_corr._underlying_shape,
direct_corr._underlying_shape)
np_tst.assert_allclose(kron_corr._corr_fourier,
direct_corr._corr_fourier)
np_tst.assert_allclose(kron_corr._fourier_near_zero,
direct_corr._fourier_near_zero)
def test_kron_results(self):
"""Test the Kronecker product implementation."""
HomogeneousIsotropicCorrelation = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation)
corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation
test_shapes = (20, 25, (5, 6))
distances = (3, 5,)
for dist1, shape1, dist2, shape2 in itertools.product(
distances, test_shapes, repeat=2):
with self.subTest(dist1=dist1, dist2=dist2):
corr_fun1 = corr_class(dist1)
corr_fun2 = corr_class(dist2)
corr_op1 = (
HomogeneousIsotropicCorrelation.
from_function(corr_fun1, shape1))
corr_op2 = (
HomogeneousIsotropicCorrelation.
from_function(corr_fun2, shape2))
size1 = np.prod(shape1)
size2 = np.prod(shape2)
corr_mat1 = corr_op1.dot(np.eye(size1))
corr_mat2 = corr_op2.dot(np.eye(size2))
full_corr1 = corr_op1.kron(corr_op2)
full_corr2 = scipy.linalg.kron(np.asarray(corr_mat1),
np.asarray(corr_mat2))
self.assertIsInstance(
corr_op1, HomogeneousIsotropicCorrelation)
test_vec = np.arange(size1 * size2)
np_tst.assert_allclose(
full_corr1.dot(test_vec),
full_corr2.dot(test_vec))
test_mat = np.eye(size1 * size2)
np_tst.assert_allclose(
full_corr1.dot(test_mat),
full_corr2.dot(test_mat))
def test_kron_delegate(self):
"""Test that kron delegates where appropriate."""
op1 = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array((1, .5, .25)))
mat2 = np.eye(5)
combined_op = op1.kron(mat2)
self.assertIsInstance(
combined_op,
atmos_flux_inversion.linalg.SchmidtKroneckerProduct
)
def test_sqrt_direct(self):
"""Test the square root in the most direct manner possible.
Checks whether matrices corresponding to sqrt.T@sqrt and the
original matrix are approximately equal.
"""
operator = (atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array((1, .5, .25, .125)))
sqrt = operator.sqrt()
sqrt_squared = sqrt.T.dot(sqrt)
mat = np.eye(4)
np_tst.assert_allclose(operator.dot(mat),
sqrt_squared.dot(mat))
def test_from_function_direct(self):
"""Directly test the output of from_function."""
corr_func = (atmos_flux_inversion.correlations.
ExponentialCorrelation(1 / np.log(2)))
from_function = (
atmos_flux_inversion.correlations.HomogeneousIsotropicCorrelation.
from_function)
toeplitz = scipy.linalg.toeplitz
with self.subTest(is_cyclic=False, nd=1):
corr_op = from_function(corr_func, [10], False)
np_tst.assert_allclose(
corr_op.dot(np.eye(10)),
toeplitz(0.5 ** np.arange(10)))
with self.subTest(is_cyclic=False, nd=2):
corr_op = from_function(corr_func, [2, 3], False)
same_row = toeplitz(0.5 ** np.array([0, 1, 2]))
other_row = toeplitz(
0.5 ** np.array([1, np.sqrt(2), np.sqrt(5)]))
np_tst.assert_allclose(
corr_op.dot(np.eye(6)),
np.block([[same_row, other_row],
[other_row, same_row]]))
corr_op = from_function(corr_func, [4, 6], False)
same_row = toeplitz(0.5 ** np.arange(6))
next_row = toeplitz(
0.5 ** np.array([1, np.sqrt(2), np.sqrt(5),
                                 np.sqrt(10), np.sqrt(17)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import emcee
import corner
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
# Reproducible results!
np.random.seed(123)
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534
# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
# Plot the dataset and the true model.
xl = np.array([0, 10])
pl.errorbar(x, y, yerr=yerr, fmt=".k")
pl.plot(xl, m_true*xl+b_true, "k", lw=3, alpha=0.6)
pl.ylim(-9, 9)
pl.xlabel("$x$")
pl.ylabel("$y$")
pl.tight_layout()
pl.savefig("line-data.png")
# Do the least-squares fit and compute the uncertainties.
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(yerr * yerr)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
print("""Least-squares results:
m = {0} ± {1} (truth: {2})
b = {3} ± {4} (truth: {5})
""".format(m_ls, np.sqrt(cov[1, 1]), m_true, b_ls, np.sqrt(cov[0, 0]), b_true))
# Plot the least-squares result.
pl.plot(xl, m_ls*xl+b_ls, "--k")
pl.savefig("line-least-squares.png")
# Define the probability function as likelihood * prior.
def lnprior(theta):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def lnlike(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
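# Note added for clarity: the variance used above is yerr**2 + (f * model)**2
# with f = exp(lnf), i.e. the quoted error bars are assumed to be
# underestimated by a constant fractional amount f of the model value.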
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
# Find the maximum likelihood value.
chi2 = lambda *args: -2 * lnlike(*args)
result = op.minimize(chi2, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
print("""Maximum likelihood result:
m = {0} (truth: {1})
b = {2} (truth: {3})
f = {4} (truth: {5})
""".format(m_ml, m_true, b_ml, b_true, np.exp(lnf_ml), f_true))
# Plot the maximum likelihood result.
pl.plot(xl, m_ml*xl+b_ml, "k", lw=2)
pl.savefig("line-max-likelihood.png")
# Set up the sampler.
ndim, nwalkers = 3, 100
pos = [result["x"] + 1e-4*np.random.randn(ndim)
import numpy as np
from scipy.interpolate import interp1d
from .hydrological_model import HydrologicalModel
from ..utilities import logN_rnd
class VrettasFung(HydrologicalModel):
"""
This class represents a hydrologic model based on the Vrettas-Fung papers.
1) <NAME>., and <NAME>. (2015), "Toward a new parameterization of
hydraulic conductivity in climate models: Simulation of rapid groundwater
fluctuations in Northern California", Journal of Advances in Modeling Earth
Systems, 07, doi:10.1002/2015MS000516.
2) <NAME>., and <NAME>. (2017), "Sensitivity of transpiration to
subsurface properties: Exploration with a 1-D model", Journal of Advances
in Modeling Earth Systems, 9, doi:10.1002/2016MS000901.
"""
def __init__(self, soil, porous, k_hc, theta_res, dz):
# Call the constructor of the parent class.
super().__init__(soil, porous, k_hc, theta_res, dz)
# Get all the underground boundaries.
(l0, l1, l2, l3) = porous.layers
# Test domain for the interpolation
# function in the saprolite domain.
z_sapr = np.arange(l1, l2 + 1)
        # Make an interpolation function.
self.fun_sapr = interp1d(z_sapr,
np.linspace(k_hc.sat_soil,
k_hc.sat_saprolite, z_sapr.size))
# Test domain for the interpolation
# function in the weathered bedrock.
z_wbed = np.arange(l2, l3 + 1)
# Compute the two parameters of the
# exponential function:
p0 = k_hc.sat_saprolite
p1 = np.log(p0 / k_hc.sat_fresh_bedrock) / l3
# Make the interpolation function.
self.fun_wbed = interp1d(z_wbed,
p0 * np.exp(-np.linspace(0, l3, z_wbed.size) * p1))
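        # Added note: at the top of the weathered bedrock (depth offset 0) the
        # profile evaluates to p0 = K_sat(saprolite); at depth l3 it evaluates to
        # p0 * exp(-l3 * p1) = p0 * (K_sat(fresh bedrock) / p0) = K_sat(fresh bedrock),
        # so the conductivity decays exponentially between the two layer values.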
# _end_def_
def __call__(self, psi, z, *args):
"""
A direct call to an object of this class will return the water content,
        along with other related quantities at a specific depth 'z', given the input
pressure head (suction).
:param psi: pressure head (suction) [dim_d x dim_m].
:param z: depth values (increasing downwards) [dim_d x 1].
:param args: in here we pass additional parameters for the noise model.
:return: q (water content), K (unsaturated hydraulic conductivity),
C (specific moisture capacity), Kbkg (background hydraulic conductivity),
and q_inf_max (max infiltration capacity) [dim_d x dim_m].
:raises ValueError: if there is a mismatch in the input dimensions.
"""
# Ensure the input is 1-D.
z, psi = np.atleast_1d(z, psi)
# Get the vector size.
dim_d, dim_m = psi.shape[0], None
# Check if the input is 2D.
if psi.ndim == 2:
dim_m = psi.shape[1]
# _end_if_
# Check the input dimensions (of the vertical domain).
if dim_d != z.shape[0]:
raise ValueError(f" {self.__class__.__name__}:"
f" Input size dimensions don't match:"
f" {dim_d} not equal to {z.shape[0]}.")
# _end_if_
# Get the porosity field at 'z'.
porous_z, *_ = self.porous(z)
# Make sure the porosity is at least 1-D.
porous_z = np.atleast_1d(porous_z)
# Vectorized version.
if dim_m is not None:
porous_z = porous_z.repeat(dim_m).reshape(dim_d, dim_m)
# _end_if_
# Initialise at None. This will cause an error
# if the n_rnd is not given as input. (revisit)
n_rnd = None
# Extract additional parameters.
if "n_rnd" in args[0]:
n_rnd = np.atleast_1d(args[0]["n_rnd"])
# _end_if_
# Pre-compute constant parameters.
delta_s = porous_z - self.theta_res
# Check if there are saturated cells.
id_sat = np.where(psi >= self.psi_sat)
# Compute the volumetric moisture content in unsaturated cells.
q = self.theta_res +\
delta_s * (1.0 + (self.alpha * np.abs(psi)) ** self.n) ** (-self.m)
# Volumetric water content in saturated cells.
q[id_sat] = porous_z[id_sat]
# Compute the effective saturation (Se \in [0,1]).
# (i.e. the "normalized" water content)
s_eff = (q - self.theta_res) / delta_s
# SAFEGUARD: THIS SHOULD NOT HAPPEN.
s_eff = np.minimum(np.maximum(s_eff, 0.0), 1.0)
# Get all the underground boundaries.
(l0, l1, l2, l3) = self.porous.layers
# Find the indexes of each underground layer.
soil_layer_idx = np.where((z >= l0) & (z < l1))
sapr_layer_idx = np.where((z >= l1) & (z < l2))
        wbed_layer_idx = np.where((z >= l2) & (z <= l3))
import numpy as np
import pandas as pd
import csv
import os
from datetime import datetime
class Console_export(object):
def __init__(self, path):
self.path = path + "_sim_summary.txt"
def printLog(self, *args, **kwargs):
print(*args, **kwargs)
with open(self.path,'a') as file:
print(*args, **kwargs, file=file)
def export_statistics_logging(statistics, parameters, resources):
if parameters['EXPORT_NO_LOGS']: return None
statistics['sim_end_time'] = datetime.now()
path = parameters['PATH_TIME']
ce = Console_export(path)
ce.printLog("Start logger ", datetime.now())
"""
Statistics & Logging
"""
# Cut-off last processes at end of simulation
for mach in range(parameters['NUM_MACHINES']):
list_of_stats = ['stat_machines_working', 'stat_machines_changeover', 'stat_machines_broken',
'stat_machines_idle']
for stat in list_of_stats:
if stat == 'stat_machines_working':
if resources['machines'][mach].last_process_start > statistics['time_end']:
resources['machines'][mach].last_process_start -= resources['machines'][mach].last_broken_time
if resources['machines'][mach].last_process_start + resources['machines'][mach].last_process_time > statistics['time_end']:
statistics[stat][mach] -= resources['machines'][mach].last_process_start + resources['machines'][mach].last_process_time - statistics['time_end']
if stat == 'stat_machines_broken':
if resources['machines'][mach].last_broken_start + resources['machines'][mach].last_broken_time > statistics['time_end']:
statistics[stat][mach] -= resources['machines'][mach].last_broken_start + resources['machines'][mach].last_broken_time - statistics['time_end']
statistics['stat_machines_working'] = np.true_divide(statistics['stat_machines_working'], statistics['time_end'])
statistics['stat_machines_changeover'] = np.true_divide(statistics['stat_machines_changeover'], statistics['time_end'])
statistics['stat_machines_broken'] = np.true_divide(statistics['stat_machines_broken'], statistics['time_end'])
statistics['stat_machines_idle'] = np.true_divide(statistics['stat_machines_idle'], statistics['time_end'])
statistics['stat_transp_working'] = np.true_divide(statistics['stat_transp_working'], statistics['time_end'])
statistics['stat_transp_walking'] = np.true_divide(statistics['stat_transp_walking'], statistics['time_end'])
    statistics['stat_transp_handling'] = np.true_divide(statistics['stat_transp_handling'], statistics['time_end'])
# dqn.py
# https://geektutu.com
from collections import deque
import random
import gym
import numpy as np
from tensorflow.keras import models, layers, optimizers
class DQN(object):
def __init__(self):
self.step = 0
        self.update_freq = 200  # model update frequency
        self.replay_size = 2000  # replay buffer (training set) size
self.replay_queue = deque(maxlen=self.replay_size)
self.model = self.create_model()
self.target_model = self.create_model()
def create_model(self):
"""创建一个隐藏层为100的神经网络"""
STATE_DIM, ACTION_DIM = 2, 3
model = models.Sequential([
layers.Dense(100, input_dim=STATE_DIM, activation='relu'),
layers.Dense(ACTION_DIM, activation="linear")
])
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(0.001))
return model
def act(self, s, epsilon=0.1):
"""预测动作"""
# 刚开始时,加一点随机成分,产生更多的状态
if np.random.uniform() < epsilon - self.step * 0.0002:
return np.random.choice([0, 1, 2])
return np.argmax(self.model.predict(np.array([s]))[0])
def save_model(self, file_path='MountainCar-v0-dqn.h5'):
print('model saved')
self.model.save(file_path)
def remember(self, s, a, next_s, reward):
"""历史记录,position >= 0.4时给额外的reward,快速收敛"""
if next_s[0] >= 0.4:
reward += 1
self.replay_queue.append((s, a, next_s, reward))
def train(self, batch_size=64, lr=1, factor=0.95):
if len(self.replay_queue) < self.replay_size:
return
self.step += 1
        # Every update_freq steps, copy the weights of model into target_model
if self.step % self.update_freq == 0:
self.target_model.set_weights(self.model.get_weights())
replay_batch = random.sample(self.replay_queue, batch_size)
s_batch = np.array([replay[0] for replay in replay_batch])
        next_s_batch = np.array([replay[2] for replay in replay_batch])
"""
64-bit rANS encoder/decoder
Based on https://arxiv.org/abs/1402.3392
x: compressed message, represented by current state of the encoder/decoder.
precision: the natural numbers are divided into ranges of size 2^precision.
start & freq: start indicates the beginning of the range in [0, 2^precision-1]
that the current symbol is represented by. freq is the length of the range for
the given symbol.
The probability distribution is quantized to 2^precision, where
P(symbol) ~= freq(symbol) / 2^precision
Compressed state is represented as a stack (head, tail)
"""
import numpy as np
import torch
RANS_L = 1 << 31 # the lower bound of the normalisation interval
def empty_message(shape):
return (np.full(shape, RANS_L, "uint64"), ())
def stack_extend(stack, arr):
return arr, stack
def stack_slice(stack, n):
# Pop elements from message stack if
# decoded value outside normalisation
# interval
slc = []
while n > 0:
arr, stack = stack
if n >= len(arr):
slc.append(arr)
n -= len(arr)
else:
slc.append(arr[:n])
stack = arr[n:], stack
break
return stack, np.concatenate(slc)
def push(x, starts, freqs, precisions):
"""
Encode a vector of symbols in x. Each symbol has range given by
[start, start + freq). All frequencies are assumed to sum to
"1 << precision", and the resulting bits get written to x.
Inputs:
x: Compressed message of form (head, tail)
starts: Starts of interval corresponding to symbols. Analogous to
CDF evaluated at each symbol.
freqs: Width of intervals corresponding to symbols.
precision: Determines normalization factor of probability distribution.
"""
head, tail = x
assert head.shape == starts.shape == freqs.shape, (
f"Inconsistent encoder shapes! head: {head.shape} | "
f"starts: {starts.shape} | freqs: {freqs.shape}")
# 32-bit Renormalization - restrict symbols to pre-images
x_max = ((RANS_L >> precisions) << 32) * freqs
idxs = head >= x_max
    if np.any(idxs):
"""
load_xparm.py
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
from __future__ import print_function
import os, shutil, tempfile, subprocess
import numpy
from pymol import cmd, stored
from pymol.cgo import * # get constants
"""
XPARM.XDS
1 50.0000 1.0000 -0.999997 -0.001378 -0.002095
1.071460 -0.001051 -0.000103 0.933305
3072 3072 0.073242 0.073242
159.917953 1541.014771 1538.410645
1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
18 75.7997 97.0630 202.3445 90.000 90.000 90.000
74.986534 -7.994404 7.661397
14.166846 72.044411 -63.483360
-1.222476 133.907318 151.692612
1. Starting image number (STARTING_FRAME=), spindle angle at start (STARTING_ANGLE=), oscillation range, and laboratory coordinates of the rotation axis.
2. Wavelength (A) and laboratory coordinates of the incident beam wavevector.
3.Number of pixels along the detector X-axis (NX=) and Y-axis (NY=) in a data image and pixel sizes (mm) (QX=, QY=) along X and Y.
4. Signed distance between crystal and detector (mm), detector X-coordinate (pixels) of origin, detector Y-coordinate (pixels) of origin.
5. Laboratory coordinates of the unit vector along the detector X-axis.
6. Laboratory coordinates of the unit vector along the detector Y-axis.
7. Laboratory coordinates of the unit vector along the detector normal.
8. Space group number and unit cell parameters (A and degrees).
9. Laboratory coordinates of the unit cell a-axis of the unrotated crystal.
10. Laboratory coordinates of the unit cell b-axis of the unrotated crystal.
11. Laboratory coordinates of the unit cell c-axis of the unrotated crystal.
"""
class XPARM:
def __init__(self, xparm_file):
lines = open(xparm_file).readlines()
is_new_format = "XPARM.XDS" in lines[0]
if not is_new_format:
starting_frame, starting_angle, osc_range, rotx, roty, rotz = lines[0].split()
wavelength, ibeamx, ibeamy, ibeamz = lines[1].split()
nx, ny, qx, qy = lines[2].split()
distance, orgx, orgy = lines[3].split()
Xx, Xy, Xz = lines[4].split()
Yx, Yy, Yz = lines[5].split()
Zx, Zy, Zz = lines[6].split()
spacegroup, a, b, c, alpha, beta, gamma = lines[7].split()
ax, ay, az = lines[8].split()
bx, by, bz = lines[9].split()
cx, cy, cz = lines[10].split()
else:
starting_frame, starting_angle, osc_range, rotx, roty, rotz = lines[1].split()
wavelength, ibeamx, ibeamy, ibeamz = lines[2].split()
spacegroup, a, b, c, alpha, beta, gamma = lines[3].split()
ax, ay, az = lines[4].split()
bx, by, bz = lines[5].split()
cx, cy, cz = lines[6].split()
nseg, nx, ny, qx, qy = lines[7].split()
orgx, orgy, distance = lines[8].split()
Xx, Xy, Xz = lines[9].split()
Yx, Yy, Yz = lines[10].split()
Zx, Zy, Zz = lines[11].split()
self.starting_frame = int(starting_frame)
self.starting_angle = float(starting_angle)
self.osc_range = float(osc_range)
self.rotation_axis = numpy.array((float(rotx), float(roty), float(rotz)))
self.wavelength = float(wavelength)
self.incident_beam = numpy.array((float(ibeamx), float(ibeamy), float(ibeamz)))
self.nx = float(nx)
self.ny = float(ny)
self.qx = float(qx)
self.qy = float(qy)
self.distance = float(distance)
self.origin = numpy.array((float(orgx), float(orgy)))
self.X_axis = numpy.array((float(Xx), float(Xy), float(Xz)))
self.Y_axis = numpy.array((float(Yx), float(Yy), float(Yz)))
self.Z_axis = numpy.array((float(Zx), float(Zy), float(Zz)))
self.spacegroup = int(spacegroup)
self.unit_cell = numpy.array((float(a), float(b), float(c), float(alpha), float(beta), float(gamma)))
self.a_axis = numpy.array((float(ax), float(ay), float(az)))
self.b_axis = numpy.array((float(bx), float(by), float(bz)))
self.c_axis = numpy.array((float(cx), float(cy), float(cz)))
# __init__()
# class XPARM
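# Minimal usage sketch (hypothetical file name; added for illustration):
#
#     xparm = XPARM("XPARM.XDS")
#     print(xparm.unit_cell)   # array([a, b, c, alpha, beta, gamma])
#     print(xparm.a_axis)      # laboratory coordinates of the unrotated a-axis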
def reciprocal(a, b, c):
##
# @param a,b,c numpy.array
##
    V = numpy.dot(numpy.cross(a,b)
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
def heaviside(x, value=1., unitstep=False, zero=False):
"""
Heaviside (or unit step) operator
H = 0 for x < 0
H = 1/2 for x = 0
H = 1 for x > 0
if unitstep
H = 0 for x < 0
H = 1 for x >= 0
if zero
H = 0 for x <= 0
H = 1 for x > 0
Definition
----------
def heaviside(x, value=1., unitstep=False, zero=False):
Input
-----
x value or array of values
Optional Input
--------------
value output is heaviside *= value
unitstep If True, H(0)=1 instead of 1/2
zero If True, H(0)=0 instead of 1/2
Output
------
Heaviside function 0, 1/2, and 1
Restrictions
------------
Returns False if error.
Examples
--------
>>> from autostring import astr
>>> print(astr(heaviside([-1,0.,1.]),1,pp=True))
['0.0' '0.5' '1.0']
>>> print(astr(heaviside([-1,0.,1.], zero=True),1,pp=True))
['0.0' '0.0' '1.0']
>>> print(astr(heaviside([-1,0.,1.], unitstep=True),1,pp=True))
['0.0' '1.0' '1.0']
>>> print(astr(heaviside([-1,0.,1.], value=2),1,pp=True))
['0.0' '1.0' '2.0']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2014 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jan 2012
Modified, MC, Feb 2013 - ported to Python 3
MC, Apr 2014 - assert
"""
assert (zero+unitstep) < 2, 'unitstep and zero mutually exclusive.'
if zero:
out = np.where(np.ma.array(x) > 0., 1., 0.)
elif unitstep:
out = np.where(np.ma.array(x) >= 0., 1., 0.)
else:
        out = (np.where(np.ma.array(x)
from __future__ import print_function
import copy
import torch.utils.data
from IPython.core.debugger import Pdb
from railrl.data_management.images import normalize_image
debug = Pdb().set_trace
# Adapted from pytorch examples
import torch
import torch.utils.data
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.utils import save_image
from railrl.misc.eval_util import create_stats_ordered_dict
from railrl.misc.ml_util import ConstantSchedule
from railrl.pythonplusplus import identity
from railrl.torch import pytorch_util as ptu
from railrl.torch.core import PyTorchModule
from railrl.core import logger
import os.path as osp
from railrl.envs.multitask.point2d import MultitaskImagePoint2DEnv
import numpy as np
class ACAI(PyTorchModule):
def __init__(
self,
representation_size,
init_w=1e-3,
input_channels=1,
imsize=84,
added_fc_size=0,
hidden_init=ptu.fanin_init,
output_activation=identity,
min_variance=1e-4,
use_min_variance=True,
state_size=0,
):
super().__init__()
self.representation_size = representation_size
self.hidden_init = hidden_init
self.output_activation = output_activation
self.input_channels = input_channels
self.imsize = imsize
self.imlength = self.imsize ** 2 * self.input_channels
if min_variance is None:
self.log_min_variance = None
else:
self.log_min_variance = float(np.log(min_variance))
self.dist_mu = np.zeros(self.representation_size)
self.dist_std = np.ones(self.representation_size)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.added_fc_size = added_fc_size
self.init_w = init_w
self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=5, stride=3)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=3)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 128, kernel_size=5, stride=3)
self.bn3 = nn.BatchNorm2d(128)
# self.conv_output_dim = 1568 # kernel 2
self.conv_output_dim = 512 # kernel 3
# self.hidden = nn.Linear(self.conv_output_dim + added_fc_size, representation_size)
self.fc1 = nn.Linear(self.conv_output_dim, representation_size)
self.fc2 = nn.Linear(self.conv_output_dim, representation_size)
self.fc3 = nn.Linear(representation_size, self.conv_output_dim)
self.conv4 = nn.ConvTranspose2d(128, 64, kernel_size=5, stride=3)
self.conv5 = nn.ConvTranspose2d(64, 32, kernel_size=6, stride=3)
self.conv6 = nn.ConvTranspose2d(32, input_channels, kernel_size=6, stride=3)
# critic network
self.conv7 = nn.Conv2d(input_channels, 32, kernel_size=5, stride=3)
self.bn4 = nn.BatchNorm2d(32)
self.conv8 = nn.Conv2d(32, 64, kernel_size=5, stride=3)
self.bn5 = nn.BatchNorm2d(64)
self.conv9 = nn.Conv2d(64, 128, kernel_size=5, stride=3)
self.bn6 = nn.BatchNorm2d(128)
# self.hidden = nn.Linear(self.conv_output_dim + added_fc_size, representation_size)
self.fc4 = nn.Linear(self.conv_output_dim, representation_size)
self.init_weights(init_w)
def init_weights(self, init_w):
self.hidden_init(self.conv1.weight)
self.conv1.bias.data.fill_(0)
self.hidden_init(self.conv2.weight)
self.conv2.bias.data.fill_(0)
self.hidden_init(self.conv3.weight)
self.conv3.bias.data.fill_(0)
self.hidden_init(self.conv4.weight)
self.conv4.bias.data.fill_(0)
self.hidden_init(self.conv5.weight)
self.conv5.bias.data.fill_(0)
self.hidden_init(self.conv6.weight)
self.conv6.bias.data.fill_(0)
self.hidden_init(self.conv7.weight)
self.conv7.bias.data.fill_(0)
self.hidden_init(self.conv8.weight)
self.conv8.bias.data.fill_(0)
self.hidden_init(self.conv9.weight)
self.conv9.bias.data.fill_(0)
self.hidden_init(self.fc1.weight)
self.fc1.bias.data.fill_(0)
self.fc1.weight.data.uniform_(-init_w, init_w)
self.fc1.bias.data.uniform_(-init_w, init_w)
self.hidden_init(self.fc2.weight)
self.fc2.bias.data.fill_(0)
self.fc2.weight.data.uniform_(-init_w, init_w)
self.fc2.bias.data.uniform_(-init_w, init_w)
self.fc3.weight.data.uniform_(-init_w, init_w)
self.fc3.bias.data.uniform_(-init_w, init_w)
self.hidden_init(self.fc4.weight)
self.fc4.bias.data.fill_(0)
self.fc4.weight.data.uniform_(-init_w, init_w)
def encode(self, input):
# debug()
input = input.view(-1, self.imlength + self.added_fc_size)
conv_input = input.narrow(start=0, length=self.imlength, dimension=1)
x = conv_input.contiguous().view(-1, self.input_channels, self.imsize, self.imsize)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
h = x.view(-1, 512) # flatten
if self.added_fc_size != 0:
fc_input = input.narrow(start=self.imlength, length=self.added_fc_size, dimension=1)
h = torch.cat((h, fc_input), dim=1)
mu = self.output_activation(self.fc1(h))
if self.log_min_variance is None:
logvar = self.output_activation(self.fc2(h))
else:
logvar = self.log_min_variance + torch.abs(self.fc2(h))
return mu, logvar
def reparameterize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = ptu.Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = self.relu(self.fc3(z))
h = h3.view(-1, 128, 2, 2)
x = F.relu(self.conv4(h))
x = F.relu(self.conv5(x))
x = self.conv6(x).view(-1, self.imsize * self.imsize * self.input_channels)
return self.sigmoid(x)
def forward(self, x):
batch_size = x.size()[0]
x_2 = x[torch.randperm(batch_size).cuda()]
mu, logvar = self.encode(x)
mu_2, logvar_2 = self.encode(x_2)
z = self.reparameterize(mu, logvar)
z_2 = self.reparameterize(mu_2, logvar_2)
alpha = torch.rand(batch_size, 1)
alpha = 0.5 - torch.abs(0.5 - alpha)
a = Variable(torch.from_numpy(np.diagflat(alpha.numpy()))).cuda()
one_minus_a = Variable(torch.from_numpy(np.diagflat((1 - alpha).numpy()))).cuda()
z_alpha = a.matmul(z) + one_minus_a.matmul(z_2)
return self.decode(z), mu, logvar, self.critic(self.decode(z_alpha)).cuda(), \
Variable(alpha).cuda()
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
# TODO: is the deepcopy necessary?
self.__dict__.update(copy.deepcopy(d))
def critic(self, input):
input = input.view(-1, self.imlength + self.added_fc_size)
conv_input = input.narrow(start=0, length=self.imlength, dimension=1)
x = conv_input.contiguous().view(-1, self.input_channels, self.imsize, self.imsize)
x = F.relu(self.bn4(self.conv7(x)))
x = F.relu(self.bn5(self.conv8(x)))
x = F.relu(self.bn6(self.conv9(x)))
h = x.view(-1, 512) # flatten
if self.added_fc_size != 0:
fc_input = input.narrow(start=self.imlength, length=self.added_fc_size, dimension=1)
h = torch.cat((h, fc_input), dim=1)
mu = self.sigmoid(self.fc4(h)) * 0.5
return torch.mean(mu, 1)
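# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# ACAI.forward() shuffles the batch, encodes both orderings, mixes the two latent codes with a
# random alpha in [0, 0.5], and asks the critic to recover that alpha from the decoded interpolant.
# The helper below is a hypothetical, uninvoked example; it assumes a CUDA device is available,
# since forward() calls .cuda() internally.
def _acai_forward_example(batch_size=8, representation_size=16):
    model = ACAI(representation_size, input_channels=1, imsize=84).cuda()
    x = ptu.np_to_var(np.random.rand(batch_size, model.imlength).astype(np.float32)).cuda()
    recon, mu, logvar, predicted_alpha, alpha = model(x)
    # recon: (batch_size, imlength); predicted_alpha, alpha: per-sample interpolation coefficients
    return recon.shape, predicted_alpha.shape, alpha.shape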
class ACAITrainer():
def __init__(
self,
train_dataset,
test_dataset,
model,
batch_size=128,
log_interval=0,
beta=0.5,
beta_schedule=None,
imsize=84,
lr=1e-3,
do_scatterplot=False,
normalize=False,
state_sim_debug=False,
mse_weight=0.1,
is_auto_encoder=False,
lmbda=0.5,
mu=1,
gamma=0.2,
):
self.log_interval = log_interval
self.batch_size = batch_size
self.beta = beta
if is_auto_encoder:
self.beta = 0
self.beta_schedule = beta_schedule
if self.beta_schedule is None:
self.beta_schedule = ConstantSchedule(self.beta)
self.imsize = imsize
self.do_scatterplot = do_scatterplot
self.lmbda = lmbda
self.mu = mu
self.gamma = gamma
"""
I think it's a bit nicer if the caller makes this call, i.e.
```
m = ConvVAE(representation_size)
if ptu.gpu_enabled():
m.cuda()
t = ConvVAETrainer(train_data, test_data, m)
```
However, I'll leave this here for backwards-compatibility.
"""
if ptu.gpu_enabled():
model.cuda()
self.model = model
self.representation_size = model.representation_size
self.input_channels = model.input_channels
self.imlength = model.imlength
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
self.train_dataset, self.test_dataset = train_dataset, test_dataset
self.normalize = normalize
self.state_sim_debug = state_sim_debug
self.mse_weight = mse_weight
self.x_next_index = self.input_channels * self.imsize ** 2
if self.normalize:
self.train_data_mean = np.mean(self.train_dataset, axis=0)
# self.train_dataset = ((self.train_dataset - self.train_data_mean)) + 1 / 2
# self.test_dataset = ((self.test_dataset - self.train_data_mean)) + 1 / 2
def get_batch(self, train=True):
dataset = self.train_dataset if train else self.test_dataset
ind = np.random.randint(0, len(dataset), self.batch_size)
samples = dataset[ind, :]
samples = normalize_image(samples)
if self.normalize:
samples = ((samples - self.train_data_mean) + 1) / 2
return ptu.np_to_var(samples)
def get_debug_batch(self, train=True):
dataset = self.train_dataset if train else self.test_dataset
X, Y = dataset
ind = np.random.randint(0, Y.shape[0], self.batch_size)
X = X[ind, :]
Y = Y[ind, :]
return ptu.np_to_var(X), ptu.np_to_var(Y)
def get_batch_smooth(self, train=True):
dataset = self.train_dataset if train else self.test_dataset
ind = np.random.randint(0, len(dataset), self.batch_size)
samples = dataset[ind, :]
samples = normalize_image(samples)
if self.normalize:
samples = ((samples - self.train_data_mean) + 1) / 2
x_next, x = samples[:, :self.x_next_index], samples[:, self.x_next_index:]
return ptu.np_to_var(x_next), ptu.np_to_var(x)
def logprob(self, recon_x, x, mu, logvar):
# Divide by batch_size rather than setting size_average=True because
# otherwise the averaging will also happen across dimension 1 (the
# pixels)
return F.binary_cross_entropy(
recon_x,
x.narrow(start=0, length=self.imlength, dimension=1).contiguous().view(-1, self.imlength),
size_average=False,
) / self.batch_size
def kl_divergence(self, recon_x, x, mu, logvar):
return - torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()
def state_similarity_loss(self, model, encoded_x, states):
output = self.model.fc6(F.relu(self.model.fc5(encoded_x)))
return torch.norm(output - states) ** 2 / self.batch_size
def train_epoch(self, epoch, sample_batch=None, batches=100, from_rl=False):
self.model.train()
losses = []
bces = []
kles = []
mses = []
losses_c = []
beta = self.beta_schedule.get_value(epoch)
for batch_idx in range(batches):
data = self.get_batch()
if sample_batch is not None:
data = sample_batch(self.batch_size)
self.optimizer.zero_grad()
recon_batch, mu, logvar, predicted_alpha, alpha = self.model(data)
bce = self.logprob(recon_batch, data, mu, logvar)
kle = self.kl_divergence(recon_batch, data, mu, logvar)
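# ACAI-style objective: the autoencoder loss adds a penalty on the critic's alpha estimate for
# decoded interpolants, while the critic loss (loss_c) regresses the true alpha and is regularised
# towards zero on gamma-blended real/reconstructed inputs.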
loss = bce + beta * kle + self.lmbda * torch.norm(predicted_alpha, 2)
regularizer_a = self.model.critic(self.gamma * data + (1 - self.gamma) * recon_batch)
loss_c = torch.norm(predicted_alpha - alpha, 2) + self.mu * torch.norm(regularizer_a, 2)
loss.backward(retain_graph=True)
loss_c.backward()
losses.append(loss.data[0])
losses_c.append(loss_c.data[0])
bces.append(bce.data[0])
kles.append(kle.data[0])
if self.state_sim_debug:
mses.append(sim_loss.data[0])
self.optimizer.step()
if self.log_interval and batch_idx % self.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx, batches,
100. * batch_idx / batches,
loss.data[0] / len(data)))
if not from_rl:
logger.record_tabular("train/epoch", epoch)
logger.record_tabular("train/critic_loss", np.mean(losses_c))
logger.record_tabular("train/BCE", np.mean(bces))
logger.record_tabular("train/KL", np.mean(kles))
if self.state_sim_debug:
logger.record_tabular("train/mse", | np.mean(mses) | numpy.mean |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import numpy as np
import pytest
import scipy.stats as st
from aesara import tensor as at
from numpy.testing import assert_allclose
from scipy.special import logsumexp
import pymc as pm
from pymc import (
Dirichlet,
Exponential,
Gamma,
LogNormal,
Metropolis,
Mixture,
Model,
MvNormal,
Normal,
NormalMixture,
Poisson,
sample,
)
from pymc.aesaraf import floatX
from pymc.distributions.shape_utils import to_tuple
from pymc.tests.helpers import SeededTest
pytestmark = pytest.mark.xfail(reason="Mixture not refactored.")
# Generate data
def generate_normal_mixture_data(w, mu, sd, size=1000):
component = np.random.choice(w.size, size=size, p=w)
mu, sd = np.broadcast_arrays(mu, sd)
out_size = to_tuple(size) + mu.shape[:-1]
mu_ = np.array([mu[..., comp] for comp in component.ravel()])
sd_ = np.array([sd[..., comp] for comp in component.ravel()])
mu_ = np.reshape(mu_, out_size)
sd_ = np.reshape(sd_, out_size)
x = np.random.normal(mu_, sd_, size=out_size)
return x
def generate_poisson_mixture_data(w, mu, size=1000):
component = np.random.choice(w.size, size=size, p=w)
mu = np.atleast_1d(mu)
out_size = to_tuple(size) + mu.shape[:-1]
mu_ = np.array([mu[..., comp] for comp in component.ravel()])
mu_ = np.reshape(mu_, out_size)
x = np.random.poisson(mu_, size=out_size)
return x
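# Illustrative check (added for clarity; not part of the original test suite): the two helpers
# above draw one mixture component per sample and then sample from that component, so the
# empirical mean of the generated data should land close to the weighted component means.
def _example_mixture_data():
    w, mu, sd = np.array([0.75, 0.25]), np.array([0.0, 5.0]), np.ones(2)
    x = generate_normal_mixture_data(w, mu, sd, size=2000)
    return x.mean(), float(w @ mu)  # these two numbers should roughly agree (~1.25)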
class TestMixture(SeededTest):
@classmethod
def setup_class(cls):
super().setup_class()
cls.norm_w = np.array([0.75, 0.25])
cls.norm_mu = np.array([0.0, 5.0])
cls.norm_sd = np.ones_like(cls.norm_mu)
cls.norm_x = generate_normal_mixture_data(cls.norm_w, cls.norm_mu, cls.norm_sd, size=1000)
cls.pois_w = np.array([0.4, 0.6])
cls.pois_mu = np.array([5.0, 20.0])
cls.pois_x = generate_poisson_mixture_data(cls.pois_w, cls.pois_mu, size=1000)
def test_dimensions(self):
a1 = Normal.dist(mu=0, sigma=1)
a2 = Normal.dist(mu=10, sigma=1)
mix = Mixture.dist(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])
assert mix.mode.ndim == 0
assert mix.logp(0.0).ndim == 0
value = np.r_[0.0, 1.0, 2.0]
assert mix.logp(value).ndim == 1
def test_mixture_list_of_normals(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
Mixture(
"x_obs",
w,
[Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
observed=self.norm_x,
)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
)
def test_normal_mixture(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
NormalMixture("x_obs", w, mu, tau=tau, observed=self.norm_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
)
@pytest.mark.parametrize(
"nd,ncomp", [(tuple(), 5), (1, 5), (3, 5), ((3, 3), 5), (3, 3), ((3, 3), 3)], ids=str
)
def test_normal_mixture_nd(self, nd, ncomp):
nd = to_tuple(nd)
ncomp = int(ncomp)
comp_shape = nd + (ncomp,)
test_mus = np.random.randn(*comp_shape)
test_taus = np.random.gamma(1, 1, size=comp_shape)
observed = generate_normal_mixture_data(
w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
)
with Model() as model0:
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
obs0 = NormalMixture(
"obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
)
with Model() as model1:
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
comp_dist = [
Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
]
mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)
with Model() as model2:
# Expected to fail if comp_shape is not provided,
# nd is multidim and it does not broadcast with ncomp. If by chance
# it does broadcast, an error is raised if the mixture is given
# observed data.
# Furthermore, the Mixture will also raise errors when the observed
# data is multidimensional but it does not broadcast well with
# comp_dists.
mus = Normal("mus", shape=comp_shape)
taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
if len(nd) > 1:
if nd[-1] != ncomp:
with pytest.raises(ValueError):
NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
mixture2 = None
else:
mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
else:
mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
observed_fails = False
if len(nd) >= 1 and nd != (1,):
try:
np.broadcast(np.empty(comp_shape), observed)
except Exception:
observed_fails = True
if observed_fails:
with pytest.raises(ValueError):
NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
obs2 = None
else:
obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
testpoint = model0.initial_point
testpoint["mus"] = test_mus
testpoint["taus"] = test_taus
assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
if mixture2 is not None and obs2 is not None:
assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
if mixture2 is not None:
assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
if obs2 is not None:
assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
def test_poisson_mixture(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
Mixture("x_obs", w, Poisson.dist(mu), observed=self.pois_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
)
def test_mixture_list_of_poissons(self):
with Model() as model:
w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=self.pois_x)
step = Metropolis()
trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)
assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
assert_allclose(
np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
)
def test_mixture_of_mvn(self):
mu1 = np.asarray([0.0, 1.0])
cov1 = np.diag([1.5, 2.5])
mu2 = np.asarray([1.0, 0.0])
cov2 = np.diag([2.5, 3.5])
obs = np.asarray([[0.5, 0.5], mu1, mu2])
with Model() as model:
w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)
# check logp of each component
complogp_st = np.vstack(
(
st.multivariate_normal.logpdf(obs, mu1, cov1),
st.multivariate_normal.logpdf(obs, mu2, cov2),
)
).T
complogp = y.distribution._comp_logp(aesara.shared(obs)).eval()
assert_allclose(complogp, complogp_st)
# check logp of mixture
testpoint = model.initial_point
mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
assert_allclose(y.logp_elemwise(testpoint), mixlogp_st)
# check logp of model
priorlogp = st.dirichlet.logpdf(
x=testpoint["w"],
alpha=np.ones(2),
)
assert_allclose(model.logp(testpoint), mixlogp_st.sum() + priorlogp)
def test_mixture_of_mixture(self):
if aesara.config.floatX == "float32":
rtol = 1e-4
else:
rtol = 1e-7
nbr = 4
with Model() as model:
# mixtures components
g_comp = Normal.dist(
mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
)
l_comp = LogNormal.dist(
mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
)
# weight vector for the mixtures
g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
# mixture components
g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
# mixture of mixtures
mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))
test_point = model.initial_point
def mixmixlogp(value, point):
floatX = aesara.config.floatX
priorlogp = (
st.dirichlet.logpdf(
x=point["g_w"],
alpha=np.ones(nbr) * 0.0000001,
).astype(floatX)
+ st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
+ st.dirichlet.logpdf(
x=point["l_w"],
alpha=np.ones(nbr) * 0.0000001,
).astype(floatX)
+ st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
+ st.dirichlet.logpdf(
x=point["mix_w"],
alpha=np.ones(2),
).astype(floatX)
)
complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
mixlogp1 = logsumexp(
np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
)
complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
mixlogp2 = logsumexp(
np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
)
complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
mixmixlogpg = logsumexp(
np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
)
return priorlogp, mixmixlogpg
value = np.exp(self.norm_x)[:, None]
priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
# check logp of mixture
assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
# check model logp
assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
# check input and check logp again
test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
test_point["mu_g"] = np.exp(np.random.randn(nbr))
priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
def test_sample_prior_and_posterior(self):
def build_toy_dataset(N, K):
pi = np.array([0.2, 0.5, 0.3])
mus = [[1, 1, 1], [-1, -1, -1], [2, -2, 0]]
stds = [[0.1, 0.1, 0.1], [0.1, 0.2, 0.2], [0.2, 0.3, 0.3]]
x = np.zeros((N, 3), dtype=np.float32)
import numpy as np
from utils import *
class StateReducer:
"""
Class that generates the initial state of the decoder from the last states of the encoder
"""
def __init__(self, W_c = [], W_h= [], b_c = [], b_h = []):
self.W_c = np.array(W_c)
'''
root/code/block_definitions/utilities/argument_types.py
Overview:
Strongly typing the CGP and having a separate GA evolution for our arguments means it is easiest to create an individual class for each of our argument data types, while having them all follow the same abstract class.
At the minimum, each class needs a .value attribute to store the actual value of an instantiated argument, and a method to mutate it.
There is a lot of experimentation still to be done to make sure that we have robust enough mutation methods: if I want a simple integer, but it needs to be a large value, is there any way to guarantee that there will be an argument close enough?
Rules:
Basically only needs .value and mutate() defined.
'''
### packages
import numpy as np
from numpy import random as rnd
from copy import copy, deepcopy
from abc import ABC, abstractmethod
import re
### sys relative to root dir
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(dirname(dirname(realpath(__file__))))))
### absolute imports wrt root
from codes.utilities.custom_logging import ezLogging
class ArgumentType_Abstract(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def mutate(self):
pass
def __str__(self):
return "{}".format(self.value)
def __repr__(self):
return str(self)
'''
Mutation Methods:
Define a set of mutation methods to be called on to mutate all/any of the argument classes.
Currently, the way we set the boundaries of the uniform distribution or the std of the normal distribution is entirely arbitrary. No analysis has been done on this yet.
The code is deliberately biased towards pushing values away from zero and negative values, as you can see in the conditions handling the case where the value is 0; a lot still needs to be changed in these methods.
'''
def mut_uniform(self):
if self.value == 0:
low = 0
high = 5
else:
low = self.value*.85
high = self.value * 1.15
ezLogging.debug("%s-%s - numpy.random.uniform(%f,%f)" % (None, None, low, high))
self.value = rnd.uniform(low,high)
def mut_normal(self):
if self.value == 0:
mean = 3
std = 3*.1
else:
mean = self.value
std = self.value * .1
ezLogging.debug("%s-%s - numpy.random.normal(%f,%f)" % (None, None, mean, std))
self.value = rnd.normal(mean, std)
class ArgumentType_Bool(ArgumentType_Abstract):
'''
just your basic bool. mutate always switches to opposite value
'''
def __init__(self, value=None):
if value is None:
self.value = bool(np.random.choice([True,False]))
else:
self.value = bool(value)
ezLogging.debug("%s-%s - Initialize ArgumentType_Bool Class to %f" % (None, None, self.value))
def mutate(self):
self.value = not self.value
ezLogging.debug("%s-%s - Mutated ArgumentType_Bool to %f" % (None, None, self.value))
class ArgumentType_Ints(ArgumentType_Abstract):
'''
To try and capture a large range of ints, 1/3 of ints will start at 5,
another third will start at 50, and the final third will start at 100.
Then all will mutate around that value.
All ints are bounded below by 1, i.e. the range is [1, inf); after mutating, anything
less than 1 is forced to 1.
'''
def __init__(self, value=None):
if value is None:
roll = rnd.random_integers(0,2)
if roll == 0:
self.value = 5
elif roll == 1:
self.value = 50
elif roll == 2:
self.value = 100
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Ints Class to %f" % (None, None, self.value))
def mutate(self):
roll = rnd.random_integers(0,1)
if roll == 0:
self.mut_normal()
elif roll == 1:
self.mut_uniform()
else:
pass
if self.value < 1:
self.value = 1
else:
pass
self.value = int(self.value)
ezLogging.debug("%s-%s - Mutated ArgumentType_Ints to %f" % (None, None, self.value))
class ArgumentType_Pow2(ArgumentType_Abstract):
'''
This can be any number 2**i with i any int {1,2,3,4,5,6,7,8}
Commonly used in CNN for setting the size of the convolutions.
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Pow2 Class to %f" % (None, None, self.value))
def mutate(self):
#choices = rnd.random_integers(1, 8)
choices = list(np.arange(1,8+1))
if self.value in choices:
choices.remove(self.value) # works in-place
pow2 = np.random.choice(choices)
self.value = int(2**pow2)
ezLogging.debug("%s-%s - Mutated ArgumentType_Pow2 to %f" % (None, None, self.value))
class ArgumentType_TFActivation(ArgumentType_Abstract):
'''
possible values:
https://www.tensorflow.org/api_docs/python/tf/keras/activations
returns the actual function
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
elif value == 'None':
self.value = None
self.get_name()
else:
self.value = value
self.get_name()
ezLogging.debug("%s-%s - Initialize ArgumentType_TFActivation Class to %s" % (None, None, self.name))
def get_name(self):
if self.value is None:
self.name = "None"
else:
self.name = self.value.__qualname__
def mutate(self):
import tensorflow as tf
choices = [tf.nn.relu, tf.nn.sigmoid, tf.nn.tanh, tf.nn.elu, None]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
self.get_name()
ezLogging.debug("%s-%s - Mutated ArgumentType_TFActivation to %s" % (None, None, self.name))
class ArgumentType_TFFilterSize(ArgumentType_Abstract):
'''
quick way to pick [1,3,5,7]
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_TFFilterSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [1,3,5,7]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_TFFilterSize to %f" % (None, None, self.value))
class ArgumentType_FilterSize(ArgumentType_Abstract):
'''
quick way to pick [3,5,7]
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_FilterSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [3,5,7]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_FilterSize to %f" % (None, None, self.value))
class ArgumentType_TFPoolSize(ArgumentType_Abstract):
'''
quick way to pick [1,2,3,4]
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_TFPoolSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [1,2,3,4]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_TFPoolSize to %f" % (None, None, self.value))
class ArgumentType_SmallFloats(ArgumentType_Abstract):
'''
Here we get 'small' floats in that they are initialized uniformly in [0, 10), so it would
be extremely unlikely for one to mutate to a number of, say, 100+.
'''
def __init__(self, value=None):
if value is None:
self.value = rnd.random()*10
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_SmallFloats Class to %f" % (None, None, self.value))
def mutate(self):
roll = rnd.random_integers(0,1)
if roll == 0:
self.mut_normal()
elif roll == 1:
self.mut_uniform()
else:
pass
ezLogging.debug("%s-%s - Mutated ArgumentType_SmallFloats to %f" % (None, None, self.value))
class ArgumentType_Float0to100(ArgumentType_Abstract):
'''
going to try and use uniform distribution more in the init and in mutate.
maybe also have a way to do 'fine tune' mutation so it mutates to a more local number.
also limit values from 0 to 100
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif100()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Float0to100 Class to %f" % (None, None, self.value))
def mutate_unif100(self):
self.value = rnd.uniform(0,100)
def mutate_unif_local(self):
# make it a range of 10
low = self.value-5
high = self.value+5
self.value = rnd.uniform(low, high)
# force value to be within 0 to 100
if (self.value < 0) or (self.value > 100):
self.mutate_unif100()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif100()
else:
self.mutate_unif_local()
ezLogging.debug("%s-%s - Mutated ArgumentType_Float0to100 to %f" % (None, None, self.value))
class ArgumentType_Int0to100(ArgumentType_Float0to100):
'''
same as ArgumentType_Float0to100 but forced as an int
'''
def __init__(self, value=None):
super().__init__(value)
self.make_int()
def make_int(self):
self.value = int(self.value)
def mutate(self):
super().mutate()
self.make_int()
class ArgumentType_Float0to1(ArgumentType_Abstract):
'''
Like ArgumentType_Float0to100, but the range is [0, 1).
mutate() usually redraws uniformly on [0, 1), with occasional fine tuning to a nearby value.
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif1()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Float0to1 Class to %f" % (None, None, self.value))
def mutate_unif1(self):
self.value = np.random.random() #NOTE: [0,1) not (0,1)
def mutate_unif_local(self):
low = self.value-.05
high = self.value+.05
self.value = rnd.uniform(low, high)
# force value to be within 0 to 1
if (self.value < 0) or (self.value > 1):
self.mutate_unif1()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif1()
else:
self.mutate_unif_local()
ezLogging.debug("%s-%s - Mutated ArgumentType_Float0to1 to %f" % (None, None, self.value))
class ArgumentType_Float0to10(ArgumentType_Float0to1):
'''
go from [0 to 10)
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif10()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Float0to10 Class to %f" % (None, None, self.value))
def mutate_unif10(self):
self.value = np.random.random()*10 #NOTE: [0,10) not (0,10)
def mutate_unif_local(self):
low = self.value-.5
high = self.value+.5
self.value = rnd.uniform(low, high)
# force value to be within 0 to 10
if (self.value < 0) or (self.value > 10):
self.mutate_unif10()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif10()
else:
self.mutate_unif_local()
ezLogging.debug("%s-%s - Mutated ArgumentType_Float0to10 to %f" % (None, None, self.value))
class ArgumentType_Int0to25(ArgumentType_Abstract):
'''
Augmentor.Pipeline.Rotate has a [0,25] limit so I'm using this to define that range
NOTE:
np.random.randint(low, high) ->[low, high)
np.random.random_integers(low, high) -> [low,high]
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif_int25()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Int0to25 Class to %f" % (None, None, self.value))
def mutate_unif_int25(self):
self.value = rnd.random_integers(low=0, high=25)
def mutate_unif_localint(self):
# make it a range of 10
low = self.value-5
high = self.value+5
self.value = rnd.random_integers(low, high)
# force value to be within 0 to 100
if (self.value < 0) or (self.value > 25):
self.mutate_unif_int25()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif_int25()
else:
self.mutate_unif_localint()
ezLogging.debug("%s-%s - Mutated ArgumentType_Int0to25 to %f" % (None, None, self.value))
class ArgumentType_Int1to10(ArgumentType_Abstract):
'''
[1,2,3,4,5,6,7,8,9,10]
NOTE:
np.random.randint(low, high) ->[low, high)
np.random.random_integers(low, high) -> [low,high]
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif_int10()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Int1to10 Class to %f" % (None, None, self.value))
def mutate_unif_int10(self):
self.value = rnd.random_integers(low=1, high=10)
def mutate_unif_localint(self):
# make it a range of 6
low = self.value-3
high = self.value+3
self.value = rnd.random_integers(low, high)
# force value to be within 0 to 100
if (self.value < 1) or (self.value > 10):
self.mutate_unif_int10()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif_int10()
else:
self.mutate_unif_localint()
ezLogging.debug("%s-%s - Mutated ArgumentType_Int1to10 to %f" % (None, None, self.value))
class ArgumentType_Int1to5(ArgumentType_Abstract):
'''
[1,2,3,4,5]
NOTE:
np.random.randint(low, high) ->[low, high)
np.random.random_integers(low, high) -> [low,high]
'''
def __init__(self, value=None):
if value is None:
self.mutate_unif_int5()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_Int1to5 Class to %f" % (None, None, self.value))
def mutate_unif_int5(self):
self.value = rnd.random_integers(low=1, high=5)
def mutate_unif_localint(self):
# make it a range of 2
low = self.value-1
high = self.value+1
self.value = rnd.random_integers(low, high)
# force value to be within 0 to 5
if (self.value < 1) or (self.value > 5):
self.mutate_unif_int5()
def mutate(self):
roll = rnd.random()
if roll < 2/3:
self.mutate_unif_int5()
else:
self.mutate_unif_localint()
ezLogging.debug("%s-%s - Mutated ArgumentType_Int1to5 to %f" % (None, None, self.value))
class ArgumentType_LimitedFloat0to1(ArgumentType_Abstract):
'''
limiting ArgumentType_Float0to1 so that our only choices are (0,1] every 0.05
NOTE that np.random.random() or np.random.uniform() is [0,1)
This is good for setting nonzero probability
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_LimitedFloat0to1 Class to %f" % (None, None, self.value))
def mutate(self):
delta = 0.05
choices = list(np.arange(0, 1, delta) + delta) #[0.05, 0.1, ..., 0.95, 1.0]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_LimitedFloat0to1 to %f" % (None, None, self.value))
class ArgumentType_PyTorchKernelSize(ArgumentType_Abstract):
'''
quick way to pick [1,3,5]
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_PyTorchKernelSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [1,3,5]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_PyTorchKernelSize to %f" % (None, None, self.value))
class ArgumentType_PyTorchStrideSize(ArgumentType_Abstract):
'''
quick way to pick [1,2,3]
'''
def __init__(self, value=None):
if value is None:
self.value = None
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_PyTorchStrideSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [1,2,3]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_PyTorchStrideSize to %f" % (None, None, self.value))
class ArgumentType_PyTorchPaddingSize(ArgumentType_Abstract):
'''
quick way to pick [0, 2, 4, -1], if -1 is chosen, should use automatic padding to cancel out kernel
'''
def __init__(self, value=None):
if value is None:
self.value = None # This way, we can mutate to None as well
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_PyTorchPaddingSize Class to %f" % (None, None, self.value))
def mutate(self):
choices = [0, 2, 4, -1]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_PyTorchPaddingSize to %f" % (None, None, self.value))
class ArgumentType_PyTorchActivation(ArgumentType_Abstract):
'''
Encodes common PyTorch activation functions;
stores and returns an instantiated activation module (e.g. nn.ReLU()), or None.
'''
def __init__(self, value=None):
from torch import nn
if value is None:
self.value = None
self.mutate()
elif value == 'None':
self.value = None
self.get_name()
elif 'LeakyReLU' in value:
match = re.search('0.\d*', value)
if match is not None:
slope = float(value[match.start():match.end()])
self.value = nn.LeakyReLU(slope)
else:
self.value = nn.LeakyReLU()
self.get_name()
elif 'ReLU' in value:
self.value = nn.ReLU()
self.get_name()
elif 'Tanh' in value:
self.value = nn.Tanh()
self.get_name()
else:
raise Exception("Given Activation %s not a built in choice yet" % value)
ezLogging.debug("%s-%s - Initialize ArgumentType_PyTorchActivation Class to %s" % (None, None, self.name))
def get_name(self):
if self.value is None:
self.name = "None"
else:
#self.name = self.value.__qualname__ #since we are instantiating the activation, we can't use qualname
self.name = self.value._get_name()
def mutate(self):
from torch import nn
choices = [nn.ReLU(), nn.LeakyReLU(), nn.LeakyReLU(0.1), nn.Tanh(), None]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
self.get_name()
ezLogging.debug("%s-%s - Mutated ArgumentType_PyTorchActivation to %s" % (None, None, self.name))
class ArgumentType_PretrainingSteps(ArgumentType_Abstract):
'''
Quick way to pick a low number of training steps, used for SimGAN pretraining
'''
def __init__(self, value=None):
if value is None:
self.value = None # This way, we can mutate to None as well
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_PretrainingSteps Class to %f" % (None, None, self.value))
def mutate(self):
choices = [300, 400, 500, 600]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_PretrainingSteps to %f" % (None, None, self.value))
class ArgumentType_TrainingSteps(ArgumentType_Abstract):
'''
Quick way to pick a medium number of training steps, used for SimGAN training
'''
def __init__(self, value=None):
if value is None:
self.value = None # This way, we can mutate to None as well
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_TrainingSteps Class to %f" % (None, None, self.value))
def mutate(self):
choices = [3000, 4000, 5000]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_TrainingSteps to %f" % (None, None, self.value))
class ArgumentType_LearningRate(ArgumentType_Abstract):
'''
Quick way to pick a learning rate value, used for SimGANs
'''
def __init__(self, value=None):
if value is None:
self.value = None # This way, we can mutate to None as well
self.mutate()
else:
self.value = value
ezLogging.debug("%s-%s - Initialize ArgumentType_LearningRate Class to %f" % (None, None, self.value))
def mutate(self):
choices = [1e-2, 5e-3, 1e-3, 5e-4, 1e-4]
if self.value in choices:
choices.remove(self.value) # works in-place
self.value = np.random.choice(choices)
ezLogging.debug("%s-%s - Mutated ArgumentType_LearningRate to %f" % (None, None, self.value))
"""
This module contains routines for modeling cluster and source signals.
"""
import os
import sys
from pixell import enmap
import astropy
import astropy.wcs as enwcs
import astropy.io.fits as pyfits
import astropy.constants as constants
#from astropy.cosmology import FlatLambdaCDM
from astLib import *
from scipy import ndimage
from scipy import interpolate
from scipy import stats
import time
import astropy.table as atpy
import nemo
from . import maps
from . import catalogs
from . import photometry
from . import filters
from . import gnfw
from . import plotSettings
import numpy as np
import numpy.fft as fft
import math
import pylab as plt
import pickle
import operator
import pyximport; pyximport.install()
import nemoCython
import nemo
import glob
import shutil
import yaml
import warnings
#import IPython
np.random.seed()
#------------------------------------------------------------------------------------------------------------
# Global constants (we could move others here but then need to give chunky obvious names, not just e.g. h)
TCMB=2.72548
Mpc_in_cm=constants.pc.value*100*1e6
MSun_in_g=constants.M_sun.value*1000
# Default cosmology (e.g., for fitQ)
#fiducialCosmoModel=FlatLambdaCDM(H0 = 70.0, Om0 = 0.3, Ob0 = 0.05, Tcmb0 = TCMB)
# Default cosmology (e.g., for fitQ) - now based on CCL rather than astropy
Om0=0.3
Ob0=0.05
H0=70
sigma8=0.8
ns=0.95
transferFunction="boltzmann_camb"
on_rtd=os.environ.get('READTHEDOCS', None)
if on_rtd is None:
import pyccl as ccl
fiducialCosmoModel=ccl.Cosmology(Omega_c=Om0-Ob0, Omega_b=Ob0, h=0.01*H0, sigma8=sigma8, n_s=ns,
transfer_function=transferFunction)
# For CCL-based mass conversions
M200mDef=ccl.halos.MassDef(200, "matter", c_m_relation = 'Bhattacharya13')
M200cDef=ccl.halos.MassDef(200, "critical", c_m_relation = 'Bhattacharya13')
M500cDef=ccl.halos.MassDef(500, "critical")
else:
fiducialCosmoModel=None
M200mDef=None
M200cDef=None
M500cDef=None
#------------------------------------------------------------------------------------------------------------
class BeamProfile(object):
"""Describes the beam profile (i.e., the point spread function for some instrument in real space). This
can be either read from a white-space delimited text file (with the angle in degrees in the first column
and the response in the second column), or can be set directly using arrays.
Args:
beamFileName(:obj:`str`, optional): Path to text file containing a beam profile in the ACT format.
profile1d (:obj:`np.ndarray`, optional): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`, optional): Corresponding angular distance in degrees from the centre for
the beam profile.
Attributes:
profile1d (:obj:`np.ndarray`): One dimensional beam profile, with index 0 at the centre.
rDeg (:obj:`np.ndarray`): Corresponding angular distance in degrees from the centre for the
beam profile.
tck (:obj:`tuple`): Spline knots for interpolating the beam onto different angular binning
(in degrees), for use with :meth:`scipy.interpolate.splev`.
FWHMArcmin (float): Estimate of the beam FWHM in arcmin.
"""
def __init__(self, beamFileName = None, profile1d = None, rDeg = None):
if beamFileName is not None:
beamData=np.loadtxt(beamFileName).transpose()
self.profile1d=beamData[1]
self.rDeg=beamData[0]
else:
self.profile1d=profile1d
self.rDeg=rDeg
if self.profile1d is not None and self.rDeg is not None:
self.tck=interpolate.splrep(self.rDeg, self.profile1d)
# This is really just for sorting a list of beams by resolution
self.FWHMArcmin=self.rDeg[np.argmin(abs(self.profile1d-0.5))]*60*2
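# Illustrative sketch (added for clarity; not part of the original module): a BeamProfile can be
# built directly from arrays rather than an ACT-format text file, and then resampled onto any
# angular binning via its spline knots. Hypothetical toy Gaussian beam with ~1.4 arcmin FWHM:
def _exampleBeamProfile():
    rDeg=np.linspace(0, 0.5, 1000)
    sigmaDeg=(1.4/60.0)/2.355                                   # FWHM -> sigma, in degrees
    beam=BeamProfile(profile1d = np.exp(-0.5*(rDeg/sigmaDeg)**2), rDeg = rDeg)
    resampled=interpolate.splev(np.linspace(0, 0.1, 50), beam.tck)
    return resampled, beam.FWHMArcmin                           # FWHMArcmin should be ~1.4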
#------------------------------------------------------------------------------------------------------------
class QFit(object):
"""A class for managing the filter mismatch function, referred to as `Q` in the ACT papers from
`Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_ onwards.
Args:
QFitFileName (:obj:`str`): Path to a FITS-table format file as made by :meth:`fitQ`.
tileNames (:obj:`list`): If given, the Q-function will be defined only for these tiles (their names
must appear in the file specified by `QFitFileName`).
Attributes:
fitDict (:obj:`dict`): Dictionary of interpolation objects, indexed by `tileName`. You should not
need to access this directly - use :meth:`getQ` instead.
"""
def __init__(self, QFitFileName = None, tileNames = None):
self._zGrid=np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0, 1.2, 1.6, 2.0])
self._theta500ArcminGrid=np.logspace(np.log10(0.1), np.log10(55), 10)
self.zMin=(self._zGrid).min()
self.zMax=(self._zGrid).max()
self.zDependent=None
self.zDepThetaMax=None
self.fitDict={}
if QFitFileName is not None:
self.loadQ(QFitFileName, tileNames = tileNames)
def loadQ(self, source, tileNames = None):
"""Load the filter mismatch function Q (see `Hasselfield et al. 2013
<https://ui.adsabs.harvard.edu/abs/2013JCAP...07..008H/abstract>`_) as a dictionary of spline fits.
Args:
source (:obj:`nemo.startUp.NemoConfig` or str): Either the path to a .fits table (containing Q fits
for all tiles - this is normally ``selFn/QFit.fits``), or a :obj:`nemo.startUp.NemoConfig` object
(from which the path and tiles to use will be inferred).
tileNames (optional, list): A list of tiles for which the Q function spline fit coefficients
will be extracted. If source is a :obj:`nemo.startUp.NemoConfig` object, this should be set to
``None``.
Returns:
A dictionary (with tilNames as keys), containing spline knots for the Q function for each tile.
Q values can then be obtained by using these with :func:`scipy.interpolate.splev`.
"""
# Bit messy, but two modes here:
# - combined Q fit file for all tiles
# - single Q fit for a single tile (interim stage, when under nemo MPI run)
if type(source) == nemo.startUp.NemoConfig:
tileNames=source.tileNames
combinedQTabFileName=source.selFnDir+os.path.sep+"QFit.fits"
loadMode=None
if os.path.exists(combinedQTabFileName) == True:
tileNamesInFile=[]
with pyfits.open(combinedQTabFileName) as QTabFile:
for ext in QTabFile:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNamesInFile.append(ext.name)
tileNamesInFile.sort()
if tileNames is None:
tileNames=tileNamesInFile
loadMode="combined"
else:
globStr=source.selFnDir+os.path.sep+"QFit#*.fits"
QTabFileNames=glob.glob(globStr)
loadMode="single"
if len(QTabFileNames) == 0:
raise Exception("could not find either '%s' or '%s' - needed to make QFit object" % (combinedQTabFileName, globStr))
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if loadMode == "combined":
QTab=atpy.Table().read(combinedQTabFileName, hdu = tileName)
elif loadMode == "single":
QTab=atpy.Table().read(source.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName))
else:
raise Exception("loadMode is not defined")
if QTab['z'].min() < zMin:
self.zMin=QTab['z'].min()
if QTab['z'].max() > zMax:
self.zMax=QTab['z'].max()
self.fitDict[tileName]=self._makeInterpolator(QTab)
elif os.path.exists(source) == True:
# Inspect file and get tile names if MEF
if tileNames is None:
tileNames=[]
with pyfits.open(source) as QTab:
for ext in QTab:
if type(ext) == astropy.io.fits.hdu.table.BinTableHDU:
tileNames.append(ext.name)
zMin=self._zGrid.max()
zMax=self._zGrid.min()
for tileName in tileNames:
if tileName == '': # Individual, interim file name
assert(source.find("QFit#") > 0)
tileName=os.path.split(source)[-1].split("QFit#")[-1].split(".fits")[0]
QTab=atpy.Table().read(source)
else:
QTab=atpy.Table().read(source, hdu = tileName)
if QTab['z'].min() < zMin:
self.zMin=QTab['z'].min()
if QTab['z'].max() > zMax:
self.zMax=QTab['z'].max()
self.fitDict[tileName]=self._makeInterpolator(QTab)
def _makeInterpolator(self, QTab):
"""Inspects QTab, and makes an interpolator object - 2d if there is z-dependence, 1d if not.
"""
if QTab.meta['ZDEPQ'] == 0:
QTab.sort('theta500Arcmin')
spline=interpolate.InterpolatedUnivariateSpline(QTab['theta500Arcmin'], QTab['Q'], ext = 1)
if self.zDependent == True:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDepThetaMax=None
self.zDependent=False
elif QTab.meta['ZDEPQ'] == 1:
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
spline=interpolate.LSQBivariateSpline(QTab['z'], QTab['theta500Arcmin'], QTab['Q'],
self._zGrid, self._theta500ArcminGrid)
zs=np.unique(QTab['z'])
thetaMaxs=[]
for z in zs:
thetaMaxs.append(QTab['theta500Arcmin'][QTab['z'] == z].max())
self.zDepThetaMax=interpolate.InterpolatedUnivariateSpline(zs, thetaMaxs)
if self.zDependent == False:
raise Exception("QFit contains a mixture of z-dependent and z-independent tables")
self.zDependent=True
else:
raise Exception("Valid ZDEPQ values are 0 or 1 only")
return spline
def getQ(self, theta500Arcmin, z = None, tileName = None):
"""Return the value of Q (the filter mismatch function) using interpolation.
Args:
theta500Arcmin (:obj:`float` or :obj:`np.ndarray`): The angular scale at which *Q* will
be calculated. This can be an array or a single value.
z (:obj:`float`, optional): Redshift, only used if *Q* is a function of
redshift, otherwise it is ignored. This must be a single value only,
i.e., not an array.
tileName (:obj:`str`, optional): The name of the tile to use for the *Q* function.
Returns:
The value of *Q* (an array or a single float, depending on the input).
Note:
In the case where *Q* is a function of redshift, values outside of the range for which
*Q* has been calculated will be filled with zeros (i.e., there is no extrapolation in
redshift).
"""
if z is not None:
if type(z) == np.ndarray and z.shape == (1,):
z=float(z)
if type(z) is not float and type(z) is not np.float64:
raise Exception("z must be a float, and not, e.g., an array")
if self.zDependent == True:
Qs=self.fitDict[tileName](z, theta500Arcmin)[0]
thetaMask=theta500Arcmin > self.zDepThetaMax(z)
Qs[thetaMask]=0.0
if z < self.zMin or z > self.zMax:
Qs=0
else:
# Univariate case handles own valid bounds checking
Qs=self.fitDict[tileName](theta500Arcmin)
return Qs
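# Illustrative sketch (added for clarity; not part of the original module): assuming a completed
# nemo run has written selFn/QFit.fits, Q can be evaluated for any tile at a range of angular
# scales (and at a given redshift, if the stored fit is z-dependent). The path and tile name
# below are hypothetical placeholders.
def _exampleGetQ(QFitFileName = "selFn/QFit.fits", tileName = "1_10_7"):
    Q=QFit(QFitFileName = QFitFileName, tileNames = [tileName])
    return Q.getQ(np.linspace(0.5, 10.0, 20), z = 0.4, tileName = tileName)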
#------------------------------------------------------------------------------------------------------------
def fSZ(obsFrequencyGHz, TCMBAlpha = 0.0, z = None):
"""Returns the frequency dependence of the (non-relativistic) Sunyaev-Zel'dovich effect.
Args:
obsFrequencyGHz (float): Frequency in GHz at which to calculate fSZ.
TCMBAlpha (float, optional): This should always be zero unless you really do want to make a model
where CMB temperature evolves T0*(1+z)^{1-TCMBAlpha}.
z (float, optional): Redshift - needed only if TCMBAlpha is non-zero.
Returns:
Value of SZ spectral shape at given frequency (neglecting relativistic corrections).
"""
h=constants.h.value
kB=constants.k_B.value
sigmaT=constants.sigma_T.value
me=constants.m_e.value
c=constants.c.value
x=(h*obsFrequencyGHz*1e9)/(kB*TCMB)
if TCMBAlpha != 0 and z is not None:
assert(z >= 0)
x=x*np.power(1+z, TCMBAlpha)
fSZ=x*((np.exp(x)+1)/(np.exp(x)-1))-4.0
return fSZ
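# Illustrative check (added for clarity; not part of the original module): the non-relativistic
# SZ spectral function is negative below the ~217 GHz null and positive above it, e.g.
def _example_fSZ():
    return fSZ(98.0), fSZ(150.0), fSZ(220.0)    # roughly -1.5, -0.95, and just above zero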
#------------------------------------------------------------------------------------------------------------
def calcRDeltaMpc(z, MDelta, cosmoModel, delta = 500, wrt = 'critical'):
"""Calculate RDelta (e.g., R500c, R200m etc.) in Mpc, for a halo with the given mass and redshift.
Args:
z (float): Redshift.
MDelta (float): Halo mass in units of solar masses, using the definition set by `delta` and `wrt`.
cosmoModel (:obj:`pyccl.Cosmology`): Cosmology object.
delta (float, optional): Overdensity (e.g., typically 500 or 200).
wrt (str, optional): Use 'critical' or 'mean' to set the definition of density with respect to the
critical density or mean density at the given redshift.
Returns:
RDelta (in Mpc)
"""
if type(MDelta) == str:
raise Exception("MDelta is a string - use, e.g., 1.0e+14 (not 1e14 or 1e+14)")
Ez=ccl.h_over_h0(cosmoModel, 1/(1+z))
if wrt == 'critical':
wrtDensity=ccl.physical_constants.RHO_CRITICAL*(Ez*cosmoModel['h'])**2
elif wrt == 'mean':
wrtDensity=ccl.omega_x(cosmoModel, 1/(1+z), 'matter')
#wrtDensity=cosmoModel.Om(z)*cosmoModel.critical_density(z).value
else:
raise Exception("wrt should be either 'critical' or 'mean'")
#wrtDensity=(wrtDensity*np.power(Mpc_in_cm, 3))/MSun_in_g # NOTE: not needed for CCL units (MSun, Mpc etc.)
RDeltaMpc=np.power((3*MDelta)/(4*np.pi*delta*wrtDensity), 1.0/3.0)
return RDeltaMpc
#------------------------------------------------------------------------------------------------------------
def calcR500Mpc(z, M500c, cosmoModel):
"""Calculate R500 (in Mpc), with respect to critical density.
Args:
z (float): Redshift.
M500c (float): Mass within R500c (i.e., with respect to critical density) in units of solar masses.
cosmoModel (`:obj:`pyccl.Cosmology`): Cosmology object.
Returns:
R500c (in Mpc)
"""
R500Mpc=calcRDeltaMpc(z, M500c, cosmoModel, delta = 500, wrt = 'critical')
return R500Mpc
#------------------------------------------------------------------------------------------------------------
def calcTheta500Arcmin(z, M500, cosmoModel):
"""Given `z`, `M500` (in MSun), returns the angular size equivalent to R:sub:`500c`, with respect to the
critical density.
Args:
z (float): Redshift.
M500 (float): Mass within R500c (i.e., with respect to critical density) in units of solar masses.
cosmoModel (`:obj:`pyccl.Cosmology`): Cosmology object.
Returns:
theta500c (in arcmin)
"""
R500Mpc=calcR500Mpc(z, M500, cosmoModel)
#theta500Arcmin=np.degrees(np.arctan(R500Mpc/cosmoModel.angular_diameter_distance(z).value))*60.0
theta500Arcmin=np.degrees(np.arctan(R500Mpc/ccl.angular_diameter_distance(cosmoModel, 1/(1+z))))*60.0
return theta500Arcmin
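# Illustrative sketch (added for clarity; not part of the original module): with the default
# cosmology defined above (fiducialCosmoModel; None only on ReadTheDocs builds), the angular
# scale of a cluster follows directly from its mass and redshift. The values here are hypothetical.
def _exampleTheta500():
    return calcTheta500Arcmin(0.5, 3.0e+14, fiducialCosmoModel)   # a few arcmin for this M500, z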
#------------------------------------------------------------------------------------------------------------
def makeArnaudModelProfile(z, M500, GNFWParams = 'default', cosmoModel = None):
"""Given z, M500 (in MSun), returns dictionary containing Arnaud model profile (well, knots from spline
fit, 'tckP' - assumes you want to interpolate onto an array with units of degrees) and parameters
(particularly 'y0', 'theta500Arcmin').
Use GNFWParams to specify a different shape. If GNFWParams = 'default', then the default parameters as listed
in gnfw.py are used, i.e.,
GNFWParams = {'P0': 8.403, 'c500': 1.177, 'gamma': 0.3081, 'alpha': 1.0510, 'beta': 5.4905, 'tol': 1e-7,
'npts': 100}
Otherwise, give a dictionary that specifies the wanted values. This would usually be specified as
GNFWParams in the filter params in the nemo .par file (see the example .par files).
If cosmoModel is None, use default (Om0, Ol0, H0) = (0.3, 0.7, 70 km/s/Mpc) cosmology.
Used by ArnaudModelFilter
"""
if cosmoModel is None:
cosmoModel=fiducialCosmoModel
if GNFWParams == 'default':
GNFWParams=gnfw._default_params
# Adjust tol for speed vs. range of b covered
bRange=np.linspace(0, 30, 1000)
cylPProfile=[]
tol=1e-6
for i in range(len(bRange)):
b=bRange[i]
cylPProfile.append(gnfw.integrated(b, params = GNFWParams))
if i > 0 and abs(cylPProfile[i] - cylPProfile[i-1]) < tol:
break
cylPProfile=np.array(cylPProfile)
bRange=bRange[:i+1]
# Normalise to 1 at centre
cylPProfile=cylPProfile/cylPProfile.max()
# Calculate R500Mpc, theta500Arcmin corresponding to given mass and redshift
theta500Arcmin=calcTheta500Arcmin(z, M500, cosmoModel)
# Map between b and angular coordinates
# NOTE: c500 now taken into account in gnfw.py
thetaDegRange=bRange*(theta500Arcmin/60.)
tckP=interpolate.splrep(thetaDegRange, cylPProfile)
return {'tckP': tckP, 'theta500Arcmin': theta500Arcmin, 'rDeg': thetaDegRange}
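# Illustrative sketch (added for clarity; not part of the original module): the returned spline
# knots describe the unit-normalised, cylindrically-projected pressure profile as a function of
# angular distance in degrees, and can be evaluated on any radial grid:
def _exampleArnaudProfile():
    signalDict=makeArnaudModelProfile(0.4, 2.0e+14)
    rDeg=np.linspace(0, signalDict['rDeg'].max(), 100)
    return interpolate.splev(rDeg, signalDict['tckP'], ext = 1)   # 1.0 at the centre, falling outwards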
#------------------------------------------------------------------------------------------------------------
def makeBattagliaModelProfile(z, M500c, GNFWParams = 'default', cosmoModel = None):
"""Given z, M500 (in MSun), returns dictionary containing Battaglia+2012 model profile (well, knots from
spline fit, 'tckP' - assumes you want to interpolate onto an array with units of degrees) and parameters
(particularly 'y0', 'theta500Arcmin').
Use GNFWParams to specify a different shape. If GNFWParams = 'default', then the default parameters as
listed in Battaglia et al. 2012 are used, i.e., GNFWParams = {'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49,
'c500': 1.408, 'tol': 1e-7, 'npts': 100}. Note that the definitions/sign convention is slightly
different in Battaglia+2012 compared to Arnaud+2010 (we follow the latter).
Otherwise, give a dictionary that specifies the wanted values. This would usually be specified as
GNFWParams in the filter params in the nemo .par file (see the example .par files).
If cosmoModel is None, use default (Om0, Ol0, H0) = (0.3, 0.7, 70 km/s/Mpc) cosmology.
Used by ArnaudModelFilter
"""
if cosmoModel is None:
cosmoModel=fiducialCosmoModel
if GNFWParams == 'default':
# NOTE: These are Table 1 values from Battaglia+2012 for M500c
GNFWParams={'P0': 7.49, 'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49, 'c500': 1.408, 'tol': 1e-7, 'npts': 100}
# Redshift dependence
# (we do P0 here anyway but since we have arbitrary normalization that seems pointless)
# These are all defined for M200c in Battaglia+2012
# Parameters for shape are for M500c in Table 1 of Battaglia+2012
# NOTE: Some transforming between A10 <-> B12 conventions here
P0=GNFWParams['P0']
P0_alpha_m=0.226
P0_alpha_z=-0.957
xc=1/GNFWParams['c500']
xc_alpha_m=-0.0833
xc_alpha_z=0.853
beta=GNFWParams['beta']-0.3
beta_alpha_m=0.0480
beta_alpha_z=0.615
M200c=M500cToMdef(M500c, z, M200cDef, cosmoModel)
P0z=P0*np.power(M200c/1e14, P0_alpha_m)*np.power(1+z, P0_alpha_z)
xcz=xc*np.power(M200c/1e14, xc_alpha_m)*np.power(1+z, xc_alpha_z)
betaz=beta*np.power(M200c/1e14, beta_alpha_m)*np.power(1+z, beta_alpha_z)
# Some more B12 -> A10 notation conversion
GNFWParams['P0']=P0z
GNFWParams['beta']=betaz+0.3
GNFWParams['c500']=1/xcz
GNFWParams['gamma']=0.3
GNFWParams['alpha']=1.0
# Adjust tol for speed vs. range of b covered
bRange=np.linspace(0, 30, 1000)
cylPProfile=[]
tol=1e-6
for i in range(len(bRange)):
b=bRange[i]
cylPProfile.append(gnfw.integrated(b, params = GNFWParams))
if i > 0 and abs(cylPProfile[i] - cylPProfile[i-1]) < tol:
break
cylPProfile=np.array(cylPProfile)
bRange=bRange[:i+1]
# Normalise to 1 at centre
cylPProfile=cylPProfile/cylPProfile.max()
# Calculate R500Mpc, theta500Arcmin corresponding to given mass and redshift
theta500Arcmin=calcTheta500Arcmin(z, M500c, cosmoModel)
# Map between b and angular coordinates
# NOTE: c500 now taken into account in gnfw.py
thetaDegRange=bRange*(theta500Arcmin/60.)
tckP=interpolate.splrep(thetaDegRange, cylPProfile)
return {'tckP': tckP, 'theta500Arcmin': theta500Arcmin, 'rDeg': thetaDegRange}
#------------------------------------------------------------------------------------------------------------
def makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None):
"""Makes a 2d signal only map containing the given beam.
Args:
degreesMap (:obj:`np.ndarray`): Map of angular distance from the object position.
wcs (:obj:`astWCS.WCS`): WCS corresponding to degreesMap.
beam (:obj:`BeamProfile` or str): Either a BeamProfile object, or a string that gives the path to a
text file that describes the beam profile.
        amplitude (float, optional): Specifies the amplitude of the input signal (in map units,
e.g., uK), before beam convolution. This is only needed if this routine is being used to inject
sources into maps. It is not needed for making filter kernels.
Returns:
signalMap (:obj:`np.ndarray`)
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
if amplitude is None:
amplitude=1.0
if type(beam) == str:
beam=BeamProfile(beamFileName = beam)
profile1d=amplitude*beam.profile1d
# Turn 1d profile into 2d
r2p=interpolate.interp1d(beam.rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
return signalMap
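# Minimal example (the beam file path and amplitude below are placeholders): make a beam-shaped
# point-source template on an existing degrees-distance map.
# beamMap = makeBeamModelSignalMap(degreesMap, wcs, "beam_f150.txt", amplitude = 300.0)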
#------------------------------------------------------------------------------------------------------------
def makeArnaudModelSignalMap(z, M500, degreesMap, wcs, beam, GNFWParams = 'default', amplitude = None,
maxSizeDeg = 15.0, convolveWithBeam = True):
"""Makes a 2d signal only map containing an Arnaud model cluster.
Args:
z (float): Redshift; used for setting angular size.
M500 (float): Mass within R500, defined with respect to critical density; units are solar masses.
degreesMap (:obj:`numpy.ndarray`): A 2d array containing radial distance measured in degrees from
the centre of the model to be inserted. The output map will have the same dimensions and pixel
scale (see nemoCython.makeDegreesDistanceMap).
GNFWParams (dict, optional): Used to specify a different profile shape to the default (which follows
Arnaud et al. 2010). If GNFWParams = 'default', then the default parameters as listed in
gnfw.py are used, i.e., GNFWParams = {'gamma': 0.3081, 'alpha': 1.0510, 'beta': 5.4905,
'tol': 1e-7, 'npts': 100}. Otherwise, give a dictionary that specifies the wanted values. This
would usually be specified using the GNFWParams key in the .yml config used when running nemo
(see the examples/ directory).
amplitude (float, optional): Amplitude of the cluster, i.e., the central decrement (in map units,
e.g., uK), or the central Comptonization parameter (dimensionless), before beam convolution.
Not needed for generating filter kernels.
maxSizeDeg (float, optional): Use to limit the region over which the beam convolution is done,
for optimization purposes.
convolveWithBeam (bool, optional): If False, no beam convolution is done (it can be quicker to apply
beam convolution over a whole source-injected map rather than per object).
Returns:
signalMap (:obj:`np.ndarray`).
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
# Making the 1d profile itself is the slowest part (~1 sec)
signalDict=makeArnaudModelProfile(z, M500, GNFWParams = GNFWParams)
tckP=signalDict['tckP']
# Make cluster map (unit-normalised profile)
rDeg=np.linspace(0.0, maxSizeDeg, 5000)
profile1d=interpolate.splev(rDeg, tckP, ext = 1)
if amplitude is not None:
profile1d=profile1d*amplitude
r2p=interpolate.interp1d(rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
if convolveWithBeam == True:
signalMap=maps.convolveMapWithBeam(signalMap, wcs, beam, maxDistDegrees = maxSizeDeg)
return signalMap
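# Illustrative call (numbers are arbitrary): insert a z = 0.5, M500 = 2e14 MSun cluster with a
# -500 uK central decrement, convolved with the supplied beam.
# model = makeArnaudModelSignalMap(0.5, 2e14, degreesMap, wcs, beam, amplitude = -500.0)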
#------------------------------------------------------------------------------------------------------------
def makeBattagliaModelSignalMap(z, M500, degreesMap, wcs, beam, GNFWParams = 'default', amplitude = None,
maxSizeDeg = 15.0, convolveWithBeam = True):
"""Makes a 2d signal only map containing a Battaglia+2012 model cluster (taking into account the redshift
evolution described in Table 1 and equation 11 there).
Args:
z (float): Redshift; used for setting angular size.
M500 (float): Mass within R500, defined with respect to critical density; units are solar masses.
degreesMap (:obj:`numpy.ndarray`): A 2d array containing radial distance measured in degrees from
the centre of the model to be inserted. The output map will have the same dimensions and pixel
scale (see nemoCython.makeDegreesDistanceMap).
GNFWParams (dict, optional): Used to specify a different profile shape to the default (which follows
Battaglia et al. 2012). If GNFWParams = 'default', then the default parameters as listed in
Battaglia et al. 2012 are used, i.e., GNFWParams = {'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49,
'c500': 1.408, 'tol': 1e-7, 'npts': 100}. Note that the definitions/sign convention is slightly
different in Battaglia+2012 compared to Arnaud+2010 (we follow the latter).
Otherwise, give a dictionary that specifies the wanted values. This
would usually be specified using the GNFWParams key in the .yml config used when running nemo
(see the examples/ directory).
amplitude (float, optional): Amplitude of the cluster, i.e., the central decrement (in map units,
e.g., uK), or the central Comptonization parameter (dimensionless), before beam convolution.
Not needed for generating filter kernels.
maxSizeDeg (float, optional): Use to limit the region over which the beam convolution is done,
for optimization purposes.
convolveWithBeam (bool, optional): If False, no beam convolution is done (it can be quicker to apply
beam convolution over a whole source-injected map rather than per object).
Returns:
signalMap (:obj:`np.ndarray`).
Note:
The pixel window function is not applied here; use pixell.enmap.apply_window to do that (see
nemo.filters.filterMaps).
"""
if GNFWParams == 'default':
# NOTE: These are Table 1 values from Battaglia+2012 for M500c
GNFWParams={'P0': 7.49, 'gamma': 0.3, 'alpha': 1.0, 'beta': 4.49, 'c500': 1.408, 'tol': 1e-7, 'npts': 100}
# Making the 1d profile itself is the slowest part (~1 sec)
signalDict=makeBattagliaModelProfile(z, M500, GNFWParams = GNFWParams)
tckP=signalDict['tckP']
# Make cluster map (unit-normalised profile)
rDeg=np.linspace(0.0, maxSizeDeg, 5000)
profile1d=interpolate.splev(rDeg, tckP, ext = 1)
if amplitude is not None:
profile1d=profile1d*amplitude
r2p=interpolate.interp1d(rDeg, profile1d, bounds_error=False, fill_value=0.0)
signalMap=r2p(degreesMap)
if convolveWithBeam == True:
signalMap=maps.convolveMapWithBeam(signalMap, wcs, beam, maxDistDegrees = maxSizeDeg)
return signalMap
#------------------------------------------------------------------------------------------------------------
def getFRelWeights(config):
"""Returns a dictionary of frequency weights used in relativistic corrections for each tile. This is
cached in the selFn/ dir after the first time this routine is called.
"""
if 'photFilter' not in config.parDict.keys() or config.parDict['photFilter'] is None:
return {}
fRelWeightsFileName=config.selFnDir+os.path.sep+"fRelWeights.fits"
if os.path.exists(fRelWeightsFileName) == False:
fRelTab=atpy.Table()
fRelTab.add_column(atpy.Column(config.allTileNames, 'tileName'))
for tileCount in range(len(config.allTileNames)):
tileName=config.allTileNames[tileCount]
filterFileName=config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"filter_%s#%s.fits" % (config.parDict['photFilter'], tileName)
with pyfits.open(filterFileName) as img:
for i in range(1, 10):
if 'RW%d_GHZ' % (i) in img[0].header.keys():
freqGHz=str(img[0].header['RW%d_GHZ' % (i)])
if freqGHz == '':
freqGHz='148.0'
print(">>> WARNING: setting freqGHz = '%s' in getFRelWeights - this is okay if you're running on a TILe-C y-map" % (freqGHz))
if freqGHz not in fRelTab.keys():
fRelTab.add_column(atpy.Column(np.zeros(len(config.allTileNames)), freqGHz))
fRelTab[freqGHz][tileCount]=img[0].header['RW%d' % (i)]
fRelTab.meta['NEMOVER']=nemo.__version__
fRelTab.write(fRelWeightsFileName, overwrite = True)
return loadFRelWeights(fRelWeightsFileName)
#------------------------------------------------------------------------------------------------------------
def loadFRelWeights(fRelWeightsFileName):
"""Returns a dictionary of frequency weights used in relativistic corrections for each tile (stored in
a .fits table, made by getFRelWeights).
"""
fRelTab=atpy.Table().read(fRelWeightsFileName)
fRelWeightsDict={}
for row in fRelTab:
fRelWeightsDict[row['tileName']]={}
for key in fRelTab.keys():
if key != 'tileName':
fRelWeightsDict[row['tileName']][float(key)]=row[key]
return fRelWeightsDict
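# The returned structure is nested as fRelWeightsDict[tileName][obsFreqGHz] -> weight, e.g.
# (tile name and numbers below are purely illustrative):
# fRelWeightsDict = loadFRelWeights("selFn/fRelWeights.fits")
# weight150 = fRelWeightsDict['1_10_0'][148.0]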
#------------------------------------------------------------------------------------------------------------
def fitQ(config):
"""Calculates the filter mismatch function *Q* on a grid of scale sizes for each tile in the map. The
results are initially cached (with a separate .fits table for each tile) under the `selFn` directory,
before being combined into a single file at the end of a :ref:`nemoCommand` run.
The `GNFWParams` key in the `config` dictionary can be used to specify a different cluster profile shape.
Args:
config (:obj:`startUp.NemoConfig`): A NemoConfig object.
Note:
See :class:`QFit` for how to read in and use the output of this function.
"""
t0=time.time()
cosmoModel=fiducialCosmoModel
# Spin through the filter kernels
photFilterLabel=config.parDict['photFilter']
filterList=config.parDict['mapFilters']
for f in filterList:
if f['label'] == photFilterLabel:
ref=f
# This could be more general... but A10 model has no z-dependence, B12 model does
# So Q is a function of (theta500, z) for the latter
# We add a header keyword to the QFit.fits table to indicate if z-dependence important or not
# Everything is then handled internally by QFit class
if ref['class'].find("Arnaud") != -1:
makeSignalModelMap=makeArnaudModelSignalMap
zDepQ=0
elif ref['class'].find("Battaglia") != -1:
makeSignalModelMap=makeBattagliaModelSignalMap
zDepQ=1
else:
raise Exception("Signal model for Q calculation should either be 'Arnaud' or 'Battaglia'")
# M, z -> theta ranges for Q calc - what's most efficient depends on whether there is z-dependence, or not
# NOTE: ref filter that sets scale we compare to must ALWAYS come first
if zDepQ == 0:
# To safely (numerically, at least) apply Q at z ~ 0.01, we need to go to theta500 ~ 500 arcmin (< 10 deg)
MRange=[ref['params']['M500MSun']]
zRange=[ref['params']['z']]
minTheta500Arcmin=0.1
maxTheta500Arcmin=500.0
numPoints=50
theta500Arcmin_wanted=np.logspace(np.log10(minTheta500Arcmin), np.log10(maxTheta500Arcmin), numPoints)
zRange_wanted=np.zeros(numPoints)
zRange_wanted[np.less(theta500Arcmin_wanted, 3.0)]=2.0
zRange_wanted[np.logical_and(np.greater(theta500Arcmin_wanted, 3.0), np.less(theta500Arcmin_wanted, 6.0))]=1.0
zRange_wanted[np.logical_and(np.greater(theta500Arcmin_wanted, 6.0), np.less(theta500Arcmin_wanted, 10.0))]=0.5
zRange_wanted[np.logical_and(np.greater(theta500Arcmin_wanted, 10.0), np.less(theta500Arcmin_wanted, 20.0))]=0.1
zRange_wanted[np.logical_and(np.greater(theta500Arcmin_wanted, 20.0), np.less(theta500Arcmin_wanted, 30.0))]=0.05
zRange_wanted[np.greater(theta500Arcmin_wanted, 30.0)]=0.01
MRange_wanted=[]
for theta500Arcmin, z in zip(theta500Arcmin_wanted, zRange_wanted):
Ez=ccl.h_over_h0(cosmoModel, 1/(1+z))
criticalDensity=ccl.physical_constants.RHO_CRITICAL*(Ez*cosmoModel['h'])**2
R500Mpc=np.tan(np.radians(theta500Arcmin/60.0))*ccl.angular_diameter_distance(cosmoModel, 1/(1+z))
M500=(4/3.0)*np.pi*np.power(R500Mpc, 3)*500*criticalDensity
MRange_wanted.append(M500)
MRange=MRange+MRange_wanted
zRange=zRange+zRange_wanted.tolist()
signalMapSizeDeg=10.0
elif zDepQ == 1:
# On a z grid for evolving profile models (e.g., Battaglia et al. 2012)
MRange=[ref['params']['M500MSun']]
zRange=[ref['params']['z']]
zGrid=[0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1.0, 1.2, 1.6, 2.0]
minTheta500Arcmin=0.1
maxTheta500Arcmin=100.0 # 55' corresponds to M500c = 1e16 MSun at z = 0.05
numPoints=24
theta500Arcmin_wanted=np.logspace(np.log10(minTheta500Arcmin), np.log10(maxTheta500Arcmin), numPoints)
for z in zGrid:
MRange_wanted=[]
for theta500Arcmin in theta500Arcmin_wanted:
Ez=ccl.h_over_h0(cosmoModel, 1/(1+z))
criticalDensity=ccl.physical_constants.RHO_CRITICAL*(Ez*cosmoModel['h'])**2
R500Mpc=np.tan(np.radians(theta500Arcmin/60.0))*ccl.angular_diameter_distance(cosmoModel, 1/(1+z))
M500=(4/3.0)*np.pi*np.power(R500Mpc, 3)*500*criticalDensity
MRange_wanted.append(M500)
MRange=MRange+MRange_wanted
zRange=zRange+([z]*len(MRange_wanted))
signalMapSizeDeg=5.0
else:
raise Exception("valid values for zDepQ are 0 or 1")
# Here we save the fit for each tile separately...
# completeness.tidyUp will put them into one file at the end of a nemo run
for tileName in config.tileNames:
tileQTabFileName=config.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName)
if os.path.exists(tileQTabFileName) == True:
print("... already done Q fit for tile %s ..." % (tileName))
continue
print("... fitting Q in tile %s ..." % (tileName))
# Load reference scale filter
foundFilt=False
for filt in config.parDict['mapFilters']:
if filt['label'] == config.parDict['photFilter']:
foundFilt=True
break
if foundFilt == False:
raise Exception("couldn't find filter that matches photFilter")
filterClass=eval('filters.%s' % (filt['class']))
filterObj=filterClass(filt['label'], config.unfilteredMapsDictList, filt['params'], \
tileName = tileName,
diagnosticsDir = config.diagnosticsDir+os.path.sep+tileName)
filterObj.loadFilter()
# Real space kernel or Fourier space filter?
if issubclass(filterObj.__class__, filters.RealSpaceMatchedFilter) == True:
realSpace=True
else:
realSpace=False
# Set-up the beams
beamsDict={}
for mapDict in config.parDict['unfilteredMaps']:
obsFreqGHz=mapDict['obsFreqGHz']
beamsDict[obsFreqGHz]=mapDict['beamFileName']
# A bit clunky but gets map pixel scale and shrinks map size we'll use for inserting signals
# signalMapSizeDeg set according to lowest z model (see above), using smaller for z dependent to save RAM
# (but then have a higher low-z cut where Q will be valid)
extMap=np.zeros(filterObj.shape)
wcs=filterObj.wcs
RADeg, decDeg=wcs.getCentreWCSCoords()
clipDict=astImages.clipImageSectionWCS(extMap, wcs, RADeg, decDeg, signalMapSizeDeg)
wcs=clipDict['wcs']
extMap=clipDict['data']
# Input signal maps to which we will apply filter(s)
# We do this once and store in a dictionary for speed
theta500ArcminDict={}
signalMapDict={}
signalMap=np.zeros(extMap.shape)
degreesMap=np.ones(signalMap.shape, dtype = float)*1e6
degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, signalMapSizeDeg)
for z, M500MSun in zip(zRange, MRange):
key='%.2f_%.2f' % (z, np.log10(M500MSun))
signalMaps=[]
fSignalMaps=[]
y0=2e-4
for obsFreqGHz in list(beamsDict.keys()):
                if obsFreqGHz is not None: # Normal case
amplitude=maps.convertToDeltaT(y0, obsFreqGHz)
else: # TILe-C case
amplitude=y0
# NOTE: Q is to adjust for mismatched filter shape
# Yes, this should have the beam in it (certainly for TILe-C)
# NOTE: CCL can blow up for some of the extreme masses we try to feed in here
# (so we just skip those if it happens)
try:
signalMap=makeSignalModelMap(z, M500MSun, degreesMap, wcs, beamsDict[obsFreqGHz],
amplitude = amplitude, convolveWithBeam = True,
GNFWParams = config.parDict['GNFWParams'])
                except Exception:
continue
if realSpace == True:
signalMaps.append(signalMap)
else:
signalMaps.append(enmap.fft(signalMap))
signalMaps=np.array(signalMaps)
# Skip any failed ones (see above - CCL blowing up for extreme masses)
if len(signalMaps) == len(list(beamsDict.keys())):
signalMapDict[key]=signalMaps
theta500ArcminDict[key]=calcTheta500Arcmin(z, M500MSun, fiducialCosmoModel)
# Filter maps with the ref kernel
# NOTE: keep only unique values of Q, theta500Arcmin (or interpolation routines will fail)
Q=[]
QTheta500Arcmin=[]
Qz=[]
for z, M500MSun in zip(zRange, MRange):
key='%.2f_%.2f' % (z, np.log10(M500MSun))
if key in signalMapDict.keys():
filteredSignal=filterObj.applyFilter(signalMapDict[key])
peakFilteredSignal=filteredSignal.max()
if peakFilteredSignal not in Q:
Q.append(peakFilteredSignal)
QTheta500Arcmin.append(theta500ArcminDict[key])
Qz.append(z)
Q=np.array(Q)
Q=Q/Q[0]
        # Sort and save as FITS table (interim - all tile files get combined at end of nemo run)
QTab=atpy.Table()
QTab.add_column(atpy.Column(Q, 'Q'))
QTab.add_column(atpy.Column(QTheta500Arcmin, 'theta500Arcmin'))
QTab.add_column(atpy.Column(Qz, 'z'))
QTab.sort('theta500Arcmin')
QTab.meta['NEMOVER']=nemo.__version__
QTab.meta['ZDEPQ']=zDepQ
QTab.write(tileQTabFileName, overwrite = True)
# Test plot
Q=QFit(tileQTabFileName)
plotSettings.update_rcParams()
plt.figure(figsize=(9,6.5))
ax=plt.axes([0.12, 0.11, 0.86, 0.88])
for z in [0.05, 0.1, 0.4, 1.0, 2.0]:
mask=(QTab['z'] == z)
if mask.sum() > 0:
plt.plot(QTab['theta500Arcmin'][mask], QTab['Q'][mask], '.', label = "z = %.2f" % (z))
thetaArr=np.logspace(np.log10(QTab['theta500Arcmin'][mask].min()),
np.log10(QTab['theta500Arcmin'][mask].max()), numPoints)
plt.plot(thetaArr, Q.getQ(thetaArr, z, tileName = tileName), 'k-')
plt.legend()
plt.semilogx()
plt.xlabel("$\\theta_{\\rm 500c}$ (arcmin)")
plt.ylabel("$Q$ ($\\theta_{\\rm 500c}$, $z$)")
plt.savefig(config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"QFit_%s.pdf" % (tileName))
plt.savefig(config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"QFit_%s.png" % (tileName))
plt.close()
t1=time.time()
print("... Q fit finished [tileName = %s, rank = %d, time taken = %.3f] ..." % (tileName, config.rank, t1-t0))
#------------------------------------------------------------------------------------------------------------
def makeCombinedQTable(config):
"""Writes dictionary of tables (containing individual tile Q fits) as a single .fits table.
Returns combined Q astropy table object
"""
outFileName=config.selFnDir+os.path.sep+"QFit.fits"
if os.path.exists(outFileName) == True:
return atpy.Table().read(outFileName)
QTabDict={}
for tileName in config.allTileNames:
QTabDict[tileName]=atpy.Table().read(config.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName))
#----
# New - MEF
QTabMEF=pyfits.HDUList()
for tileName in config.allTileNames:
with pyfits.open(config.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName)) as QTab:
QTab[1].name=tileName
QTabMEF.append(QTab[1].copy())
QTabMEF.writeto(outFileName, overwrite = True)
combinedQTab=QTabMEF
#----
# Old
#combinedQTab=atpy.Table()
#for tabKey in list(QTabDict.keys()):
#for colKey in QTabDict[tabKey].keys():
#if colKey == 'theta500Arcmin':
#if colKey not in combinedQTab.keys():
#combinedQTab.add_column(QTabDict[tabKey]['theta500Arcmin'], index = 0)
#else:
#combinedQTab.add_column(atpy.Column(QTabDict[tabKey][colKey].data, tabKey))
#combinedQTab.meta['NEMOVER']=nemo.__version__
#combinedQTab.write(outFileName, overwrite = True)
return combinedQTab
#------------------------------------------------------------------------------------------------------------
def calcWeightedFRel(z, M500, Ez, fRelWeightsDict):
"""Return fRel for the given (z, M500), weighted by frequency according to fRelWeightsDict
"""
fRels=[]
freqWeights=[]
for obsFreqGHz in fRelWeightsDict.keys():
if fRelWeightsDict[obsFreqGHz] > 0:
fRels.append(calcFRel(z, M500, Ez, obsFreqGHz = obsFreqGHz))
freqWeights.append(fRelWeightsDict[obsFreqGHz])
fRel=np.average(fRels, weights = freqWeights)
return fRel
#------------------------------------------------------------------------------------------------------------
def calcFRel(z, M500, Ez, obsFreqGHz = 148.0):
"""Calculates relativistic correction to SZ effect at specified frequency, given z, M500 in MSun.
This assumes the Arnaud et al. (2005) M-T relation, and applies formulae of Itoh et al. (1998)
As for H13, we return fRel = 1 + delta_SZE (see also Marriage et al. 2011)
"""
# NOTE: we should define constants somewhere else...
h=6.63e-34
kB=1.38e-23
sigmaT=6.6524586e-29
me=9.11e-31
e=1.6e-19
c=3e8
# Using Arnaud et al. (2005) M-T to get temperature
A=3.84e14
B=1.71
#TkeV=5.*np.power(((cosmoModel.efunc(z)*M500)/A), 1/B) # HMF/Astropy
#TkeV=5.*np.power(((cosmoModel.Ez(z)*M500)/A), 1/B) # Colossus
TkeV=5.*np.power(((Ez*M500)/A), 1/B)
TKelvin=TkeV*((1000*e)/kB)
# Itoh et al. (1998) eqns. 2.25 - 2.30
thetae=(kB*TKelvin)/(me*c**2)
X=(h*obsFreqGHz*1e9)/(kB*TCMB)
Xtw=X*(np.cosh(X/2.)/np.sinh(X/2.))
Stw=X/np.sinh(X/2.)
Y0=-4+Xtw
Y1=-10. + (47/2.)*Xtw - (42/5.)*Xtw**2 + (7/10.)*Xtw**3 + np.power(Stw, 2)*(-(21/5.) + (7/5.)*Xtw)
Y2=-(15/2.) + (1023/8.)*Xtw - (868/5.)*Xtw**2 + (329/5.)*Xtw**3 - (44/5.)*Xtw**4 + (11/30.)*Xtw**5 \
+ np.power(Stw, 2)*(-(434/5.) + (658/5.)*Xtw - (242/5.)*Xtw**2 + (143/30.)*Xtw**3) \
+ np.power(Stw, 4)*(-(44/5.) + (187/60.)*Xtw)
Y3=(15/2.) + (2505/8.)*Xtw - (7098/5.)*Xtw**2 + (14253/10.)*Xtw**3 - (18594/35.)*Xtw**4 + (12059/140.)*Xtw**5 - (128/21.)*Xtw**6 + (16/105.)*Xtw**7 \
+ np.power(Stw, 2)*(-(7098/10.) + (14253/5.)*Xtw - (102267/35.)*Xtw**2 + (156767/140.)*Xtw**3 - (1216/7.)*Xtw**4 + (64/7.)*Xtw**5) \
+ np.power(Stw, 4)*(-(18594/35.) + (205003/280.)*Xtw - (1920/7.)*Xtw**2 + (1024/35.)*Xtw**3) \
+ np.power(Stw, 6)*(-(544/21.) + (992/105.)*Xtw)
Y4=-(135/32.) + (30375/128.)*Xtw - (62391/10.)*Xtw**2 + (614727/40.)*Xtw**3 - (124389/10.)*Xtw**4 \
+ (355703/80.)*Xtw**5 - (16568/21.)*Xtw**6 + (7516/105.)*Xtw**7 - (22/7.)*Xtw**8 + (11/210.)*Xtw**9 \
+ np.power(Stw, 2)*(-(62391/20.) + (614727/20.)*Xtw - (1368279/20.)*Xtw**2 + (4624139/80.)*Xtw**3 - (157396/7.)*Xtw**4 \
+ (30064/7.)*Xtw**5 - (2717/7.)*Xtw**6 + (2761/210.)*Xtw**7) \
+ np.power(Stw, 4)*(-(124389/10.) + (6046951/160.)*Xtw - (248520/7.)*Xtw**2 + (481024/35.)*Xtw**3 - (15972/7.)*Xtw**4 + (18689/140.)*Xtw**5) \
+ np.power(Stw, 6)*(-(70414/21.) + (465992/105.)*Xtw - (11792/7.)*Xtw**2 + (19778/105.)*Xtw**3) \
+ np.power(Stw, 8)*(-(682/7.) + (7601/210.)*Xtw)
deltaSZE=((X**3)/(np.exp(X)-1)) * ((thetae*X*np.exp(X))/(np.exp(X)-1)) * (Y0 + Y1*thetae + Y2*thetae**2 + Y3*thetae**3 + Y4*thetae**4)
fRel=1+deltaSZE
return fRel
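# Quick illustrative check (values arbitrary): the correction is at the few-per-cent level at
# 148 GHz for a hot, massive cluster.
# Ez = ccl.h_over_h0(fiducialCosmoModel, 1/(1+0.3))
# fRel = calcFRel(0.3, 5e14, Ez, obsFreqGHz = 148.0)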
#------------------------------------------------------------------------------------------------------------
def getM500FromP(P, log10M, calcErrors = True):
"""Returns M500 as the maximum likelihood value from given P(log10M) distribution, together with
1-sigma error bars (M500, -M500Err, +M500 err).
"""
# Find max likelihood and integrate to get error bars
tckP=interpolate.splrep(log10M, P)
fineLog10M=np.linspace(log10M.min(), log10M.max(), 10000)
fineP=interpolate.splev(fineLog10M, tckP)
fineP=fineP/np.trapz(fineP, fineLog10M)
index=np.argmax(fineP)
clusterLogM500=fineLog10M[index]
clusterM500=np.power(10, clusterLogM500)/1e14
if calcErrors == True:
for n in range(fineP.shape[0]):
minIndex=index-n
maxIndex=index+n
if minIndex < 0 or maxIndex > fineP.shape[0]:
# This shouldn't happen; if it does, probably y0 is in the wrong units
# Previously we threw an exception here, but we can't if using this for forced photometry
#print("WARNING: outside M500 range - check y0 units or for problem at cluster location in map (if not in forced photometry mode)")
clusterM500MinusErr=0.
clusterM500PlusErr=0.
break
p=np.trapz(fineP[minIndex:maxIndex], fineLog10M[minIndex:maxIndex])
if p >= 0.6827:
clusterLogM500Min=fineLog10M[minIndex]
clusterLogM500Max=fineLog10M[maxIndex]
clusterM500MinusErr=(np.power(10, clusterLogM500)-np.power(10, clusterLogM500Min))/1e14
clusterM500PlusErr=(np.power(10, clusterLogM500Max)-np.power(10, clusterLogM500))/1e14
break
else:
clusterM500MinusErr=0.
clusterM500PlusErr=0.
return clusterM500, clusterM500MinusErr, clusterM500PlusErr
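# Example reading of the return value (units of 1e14 MSun):
# M500, errMinus, errPlus = getM500FromP(P, mockSurvey.log10M)
# i.e. M500 (+errPlus) (-errMinus), with both errors set to zero if the 68% interval runs off
# the mass grid.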
#------------------------------------------------------------------------------------------------------------
def y0FromLogM500(log10M500, z, tckQFit, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14, sigma_int = 0.2,
cosmoModel = None, applyRelativisticCorrection = True, fRelWeightsDict = {148.0: 1.0}):
"""Predict y0~ given logM500 (in MSun) and redshift. Default scaling relation parameters are A10 (as in
H13).
Use cosmoModel (:obj:`pyccl.Cosmology`) to change/specify cosmological parameters.
fRelWeightsDict is used to account for the relativistic correction when y0~ has been constructed
from multi-frequency maps. Weights should sum to 1.0; keys are observed frequency in GHz.
Returns y0~, theta500Arcmin, Q
    NOTE: Deprecated? Nothing we have uses this.
"""
if type(Mpivot) == str:
raise Exception("Mpivot is a string - check Mpivot in your .yml config file: use, e.g., 3.0e+14 (not 3e14 or 3e+14)")
# Filtering/detection was performed with a fixed fiducial cosmology... so we don't need to recalculate Q
# We just need to recalculate theta500Arcmin and E(z) only
M500=np.power(10, log10M500)
theta500Arcmin=calcTheta500Arcmin(z, M500, cosmoModel)
Q=calcQ(theta500Arcmin, tckQFit)
# Relativistic correction: now a little more complicated, to account for fact y0~ maps are weighted sum
# of individual frequency maps, and relativistic correction size varies with frequency
if applyRelativisticCorrection == True:
fRels=[]
freqWeights=[]
for obsFreqGHz in fRelWeightsDict.keys():
fRels.append(calcFRel(z, M500, cosmoModel, obsFreqGHz = obsFreqGHz))
freqWeights.append(fRelWeightsDict[obsFreqGHz])
fRel=np.average(np.array(fRels), axis = 0, weights = freqWeights)
else:
fRel=1.0
# UPP relation according to H13
# NOTE: m in H13 is M/Mpivot
# NOTE: this goes negative for crazy masses where the Q polynomial fit goes -ve, so ignore those
y0pred=tenToA0*np.power(cosmoModel.efunc(z), 2)*np.power(M500/Mpivot, 1+B0)*Q*fRel
return y0pred, theta500Arcmin, Q
#------------------------------------------------------------------------------------------------------------
def calcMass(y0, y0Err, z, zErr, QFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14,
sigma_int = 0.2, Ez_gamma = 2, onePlusRedshift_power = 0.0, applyMFDebiasCorrection = True, applyRelativisticCorrection = True,
calcErrors = True, fRelWeightsDict = {148.0: 1.0}, tileName = None):
"""Returns M500 +/- errors in units of 10^14 MSun, calculated assuming a y0 - M relation (default values
assume UPP scaling relation from Arnaud et al. 2010), taking into account the steepness of the mass
function. The approach followed is described in H13, Section 3.2.
Here, mockSurvey is a MockSurvey object. We're using this to handle the halo mass function calculations
(in turn using the Colossus module). Supplying mockSurvey is no longer optional (and handles setting the
cosmology anyway when initialised or updated).
    QFit should be a QFit object (see the QFit class), constructed from the output of fitQ.
If applyMFDebiasCorrection == True, apply correction that accounts for steepness of mass function.
If applyRelativisticCorrection == True, apply relativistic correction (weighted by frequency using the
contents of fRelWeightsDict).
If calcErrors == False, error bars are not calculated, they are just set to zero.
fRelWeightsDict is used to account for the relativistic correction when y0~ has been constructed
from multi-frequency maps. Weights should sum to 1.0; keys are observed frequency in GHz.
Returns dictionary with keys M500, M500_errPlus, M500_errMinus
"""
if y0 < 0:
raise Exception('y0 cannot be negative')
if y0 > 1e-2:
raise Exception('y0 is suspiciously large - probably you need to multiply by 1e-4')
P=calcPMass(y0, y0Err, z, zErr, QFit, mockSurvey, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot,
sigma_int = sigma_int, Ez_gamma = Ez_gamma, onePlusRedshift_power = onePlusRedshift_power,
applyMFDebiasCorrection = applyMFDebiasCorrection,
applyRelativisticCorrection = applyRelativisticCorrection, fRelWeightsDict = fRelWeightsDict,
tileName = tileName)
M500, errM500Minus, errM500Plus=getM500FromP(P, mockSurvey.log10M, calcErrors = calcErrors)
label=mockSurvey.mdefLabel
return {'%s' % (label): M500, '%s_errPlus' % (label): errM500Plus, '%s_errMinus' % (label): errM500Minus}
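# Typical call (the QFit and mockSurvey objects are assumed to exist; numbers are illustrative):
# massDict = calcMass(3e-4, 5e-5, 0.45, 0.01, QFit, mockSurvey, tileName = '1_10_0')
# The dictionary keys follow mockSurvey.mdefLabel, e.g. 'M500c', 'M500c_errPlus', 'M500c_errMinus'.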
#------------------------------------------------------------------------------------------------------------
def calcPMass(y0, y0Err, z, zErr, QFit, mockSurvey, tenToA0 = 4.95e-5, B0 = 0.08, Mpivot = 3e14,
sigma_int = 0.2, Ez_gamma = 2, onePlusRedshift_power = 0.0, applyMFDebiasCorrection = True,
applyRelativisticCorrection = True, fRelWeightsDict = {148.0: 1.0}, return2D = False,
tileName = None):
"""Calculates P(M500) assuming a y0 - M relation (default values assume UPP scaling relation from Arnaud
et al. 2010), taking into account the steepness of the mass function. The approach followed is described
in H13, Section 3.2. The binning for P(M500) is set according to the given mockSurvey, as are the assumed
cosmological parameters.
This routine is used by calcMass.
Ez_gamma is E(z)^gamma factor (we assumed E(z)^2 previously)
onePlusRedshift_power: added multiplication by (1+z)**onePlusRedshift_power (for checking evolution)
If return2D == True, returns a grid of same dimensions / binning as mockSurvey.z, mockSurvey.log10M,
normalised such that the sum of the values is 1.
"""
# For marginalising over photo-z errors (we assume +/-5 sigma is accurate enough)
if zErr > 0:
zMin=z-zErr*5
zMax=z+zErr*5
zMask=np.logical_and(np.greater_equal(mockSurvey.z, zMin), np.less(mockSurvey.z, zMax))
zRange=mockSurvey.z[zMask]
#if zMin <= 0:
#zMin=1e-3
#zRange=np.arange(zMin, zMax, 0.005)
Pz=np.exp(-np.power(z-zRange, 2)/(2*(np.power(zErr, 2))))
Pz=Pz/np.trapz(Pz, zRange)
else:
zRange=[z]
Pz=np.ones(len(zRange))
log_y0=np.log(y0)
log_y0Err=y0Err/y0
# NOTE: Swap below if want to use bigger log10M range...
log10Ms=mockSurvey.log10M
#log10MStep=mockSurvey.log10M[1]-mockSurvey.log10M[0]
#log10Ms=np.arange(-100.0, 100.0, log10MStep)
PArr=[]
for k in range(len(zRange)):
zk=zRange[k]
# We've generalised mockSurvey to be able to use e.g. M200m, but Q defined for theta500c
# So, need a mapping between M500c and whatever mass definition used in mockSurvey
# This only needed for extracting Q, fRel values
if mockSurvey.delta != 500 or mockSurvey.rhoType != "critical":
log10M500c_zk=np.log10(mockSurvey.mdef.translate_mass(mockSurvey.cosmoModel,
np.power(10, log10Ms),
1/(1+zk), mockSurvey._M500cDef))
else:
log10M500c_zk=log10Ms
mockSurvey_zIndex=np.argmin(abs(mockSurvey.z-zk))
theta500s=interpolate.splev(log10M500c_zk, mockSurvey.theta500Splines[mockSurvey_zIndex], ext = 3)
Qs=QFit.getQ(theta500s, zk, tileName = tileName)
fRels=interpolate.splev(log10M500c_zk, mockSurvey.fRelSplines[mockSurvey_zIndex], ext = 3)
fRels[np.less_equal(fRels, 0)]=1e-4 # For extreme masses (> 10^16 MSun) at high-z, this can dip -ve
y0pred=tenToA0*np.power(mockSurvey.Ez[mockSurvey_zIndex], Ez_gamma)*np.power(np.power(10, log10Ms)/Mpivot, 1+B0)*Qs
y0pred=y0pred*np.power(1+zk, onePlusRedshift_power)
if applyRelativisticCorrection == True:
y0pred=y0pred*fRels
if np.less(y0pred, 0).sum() > 0:
# This generally means we wandered out of where Q is defined (e.g., beyond mockSurvey log10M limits)
# Or fRel can dip -ve for extreme mass at high-z (can happen with large Om0)
raise Exception("Some predicted y0 values are negative.")
log_y0pred=np.log(y0pred)
Py0GivenM=np.exp(-np.power(log_y0-log_y0pred, 2)/(2*(np.power(log_y0Err, 2)+np.power(sigma_int, 2))))
Py0GivenM=Py0GivenM/np.trapz(Py0GivenM, log10Ms)
# Mass function de-bias
if applyMFDebiasCorrection == True:
PLog10M=mockSurvey.getPLog10M(zk)
PLog10M=PLog10M/np.trapz(PLog10M, log10Ms)
else:
PLog10M=1.0
P=Py0GivenM*PLog10M*Pz[k]
PArr.append(P)
# 2D PArr is what we would want to project onto (M, z) grid
PArr=np.array(PArr)
# Marginalised over z uncertainty
P=np.sum(PArr, axis = 0)
P=P/np.trapz(P, log10Ms)
# Reshape to (M, z) grid - use this if use different log10M range to mockSurvey
#tck=interpolate.splrep(log10Ms, P)
#P=interpolate.splev(mockSurvey.log10M, tck, ext = 1)
if return2D == True:
P2D=np.zeros(mockSurvey.clusterCount.shape)
if zErr == 0:
P2D[np.argmin(abs(mockSurvey.z-z))]=PArr
else:
P2D[zMask]=PArr
P=P2D/P2D.sum()
#astImages.saveFITS("test.fits", P.transpose(), None)
return P
#------------------------------------------------------------------------------------------------------------
# Mass conversion routines
# For getting x(f) - see Hu & Kravtsov
x=np.linspace(1e-3, 10, 1000)
fx=(x**3)*(np.log(1+1./x)-np.power(1+x, -1))
XF_TCK=interpolate.splrep(fx, x)
FX_TCK=interpolate.splrep(x, fx)
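# XF_TCK gives x(f) (inverting the NFW profile function f(x) defined above) and FX_TCK gives
# f(x) directly; both are the ingredients for Hu & Kravtsov style mass-definition conversions.
# For example: x_at_f = interpolate.splev(0.1, XF_TCK)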
#------------------------------------------------------------------------------------------------------------
def gz(zIn, zMax = 1000, dz = 0.1):
"""Calculates linear growth factor at redshift z. Use Dz if you want normalised to D(z) = 1.0 at z = 0.
See http://www.astronomy.ohio-state.edu/~dhw/A873/notes8.pdf for some notes on this.
"""
zRange=np.arange(zIn, zMax, dz)
HzPrime=[]
for zPrime in zRange:
HzPrime.append(astCalc.Ez(zPrime)*astCalc.H0)
HzPrime=np.array(HzPrime)
gz=astCalc.Ez(zIn)*np.trapz((np.gradient(zRange)*(1+zRange)) / np.power(HzPrime, 3), zRange)
return gz
#------------------------------------------------------------------------------------------------------------
def calcDz(zIn):
"""Calculate linear growth factor, normalised to D(z) = 1.0 at z = 0.
"""
return gz(zIn)/gz(0.0)
#------------------------------------------------------------------------------------------------------------
def criticalDensity(z):
"""Returns the critical density at the given z.
"""
G=4.301e-9 # in MSun-1 km2 s-2 Mpc, see Robotham GAMA groups paper
Hz=astCalc.H0*astCalc.Ez(z)
    rho_crit=((3*np.power(Hz, 2))/(8*np.pi*G))
    return rho_crit
import unittest
import numpy as np
import pytest
from l5kit.data import (AGENT_DTYPE, ChunkedDataset, FRAME_DTYPE, get_agents_slice_from_frames,
get_tl_faces_slice_from_frames, SCENE_DTYPE, TL_FACE_DTYPE)
from l5kit.simulation.utils import disable_agents, get_frames_subset, insert_agent
class TestAgentInsert(unittest.TestCase):
def _get_simple_dataset(self) -> ChunkedDataset:
# build a simple dataset with 3 frames
# frame 0:
# agent 0
# agent 1
# agent 2
# frame 1:
# agent 0
# agent 1
# frame 2:
# agent 0
dataset = ChunkedDataset("")
dataset.scenes = np.zeros(1, dtype=SCENE_DTYPE)
dataset.frames = np.zeros(3, dtype=FRAME_DTYPE)
dataset.agents = np.zeros(6, dtype=AGENT_DTYPE)
dataset.scenes[0]["frame_index_interval"] = (0, 3)
dataset.frames["agent_index_interval"] = [(0, 3), (3, 5), (5, 6)]
dataset.agents["track_id"] = [0, 1, 2, 0, 1, 0]
return dataset
def test_invalid(self) -> None:
# try to insert out of bounds
with self.assertRaises(ValueError):
insert_agent(
np.zeros(1, dtype=AGENT_DTYPE), 100, self._get_simple_dataset()
)
# try to insert in a multi-scene dataset
with self.assertRaises(ValueError):
dataset = self._get_simple_dataset()
dataset.scenes = np.concatenate([dataset.scenes, dataset.scenes])
insert_agent(np.zeros(1, dtype=AGENT_DTYPE), 100, dataset)
def test_update(self) -> None:
# try to update agent 0 in frame 0
dataset = self._get_simple_dataset()
        agent = np.zeros(1, dtype=AGENT_DTYPE)
import numpy as np
# intra-package python imports
from ..kinetics.massaction import MassAction
from .cells import Cell
from .genes import TwoStateGene
class TwoStateCell(Cell):
"""
Class defines a cell with one or more protein coding genes. Transcription is based on a twostate model.
Attributes:
off_states (dict) - {name: node_id} pairs
on_states (dict) - {name: node_id} pairs
dosage (int) - dosage of each gene (used to set initial conditions)
Inherited Attributes:
transcripts (dict) - {name: node_id} pairs
proteins (dict) - {name: node_id} pairs
phosphorylated (dict) - {name: node_id} pairs
nodes (np.ndarray) - vector of node indices
node_key (dict) - {state dimension: node id} pairs
reactions (list) - list of reaction objects
stoichiometry (np.ndarray) - stoichiometric coefficients, (N,M)
N (int) - number of nodes
M (int) - number of reactions
I (int) - number of inputs
"""
def __init__(self,
genes=(),
I=1,
dosage=2,
**kwargs):
"""
Instantiate twostate cell with one or more protein coding genes.
Args:
genes (tuple) - names of genes
I (int) - number of input channels
dosage (int) - dosage of each gene (used to set initial conditions)
kwargs: keyword arguments for add_genes
"""
self.off_states = {}
self.on_states = {}
self.dosage = dosage
super().__init__(genes, I, **kwargs)
@property
def ic(self):
""" Default initial condition. """
ic = np.zeros(self.N, dtype=np.int64)
for off_state in self.off_states.values():
ic[off_state] = self.dosage
return ic
def constrain_ic(self, ic):
"""
Constrains initial condition to specified gene dosage.
Args:
ic (np.ndarray[double]) - initial condition
"""
for gene in self.off_states.keys():
# get current dosage specified by initial condition
off_state = self.off_states[gene]
on_state = self.on_states[gene]
            current_dosage = ic[off_state] + ic[on_state]
            # if dosage is correct, leave as is
            if current_dosage == self.dosage:
                continue
            # if dosage is too low, add to the off state
            elif current_dosage < self.dosage:
                ic[off_state] += (self.dosage - current_dosage)
# if dosage is too high, remove from the on state first
while ic[off_state] + ic[on_state] > self.dosage:
if ic[on_state] > 0:
ic[on_state] -= 1
else:
ic[off_state] -= 1
def add_gene(self, **kwargs):
"""
Add individual gene.
kwargs: keyword arguments for Gene instantiation
"""
gene = TwoStateGene(**kwargs)
# update nodes and reactions
shift = self.nodes.size
added_node_ids = np.arange(shift, shift+gene.nodes.size)
self.update_reaction_dimensions(added_node_ids=added_node_ids)
# add new nodes
self.nodes = np.append(self.nodes, added_node_ids)
self.reactions.extend([rxn.shift(shift) for rxn in gene.reactions])
# update dictionaries
self.off_states.update({k: v+shift for k,v in gene.off_states.items()})
self.on_states.update({k: v+shift for k,v in gene.on_states.items()})
self.transcripts.update({k: v+shift for k,v in gene.transcripts.items()})
self.proteins.update({k: v+shift for k,v in gene.proteins.items()})
def add_activation(self,
gene,
activator,
k=1,
atp_sensitive=False,
carbon_sensitive=False,
ribosome_sensitive=False,
**labels
):
"""
Add gene activation reaction.
Args:
gene (str) - target gene name
activator (str) - names of activating protein
k (float) - activation rate constant
labels (dict) - additional labels for reaction
"""
# define reaction name
labels['name'] = gene+' activation'
# define stoichiometry
stoichiometry = np.zeros(self.nodes.size, dtype=np.int64)
stoichiometry[self.on_states[gene]] = 1
stoichiometry[self.off_states[gene]] = -1
# define propensity
propensity = np.zeros(self.nodes.size, dtype=np.int64)
propensity[self.off_states[gene]] = 1
input_dependence = np.zeros(self.I, dtype=np.int64)
if 'IN' in activator:
if '_' in activator:
input_dependence[int(activator.split('_')[-1])] = 1
else:
input_dependence[0] = 1
else:
propensity[self.proteins[activator]] = 1
# define reaction
rxn = MassAction(
stoichiometry=stoichiometry,
propensity=propensity,
input_dependence=input_dependence,
k=k,
atp_sensitive=atp_sensitive,
carbon_sensitive=carbon_sensitive,
ribosome_sensitive=ribosome_sensitive,
labels=labels)
# add reaction
self.reactions.append(rxn)
def add_transcriptional_repressor(self,
actuator,
target,
k=1.,
atp_sensitive=False,
carbon_sensitive=False,
ribosome_sensitive=False,
**labels
):
"""
Add transcriptional repressor.
Args:
actuator (str) - actuating substrate
target (float) - target gene
k (float) - maximum degradation rate
atp_sensitive (bool) - scale rate with metabolism
ribosome_sensitive (bool) - scale rate with ribosomes
labels (dict) - additional labels for reaction
"""
# define reaction name
labels['name'] = target+' repression'
# define stoichiometry
stoichiometry = np.zeros(self.N, dtype=np.int64)
stoichiometry[self.on_states[target]] = -1
stoichiometry[self.off_states[target]] = 1
# define propensity
propensity = np.zeros(self.N, dtype=np.int64)
propensity[self.on_states[target]] = 1
        input_dependence = np.zeros(self.I, dtype=np.int64)
import argparse
import numpy as np
from itertools import count
from collections import namedtuple
import cv2
import sys
import IPython
import matplotlib.pyplot as plt
import os
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from snakeEnv import GridWorld
from collections import deque
import random
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('runs/snakeNet_CNN_AC')
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward,next_state, done))
def sample(self, batch_size):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return np.concatenate(state), action, reward, np.concatenate(next_state), done
def __len__(self):
return len(self.buffer)
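# Usage sketch (shapes are illustrative): push transitions during play, then sample a minibatch.
# replay = ReplayBuffer(10000)
# replay.push(state, action, reward, next_state, done)
# states, actions, rewards, next_states, dones = replay.sample(32)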
# Snake game
env = GridWorld(grid_state=True)
env.seed(42)
torch.manual_seed(42)
class Policy(nn.Module):
"""
implements both actor and critic in one model
"""
def __init__(self):
super(Policy, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=2, stride=1, padding=1)
self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(128, 64, kernel_size=4, stride=2)
self.fc1 = nn.Linear(1024, 512)
# Actor layer
self.action_head = nn.Linear(512, 4)
# Critic layer
self.critic_head = nn.Linear(512, 1)
def forward(self, x):
x = x/255.0
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.relu(x)
# actor: choses action to take from state s_t
# by returning probability of each action
action_probs = F.softmax(self.action_head(x), dim=-1)
# critic, judges current state value
state_val = self.critic_head(x)
return action_probs, state_val
device = torch.device("cpu")#"cuda:0" if torch.cuda.is_available() else "cpu")
print("Training on: ", device)
save_net_path = "SnakeNet_CNN_AC"
# save_net_path = "drive/My Drive/SnakeNet_CNN_AC"
model = Policy()
if os.path.exists(save_net_path):
model.load_state_dict(torch.load(save_net_path))
model.eval()
model.to(device)
temp_optimizer = optim.Adam(model.parameters(), lr=7e-4)
eps = np.finfo(np.float32).eps.item()
import numpy as np
from warnings import warn
from typing import List, Callable
from desdeo_emo.selection.SelectionBase import SelectionBase
from desdeo_emo.population.Population import Population
from desdeo_emo.othertools.ReferenceVectors import ReferenceVectors
from typing import TYPE_CHECKING
from desdeo_emo.othertools.ProbabilityWrong import Probability_wrong
import os
os.environ["OMP_NUM_THREADS"] = "1"
if TYPE_CHECKING:
from pyRVEA.allclasses import ReferenceVectors
class Prob_Hybrid_APD_Select_v3_1(SelectionBase):
"""The selection operator for the RVEA algorithm. Read the following paper for more
details.
<NAME>, <NAME>, <NAME> and <NAME>, A Reference Vector Guided
Evolutionary Algorithm for Many-objective Optimization, IEEE Transactions on
Evolutionary Computation, 2016
Parameters
----------
pop : Population
The population instance
time_penalty_function : Callable
A function that returns the time component in the penalty function.
alpha : float, optional
The RVEA alpha parameter, by default 2
"""
def __init__(
self, pop: Population, time_penalty_function: Callable, alpha: float = 2
):
self.time_penalty_function = time_penalty_function
self.alpha = alpha
self.n_of_objectives = pop.problem.n_of_objectives
def do(self, pop: Population, vectors: ReferenceVectors) -> List[int]:
"""Select individuals for mating on basis of APD and probabilistic APD.
Args:
fitness (list): Fitness of the current population.
uncertainty (list) : Uncertainty of the predicted objective values
vectors (ReferenceVectors): Class containing reference vectors.
penalty_factor (float): Multiplier of angular deviation from Reference
vectors. See RVEA paper for details.
ideal (list): ideal point for the population.
Uses the min fitness value if None.
Returns:
[type]: A list of indices of the selected individuals.
"""
fitness = pop.fitness
uncertainty = pop.uncertainity
penalty_factor = self._partial_penalty_factor()
refV = vectors.neighbouring_angles_current
# Normalization - There may be problems here
fmin = np.amin(fitness, axis=0)
translated_fitness = fitness - fmin
fitness_norm = np.linalg.norm(translated_fitness, axis=1)
fitness_norm = np.repeat(fitness_norm, len(translated_fitness[0, :])).reshape(
len(fitness), len(fitness[0, :])
)
normalized_fitness = np.divide(translated_fitness, fitness_norm) # Checked, works.
cosine = np.dot(normalized_fitness, np.transpose(vectors.values))
if cosine[np.where(cosine > 1)].size:
#print(
# "RVEA.py line 60 cosine larger than 1 decreased to 1:"
#)
cosine[np.where(cosine > 1)] = 1
if cosine[np.where(cosine < 0)].size:
#print(
# "RVEA.py line 64 cosine smaller than 0 decreased to 0:"
#)
cosine[np.where(cosine < 0)] = 0
# Calculation of angles between reference vectors and solutions
theta = np.arccos(cosine)
# Reference vector asub_population_indexssignment
assigned_vectors = np.argmax(cosine, axis=1)
selection = np.array([], dtype=int)
#########################################################
refV = vectors.neighbouring_angles_current
fmin = np.amin(fitness, axis=0)
# fmin = np.array([0,0])
translated_fitness = fitness - fmin
pwrong = Probability_wrong(mean_values=translated_fitness, stddev_values=uncertainty, n_samples=1000)
pwrong.vect_sample_f()
fitness_norm = np.linalg.norm(pwrong.f_samples, axis=1)
fitness_norm = np.repeat(np.reshape(fitness_norm, (len(fitness), 1, pwrong.n_samples)), len(fitness[0, :]), axis=1)
normalized_fitness = np.divide(pwrong.f_samples, fitness_norm) # Checked, works.
cosine = np.tensordot(normalized_fitness, np.transpose(vectors.values), axes=([1], [0]))
cosine = np.transpose(cosine, (0, 2, 1))
if cosine[np.where(cosine > 1)].size:
#print(
# "RVEA.py line 60 cosine larger than 1 decreased to 1:"
#)
cosine[np.where(cosine > 1)] = 1
if cosine[np.where(cosine < 0)].size:
#print(
# "RVEA.py line 64 cosine smaller than 0 decreased to 0:"
#)
cosine[np.where(cosine < 0)] = 0
# Calculation of angles between reference vectors and solutions
theta2 = np.arccos(cosine)
# Reference vector asub_population_indexssignment
# pwrong.compute_pdf(cosine)
# Compute rank of cos theta (to be vectorized)
rank_cosine = np.mean(cosine, axis=2)
# print("Rank cosine:")
# print(rank_cosine)
assigned_vectors2 = np.argmax(rank_cosine, axis=1)
selection = np.array([], dtype=int)
# Selection
vector_selection = None
# fig = plt.figure(1, figsize=(6, 6))
# ax = fig.add_subplot(111)
for i in range(0, len(vectors.values)):
sub_population_index = np.atleast_1d(
np.squeeze(np.where(assigned_vectors == i))
)
sub_population_fitness = pwrong.f_samples[sub_population_index]
#sub_population_fitness = translated_fitness[sub_population_index]
if len(sub_population_fitness > 0):
angles = theta2[sub_population_index, i]
angles = np.divide(angles, refV[i]) # This is correct.
# You have done this calculation before. Check with fitness_norm
# Remove this horrible line
sub_pop_fitness_magnitude = np.sqrt(
np.sum(np.power(sub_population_fitness, 2), axis=1)
)
sub_popfm = np.reshape(sub_pop_fitness_magnitude,
(1, len(sub_pop_fitness_magnitude[:, 0]), pwrong.n_samples))
angles = np.reshape(angles, (1, len(angles), pwrong.n_samples))
#apd = np.multiply(
# np.mean(sub_popfm,axis=2),
# (1 + np.dot(penalty_factor, np.mean(angles, axis=2))),
#)
#rank_apd = apd
                apd = np.multiply(
                    sub_popfm,
                    (1 + np.dot(penalty_factor, angles)),
                )
# %% id="-kMcwx2S7jOR"
import pandas as pd
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Activation, BatchNormalization, Dropout
from keras import regularizers
from keras.optimizers import RMSprop, Adam, SGD
import keras
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.metrics import mean_absolute_error
import joblib
dts = pd.read_csv('solarpowergeneration.csv')
dts.head(10)
X = dts.iloc[:, :].values[:-1]
y = dts.iloc[:, -1].values[1:]
print(X.shape, y.shape)
y = np.reshape(y, (-1,1))
y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
print("Train Shape: {} {} \nTest Shape: {} {}".format(X_train.shape, y_train.shape, X_test.shape, y_test.shape))
from sklearn.preprocessing import StandardScaler
# input scaling
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# outcome scaling:
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)
y_test = sc_y.transform(y_test)
reconstructed_model = keras.models.load_model("my_h5_model.h5")
# Convert using float fallback quantization
X_train = X_train.astype(np.uint8)
# Save the models as files
import pathlib
tflite_models_dir = pathlib.Path("C:/Users/NGOCNEO/Desktop/Solar-Power-Generation-Forecasting-main/Solar-Power-Generation-Forecasting-main/ANN")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
# Save the unquantized/float model:
tflite_model_file = tflite_models_dir/"tflite_model_defaults.tflite"
# tflite_model_file.write_bytes(tflite_model_defaults)
# Save the quantized model:
tflite_model_quant_fallback_quantization = tflite_models_dir/"tflite_model_quant_fallback_quantization.tflite"
# tflite_model_quant_file.write_bytes(tflite_model_quant_fallback_quantization)
# Save the unquantized/float model:
tflite_model_quant_optimize = tflite_models_dir/"tflite_model_quant_optimize.tflite"
# tflite_model_file.write_bytes(tflite_model_quant_optimize)
# Save the quantized model:
tflite_model_quant_integer_only_quantization = tflite_models_dir/"tflite_model_quant_integer_only_quantization.tflite"
# tflite_model_quant_file.write_bytes(tflite_model_quant_integer_only_quantization)
# run model
def run_tflite(tflite_model_file,predict_test):
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
    test_image = np.expand_dims(X_train[0], axis=0)
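    # Hedged sketch of how such an interpreter call usually continues (not part of the original file):
    # interpreter.set_tensor(input_details["index"], test_image.astype(input_details["dtype"]))
    # interpreter.invoke()
    # prediction = interpreter.get_tensor(output_details["index"])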
# fig_flat_pn_excitation.py ---
# Author: <NAME>
# Created: Wed Dec 19 13:36:58 2018 (-0500)
# Last-Updated: Thu Dec 20 19:31:50 2018 (-0500)
# By: <NAME>
# Version: $Id$
# Code:
"""This script plots the GGN response when the PN excitation is flat
with a constant active population (not shifting)"""
from __future__ import print_function
import sys
if sys.platform == 'win32':
sys.path.append('D:/subhasis_ggn/model/analysis')
else:
sys.path += ['/home/rays3/projects/ggn/analysis',
'/home/rays3/projects/ggn/morphutils',
'/home/rays3/projects/ggn/nrn',
'/home/rays3/apps/matrix-entropy',
'/home/rays3/apps/Spike-Contrast/Python3']
import os
import argparse
import h5py as h5
import numpy as np
import time
# import random
import matplotlib
import matplotlib.colors as colors
from matplotlib import pyplot as plt
from matplotlib import gridspec
# import yaml
# import pint
import collections
import operator
import itertools as it
import pandas as pd
import yaml
import network_data_analysis as nda
# from matplotlib.backends.backend_pdf import PdfPages
import pn_kc_ggn_plot_mpl as myplot
import clust_fixed_net_multi_trial as cl
plt.rc('font', size=11)
analysis_dir = 'D:/subhasis_ggn/model/analysis'
datadir = 'D:/biowulf_stage/olfactory_network'
os.chdir(analysis_dir)
# jid = 9674116 # this one produced spikes in 4473 KCs out of 50k
# jids = [
# '9674118',
# '9674119',
# '9673663',
# '9674019',
# '9673664',
# '9674069',
# '9674087',
# '9674120',
# '9673863']
# ^-These will not do - constant GGN->KC inhibition with 2% weakly inhibited
jid = '9674118'
fname = nda.find_h5_file(jid, datadir)
gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[3, 1], hspace=0.05)
fig = plt.figure()
ax0 = fig.add_subplot(gs[0])
ax1 = fig.add_subplot(gs[1], sharex=ax0)
axes = [ax0, ax1]
with h5.File(fname, 'r') as fd:
print('jid: {} spiking KCs: {}'.format(jid, len(nda.get_spiking_kcs(fd))))
print(yaml.dump(nda.load_config(fd), default_style=''))
# pn_st = []
# pn_id = []
# for pn in fd[nda.pn_st_path].values():
# pn_st.append(pn.value)
# pn_id.append([int(pn.name.rpartition('_')[-1])] * len(pn))
# ax0.plot(np.concatenate(pn_st[::10]), np.concatenate(pn_id[::10]), 'k,')
kc_x, kc_y = nda.get_event_times(fd[nda.kc_st_path])
    ax0.plot(np.concatenate(kc_x[::10]), np.concatenate(kc_y[::10]), 'k,')
import numpy as np
import numpy.linalg as la
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from tqdm import tqdm
import sys
import SBW_util as util
from matplotlib.animation import FuncAnimation
# init cond
# plots 1D
# report (todo's, structure, division of work )
# 2D scheme correspondence
# stability
# convergence plots
# test case w oxygen
eps_u = 0.001 # 0.01
eps_v = 0.001 # 0.001
gamma_u = 0.005# 0.05
zeta = 0.0
alpha_v = 0.1
beta_v = 0.1
eta_w = 10.0
def constr_lineqU(U, W, V, N, M, T):
'''
N: nb of x grid points (int)
T: current timestep (int)
U: discrete solution of u (np.array)
W: discrete solution of w (np.array)
V: discrete solution of v (np.array)
M: nb of time steps (int)
'''
h = 1.0/float(N)
k = 1.0/float(M)
#assert(U.shape == W.shape and W.shape == V.shape, 'Dim error')
#assert(U.shape[1] ==N and U.shape[0] == M, 'Dim error')
DT = 0
X_length = N
A2Ut = np.zeros((X_length, X_length))
A1Ut = np.zeros((X_length, X_length))
fU = np.zeros((X_length, ))
# BOUNDARY CONDITIONS
A2Ut[0,0], A2Ut[0, 1] = -1, 1 # left boundary
A2Ut[-1, -2], A2Ut[-1,-1] = -1, 1 # right boundary
A1Ut[0,0], A1Ut[0, 1] = -1, 1 # left boundary
A1Ut[-1, -2], A1Ut[-1,-1] = -1, 1 # right boundary
# A1 UM+1 = f - A2 UM
for i in range(1, X_length-1): # for each x in space do
A2Ut[i, i] = -1 - zeta*(-2*eps_u*k/(h**2)+ gamma_u*k/(h**2)*(W[T-DT, i+1]+W[T-DT, i-1]-2*W[T-DT, i])) # contribution of UN
A2Ut[i, i+1] = - zeta*(eps_u*k/(h**2)+gamma_u*k/(4*h**2)*(W[T-DT, i+1]-W[T-DT,i-1])) # contribution of UN-1
A2Ut[i, i-1] = - zeta*(eps_u*k/(h**2)-gamma_u*k/(4*h**2)*(W[T-DT, i+1]-W[T-DT,i-1]))
A1Ut[i,i] = 1 - (1-zeta)*(-2*eps_u*k/(h**2)+ gamma_u*k/(h**2)*(W[T-DT, i+1]+W[T-DT, i-1]-2*W[T-DT, i]))
A1Ut[i, i+1] = - (1-zeta)*(eps_u*k/(h**2)+gamma_u*k/(4*h**2)*(W[T-DT, i+1]-W[T-DT,i-1]))
A1Ut[i, i-1] = -(1-zeta)*(eps_u*k/(h**2)-gamma_u*k/(4*h**2)*(W[T-DT, i+1]-W[T-DT,i-1]))
dummy = A2Ut@U[T-1,:]
fU = fU - dummy
return A1Ut, fU, A2Ut
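# Sketch of how the system assembled by constr_lineqU is typically advanced one time step
# (names and usage are illustrative only):
# A1U, fU, _ = constr_lineqU(U, W, V, N, M, T)
# U[T, :] = la.solve(A1U, fU)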
def constr_lineqV(U, W, V, N, M, T):
'''
N: nb of x grid points (int)
T: current timestep (int)
U: discrete solution of u (np.array)
W: discrete solution of w (np.array)
V: discrete solution of v (np.array)
M: nb of time steps (int)
'''
k = 1.0/float(M)
h = 1.0/float(N)
#k = 0.25*h**2*1.0/eps_v
#assert(U.shape == W.shape and W.shape == V.shape, 'Dim error')
#assert(U.shape[1]==N and U.shape[0] == M, 'Dim error')
X_length = N
A2Vt = np.zeros((X_length, X_length))
    A1Vt = np.zeros((X_length, X_length))
# -*-coding:utf-8-*
# author: wangxy
from __future__ import print_function
import os, numpy as np, time, cv2, torch
from os import listdir
from os.path import join
from file_operation.file import load_list_from_folder, mkdir_if_inexistence, fileparts
from detection.detection import Detection_2D, Detection_3D_only, Detection_3D_Fusion
from tracking.tracker import Tracker
from datasets.datafusion import datafusion2Dand3D
from datasets.coordinate_transformation import convert_3dbox_to_8corner, convert_x1y1x2y2_to_tlwh
from visualization.visualization_3d import show_image_with_boxes
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def is_image_file(filename):
return any(filename.endswith(extension) for extension in ['.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG'])
def compute_color_for_id(label):
"""
Simple function that adds fixed color depending on the id
"""
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
class DeepFusion(object):
def __init__(self, max_age, min_hits):
'''
:param max_age: The maximum frames in which an object disappears.
:param min_hits: The minimum frames in which an object becomes a trajectory in succession.
'''
self.max_age = max_age
self.min_hits = min_hits
self.tracker = Tracker(max_age,min_hits)
self.reorder = [3, 4, 5, 6, 2, 1, 0]
self.reorder_back = [6, 5, 4, 0, 1, 2, 3]
self.frame_count = 0
def update(self,detection_3D_fusion,detection_2D_only,detection_3D_only,detection_3Dto2D_only,
additional_info, calib_file):
dets_3d_fusion = np.array(detection_3D_fusion['dets_3d_fusion'])
dets_3d_fusion_info = np.array(detection_3D_fusion['dets_3d_fusion_info'])
dets_3d_only = np.array(detection_3D_only['dets_3d_only'])
dets_3d_only_info = np.array(detection_3D_only['dets_3d_only_info'])
if len(dets_3d_fusion) == 0:
dets_3d_fusion = dets_3d_fusion
else:
dets_3d_fusion = dets_3d_fusion[:,self.reorder] # convert [h,w,l,x,y,z,rot_y] to [x,y,z,rot_y,l,w,h]
if len(dets_3d_only) == 0:
dets_3d_only = dets_3d_only
else:
dets_3d_only = dets_3d_only[:, self.reorder]
detection_3D_fusion = [Detection_3D_Fusion(det_fusion, dets_3d_fusion_info[i]) for i, det_fusion in enumerate(dets_3d_fusion)]
detection_3D_only = [Detection_3D_only(det_only, dets_3d_only_info[i]) for i, det_only in enumerate(dets_3d_only)]
detection_2D_only = [Detection_2D(det_fusion) for i, det_fusion in enumerate(detection_2D_only)]
self.tracker.predict_2d()
self.tracker.predict_3d()
self.tracker.update(detection_3D_fusion, detection_3D_only, detection_3Dto2D_only, detection_2D_only, calib_file, iou_threshold=0.5)
self.frame_count += 1
outputs = []
for track in self.tracker.tracks_3d:
if track.is_confirmed():
bbox = np.array(track.pose[self.reorder_back])
outputs.append(np.concatenate(([track.track_id_3d], bbox, track.additional_info)).reshape(1, -1))
if len(outputs) > 0:
outputs = np.stack(outputs, axis=0)
return outputs
@staticmethod
def _xywh_to_tlwh(bbox_xywh): # Convert the coordinate format of the bbox box from center x, y, w, h to upper left x, upper left y, w, h
if isinstance(bbox_xywh, np.ndarray):
bbox_tlwh = bbox_xywh.copy()
elif isinstance(bbox_xywh, torch.Tensor):
bbox_tlwh = bbox_xywh.clone()
bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.
bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.
return bbox_tlwh
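    # Illustrative example: an xywh box with centre (50, 50) and size (20, 10) converts to
    # tlwh (40, 45, 20, 10), since the top-left corner is (cx - w/2, cy - h/2).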
def _tlwh_to_xyxy(self, bbox_tlwh):
x, y, w, h = bbox_tlwh
x1 = max(int(x), 0)
        x2 = int(x + w)  # clamp to the image width here if the image size is available
        y1 = max(int(y), 0)
        y2 = int(y + h)  # clamp to the image height here if the image size is available
return x1, y1, x2, y2
def _tlwh_to_x1y1x2y2(self, bbox_tlwh):
x, y, w, h = bbox_tlwh
x1 = x
x2 = x + w
y1 = y
y2 = y + h
return x1, y1, x2, y2
if __name__ == '__main__':
# Define the file name
data_root = 'datasets/kitti/train'
detections_name_3D = '3D_pointrcnn_Car_val'
detections_name_2D = '2D_rrc_Car_val'
# Define the file path
calib_root = os.path.join(data_root, 'calib_train')
dataset_dir = os.path.join(data_root,'image_02_train')
detections_root_3D = os.path.join(data_root, detections_name_3D)
detections_root_2D = os.path.join(data_root, detections_name_2D)
# Define the file path of results.
save_root = 'results/train' # The root directory where the result is saved
txt_path_0 = os.path.join(save_root, 'data'); mkdir_if_inexistence(txt_path_0)
image_path_0 = os.path.join(save_root, 'image'); mkdir_if_inexistence(image_path_0)
# Open file to save in list.
det_id2str = {1: 'Pedestrian', 2: 'Car', 3: 'Cyclist'}
calib_files = os.listdir(calib_root)
detections_files_3D = os.listdir(detections_root_3D)
detections_files_2D = os.listdir(detections_root_2D)
image_files = os.listdir(dataset_dir)
detection_file_list_3D, num_seq_3D = load_list_from_folder(detections_files_3D, detections_root_3D)
detection_file_list_2D, num_seq_2D = load_list_from_folder(detections_files_2D, detections_root_2D)
image_file_list, _ = load_list_from_folder(image_files, dataset_dir)
total_time, total_frames, i = 0.0, 0, 0 # Tracker runtime, total frames and Serial number of the dataset
tracker = DeepFusion(max_age=25, min_hits=3) # Tracker initialization
# Iterate through each data set
for seq_file_3D, image_filename in zip(detection_file_list_3D, image_files):
print('--------------Start processing the {} dataset--------------'.format(image_filename))
total_image = 0 # Record the total frames in this dataset
seq_file_2D = detection_file_list_2D[i]
seq_name, datasets_name, _ = fileparts(seq_file_3D)
        txt_path = os.path.join(txt_path_0, image_filename + '.txt')
        image_path = os.path.join(image_path_0, image_filename); mkdir_if_inexistence(image_path)
calib_file = [calib_file for calib_file in calib_files if calib_file==seq_name ]
calib_file_seq = os.path.join(calib_root, ''.join(calib_file))
image_dir = os.path.join(dataset_dir, image_filename)
image_filenames = [join(image_dir, x) for x in listdir(image_dir) if is_image_file(x)]
        seq_dets_3D = np.loadtxt(seq_file_3D, delimiter=',')
import numpy as np
import collections
def _xv_from_uni(xi,zmax,gct):
"""
Generate a z vertex displacement from a uniform random variable
both zmax and gct should be in cm
"""
if xi > 0.:
return gct*np.log(xi) + zmax
else:
return -100.*zmax
xv_from_uni = np.vectorize(_xv_from_uni)
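# Minimal usage sketch (hypothetical values, both in cm): map uniform random numbers to
# decay-vertex displacements. Wrapped in a function so importing this module does not run it.
def _demo_xv_from_uni(zmax=100.0, gct=50.0, n=5):
    xi = np.random.rand(n)             # uniform samples in [0, 1)
    return xv_from_uni(xi, zmax, gct)  # z displacements; -100*zmax marks xi <= 0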
def _det_hit_condition(_pv, _pl, det_rad, zmax, xv, Ethr=1.):
"""
returns true if lepton hits a circular detector of radius det_rad,
if it originates from a vector that decays a distance xv from the detector
pv = relativistic 4 vector momentum
pl = relativistic 4 lepton momentum
det_rad = detector radius in cm
xv = z distance of the vector decay vertex from the detector in cm
"""
#Ethr = 1. # E137 Ecal detector threshold energy
pv = np.array(_pv)
pl = np.array(_pl)
# xv < 0 corresponds to decays beyond the detector
if xv < 0:
return False
#print pv
pvt = pv[1:3]
pvz = pv[3]
plt = pl[1:3]
plz = pl[3]
# transverse displacement of vector when it decays
#print xv#,(zmax-xv)*pvt,pvz
vec_delta = (zmax-xv)*pvt/pvz
#print pvt/pvz, np.linalg.norm(vec_delta)
# point at which lepton momentum crosses the detector x-y plane
rvec = vec_delta + xv*plt/plz
#print rvec, np.sqrt(np.dot(rvec,rvec))
    return (pl[0] >= Ethr) and (np.dot(rvec, rvec) <= det_rad**2)
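# Minimal usage sketch (hypothetical four-momenta in GeV, distances in cm): test whether a
# single decay lepton would intersect a detector of radius 100 cm.
def _demo_det_hit_condition():
    pv = [10.0, 0.01, 0.02, 9.99]   # (E, px, py, pz) of the decaying vector
    pl = [5.0, 0.005, 0.01, 4.99]   # (E, px, py, pz) of the lepton
    return _det_hit_condition(pv, pl, det_rad=100.0, zmax=1000.0, xv=500.0)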
#import pandas as pd
import numpy as np
import random
from tqdm import tqdm
#from sklearn.linear_model import LinearRegression
#from pandas.core.common import SettingWithCopyWarning
#import warnings
#from .dbtonumpy import eurusd_prices
#warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from datetime import datetime, timedelta
import datetime as dt
start_date = dt.date.today()
y = dt.timedelta(days=1*365)
end_date = start_date + y
nb_paths = 10
initial_price = 1.10
def r2_score_and_slope(y):
"""takes numpy array of prices and returns r2 score, slope and constant"""
y = np.array(y)
x = np.vstack([list(range(len(y))),np.ones(len(y))]).T
m, c = np.linalg.lstsq(x, y, rcond=None)[0]
y_hat = [(xx*m + c) for xx in list(range(len(y)))]
y_bar = np.sum(y)/len(y)
ssreg = np.sum((y_hat-y_bar)**2)
sstot = np.sum((y - y_bar)**2)
r_2 = ssreg / sstot
return r_2, m, c
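# Minimal usage sketch (synthetic data): fit a noisy straight line and check the recovered fit.
def _demo_r2_score_and_slope():
    y = 2.0 * np.arange(50) + 5.0 + np.random.normal(0.0, 0.5, 50)
    r_2, m, c = r2_score_and_slope(y)
    return r_2, m, c  # r_2 should be close to 1, m close to 2 and c close to 5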
import datetime as dt
def monte_carlo(arr, n_days=500, paths=100,detrend=True,starting_point = 1.1):
"""Monte carlo simulation for date range - start date and end date
n is number of simualations
detrend will take trend out of data - i.e. absolute all values and assign + or - to returns
with 50/50 probability"""
if detrend:
ss = np.absolute(arr.reshape(1,-1))
ones = np.random.choice([-1,1],len(arr))
ss = ss * ones
sampled_returns = np.random.choice(ss[0], size=(n_days, paths)) + 1
#print(sampled_returns)
else:
        sampled_returns = np.random.choice(arr.reshape(1, -1)[0], size=(n_days, paths)) + 1
date_list = [(datetime.today() + timedelta(days = i)) for i in range(n_days)]
cum_returns = np.cumprod(sampled_returns,axis=0) * starting_point
#df_price = pd.DataFrame(cum_returns, index = date_list)
return [date_list,cum_returns]
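# Minimal usage sketch (hypothetical daily returns): simulate 20 price paths of 100 days each.
def _demo_monte_carlo():
    daily_returns = np.random.normal(0.0, 0.005, 250)  # roughly 0.5% daily moves
    dates, paths = monte_carlo(daily_returns, n_days=100, paths=20,
                               detrend=True, starting_point=initial_price)
    return dates, paths  # paths has shape (100, 20)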
def p_and_l_np(arr, all_trades):
arr = np.array(arr)
trades = np.array(all_trades)
current_position = np.cumsum(trades)
pos_value = arr * current_position
cost = -arr*trades
p_and_l = (pos_value + np.cumsum(cost))/(arr)
return p_and_l, current_position
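# Minimal usage sketch (hypothetical prices and trades): mark-to-market P&L of a round trip.
def _demo_p_and_l_np():
    prices = [1.10, 1.11, 1.12, 1.11]
    trades = [1, 0, -1, 0]              # buy one unit, later sell it back
    p_and_l, position = p_and_l_np(prices, trades)
    return p_and_l, position            # position ends flat at 0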
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
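# Minimal usage sketch: the stride-tricks view allows rolling statistics without copying,
# e.g. a 20-period rolling mean over a synthetic price series.
def _demo_rolling_window(window=20):
    prices = np.cumsum(np.random.normal(0.0, 0.01, 200)) + 1.10
    windows = rolling_window(prices, window)  # shape (181, 20), a view into prices
    return windows.mean(axis=1)               # one rolling-mean value per window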
def mean_reversion_np(arr,pda=50,devs=1,window=20):
    arr = np.array(arr)
"""
Preprocess dataset
usage:
preprocess.py [options] <wav-dir>...
options:
--output-dir=<dir> Directory where processed outputs are saved. [default: data_dir].
-h, --help Show help message.
"""
import os
from docopt import docopt
import numpy as np
import math, pickle, os
from audio import *
from hparams import hparams as hp
from utils import *
from tqdm import tqdm
class MelVocoder:
def __init__(self):
self._mel_basis = None
def fft(self, y, sample_rate, use_preemphasis=False):
if use_preemphasis:
pre_y = self.preemphasis(y)
else:
pre_y = y
D = self._stft(pre_y, sample_rate)
return D.transpose()
def ifft(self, y, sample_rate):
y = y.transpose()
return self._istft(y, sample_rate)
def melspectrogram(self, y, sample_rate, num_mels, use_preemphasis=False):
if use_preemphasis:
pre_y = self.preemphasis(y)
else:
pre_y = y
D = self._stft(pre_y, sample_rate)
S = self._amp_to_db(self._linear_to_mel(np.abs(D), sample_rate, num_mels))
return self._normalize(S)
def preemphasis(self, x):
return signal.lfilter([1, -0.97], [1], x)
def _istft(self, y, sample_rate):
n_fft, hop_length, win_length = self._stft_parameters(sample_rate)
return librosa.istft(y, hop_length=hop_length, win_length=win_length)
def _stft(self, y, sample_rate):
n_fft, hop_length, win_length = self._stft_parameters(sample_rate)
return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window='hann')
def _linear_to_mel(self, spectrogram, sample_rate, num_mels):
if self._mel_basis is None:
self._mel_basis = self._build_mel_basis(sample_rate, num_mels)
        return np.dot(self._mel_basis, spectrogram)
import os
import numpy as np
import pandas as pd
import nibabel as nib
from nilearn import plotting
import xml.etree.ElementTree as ET
class Connectome:
"""Connectome class
This class contains the main functions required for loading and manipulating connectome graphs
"""
def __init__(self,
filename:str,
scale:int = 1,
parcellated_image_path:str = None):
"""__init__ to initialise class
Arguments:
filename {str} -- full pathname for the graph csv of graphml
"""
        # initialise input variables
self.filename = filename
self.scale = scale
        if parcellated_image_path is None:
root_dir = os.path.split(os.path.dirname(__file__))[0]
if root_dir.endswith('mbmgroup-refs'):
pass
elif root_dir.endswith('site-packages'):
directory_list = os.path.split(os.path.dirname(__file__))[0].split('/')
root_dir = '/'.join(directory_list[:-4])
else:
pass
self.parcellated_image_path = root_dir + f'/Data/mni_parcellations/mni-parcellation-scale{self.scale}_atlas.nii.gz'
else:
self.parcellated_image_path = parcellated_image_path
#read graph format
if filename.endswith('.graphml'):
self._read_graphml()
elif filename.endswith('.csv'):
self._read_csv()
#get tags needed
self._region_tag = self._find_tag_graphml(tag = 'dn_name')
self._number_of_fibers_tag = self._find_tag_graphml(tag = 'number_of_fibers')
#initialise graph variables
self.n_Nodes = 0
        self.node_id = np.zeros((0, 2))
#================================================================
#
# File name : BipedalWalker-v3_PPO
# Author : PyLessons
# Created date: 2020-10-18
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/Reinforcement_Learning
# Description : BipedalWalker-v3 PPO continuous agent
# TensorFlow : 2.3.1
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # -1:cpu, 0:first gpu
import random
import gym
import pylab
import numpy as np
import tensorflow as tf
from tensorboardX import SummaryWriter
#tf.config.experimental_run_functions_eagerly(True) # used for debuging and development
tf.compat.v1.disable_eager_execution()  # usually used for the fastest performance
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.optimizers import Adam, RMSprop, Adagrad, Adadelta
from tensorflow.keras import backend as K
import copy
from threading import Thread, Lock
from multiprocessing import Process, Pipe
import time
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError:
pass
class Environment(Process):
def __init__(self,
env_idx,
child_conn,
env_name,
state_size,
action_size,
visualize=False):
super(Environment, self).__init__()
self.env = gym.make(env_name)
self.is_render = visualize
self.env_idx = env_idx
self.child_conn = child_conn
self.state_size = state_size
self.action_size = action_size
def run(self):
super(Environment, self).run()
state = self.env.reset()
state = np.reshape(state, [1, self.state_size])
self.child_conn.send(state)
while True:
action = self.child_conn.recv()
#if self.is_render and self.env_idx == 0:
#self.env.render()
state, reward, done, info = self.env.step(action)
state = np.reshape(state, [1, self.state_size])
if done:
state = self.env.reset()
state = np.reshape(state, [1, self.state_size])
self.child_conn.send([state, reward, done, info])
class Actor_Model:
def __init__(self, input_shape, action_space, lr, optimizer):
X_input = Input(input_shape)
self.action_space = action_space
X = Dense(512,
activation="relu",
kernel_initializer=tf.random_normal_initializer(
stddev=0.01))(X_input)
X = Dense(
256,
activation="relu",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(X)
X = Dense(
64,
activation="relu",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(X)
output = Dense(self.action_space, activation="tanh")(X)
self.Actor = Model(inputs=X_input, outputs=output)
self.Actor.compile(loss=self.ppo_loss_continuous,
optimizer=optimizer(lr=lr))
#print(self.Actor.summary())
def ppo_loss_continuous(self, y_true, y_pred):
        advantages = y_true[:, :1]
        actions = y_true[:, 1:1 + self.action_space]
        logp_old_ph = y_true[:, 1 + self.action_space]
LOSS_CLIPPING = 0.2
logp = self.gaussian_likelihood(actions, y_pred)
ratio = K.exp(logp - logp_old_ph)
p1 = ratio * advantages
p2 = tf.where(advantages > 0, (1.0 + LOSS_CLIPPING) * advantages,
(1.0 - LOSS_CLIPPING) * advantages) # minimum advantage
actor_loss = -K.mean(K.minimum(p1, p2))
return actor_loss
def gaussian_likelihood(self, actions, pred): # for keras custom loss
log_std = -0.5 * np.ones(self.action_space, dtype=np.float32)
pre_sum = -0.5 * (
((actions - pred) /
(K.exp(log_std) + 1e-8))**2 + 2 * log_std + K.log(2 * np.pi))
return K.sum(pre_sum, axis=1)
def predict(self, state):
return self.Actor.predict(state)
class Critic_Model:
def __init__(self, input_shape, action_space, lr, optimizer):
X_input = Input(input_shape)
old_values = Input(shape=(1, ))
V = Dense(512,
activation="relu",
kernel_initializer=tf.random_normal_initializer(
stddev=0.01))(X_input)
V = Dense(
256,
activation="relu",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(V)
V = Dense(
64,
activation="relu",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(V)
value = Dense(1, activation=None)(V)
self.Critic = Model(inputs=[X_input, old_values], outputs=value)
self.Critic.compile(loss=[self.critic_PPO2_loss(old_values)],
optimizer=optimizer(lr=lr))
def critic_PPO2_loss(self, values):
def loss(y_true, y_pred):
LOSS_CLIPPING = 0.2
clipped_value_loss = values + K.clip(y_pred - values,
-LOSS_CLIPPING, LOSS_CLIPPING)
v_loss1 = (y_true - clipped_value_loss)**2
v_loss2 = (y_true - y_pred)**2
value_loss = 0.5 * K.mean(K.maximum(v_loss1, v_loss2))
#value_loss = K.mean((y_true - y_pred) ** 2) # standard PPO loss
return value_loss
return loss
def predict(self, state):
return self.Critic.predict([state, np.zeros((state.shape[0], 1))])
class PPOAgent:
# PPO Main Optimization Algorithm
def __init__(self, env_name, model_name=""):
# Initialization
# Environment and PPO parameters
self.env_name = env_name
self.env = gym.make(env_name)
self.action_size = self.env.action_space.shape[0]
self.state_size = self.env.observation_space.shape
self.EPISODES = 200000 # total episodes to train through all environments
self.episode = 0 # used to track the episodes total count of episodes played through all thread environments
self.max_average = 0 # when average score is above 0 model will be saved
self.lr = 0.00025
self.epochs = 10 # training epochs
self.shuffle = True
self.Training_batch = 512
#self.optimizer = RMSprop
self.optimizer = Adam
self.replay_count = 0
self.writer = SummaryWriter(comment="_" + self.env_name + "_" +
self.optimizer.__name__ + "_" +
str(self.lr))
# Instantiate plot memory
        self.scores_, self.episodes_, self.average_ = [], [], []  # used in matplotlib plots
# Create Actor-Critic network models
self.Actor = Actor_Model(input_shape=self.state_size,
action_space=self.action_size,
lr=self.lr,
optimizer=self.optimizer)
self.Critic = Critic_Model(input_shape=self.state_size,
action_space=self.action_size,
lr=self.lr,
optimizer=self.optimizer)
self.Actor_name = f"{self.env_name}_PPO_Actor.h5"
self.Critic_name = f"{self.env_name}_PPO_Critic.h5"
#self.load() # uncomment to continue training from old weights
# do not change bellow
self.log_std = -0.5 * np.ones(self.action_size, dtype=np.float32)
self.std = np.exp(self.log_std)
def act(self, state):
# Use the network to predict the next action to take, using the model
pred = self.Actor.predict(state)
low, high = -1.0, 1.0 # -1 and 1 are boundaries of tanh
action = pred + np.random.uniform(low, high,
size=pred.shape) * self.std
action = np.clip(action, low, high)
logp_t = self.gaussian_likelihood(action, pred, self.log_std)
return action, logp_t
def gaussian_likelihood(self, action, pred, log_std):
# https://github.com/hill-a/stable-baselines/blob/master/stable_baselines/sac/policies.py
pre_sum = -0.5 * (
((action - pred) /
(np.exp(log_std) + 1e-8))**2 + 2 * log_std + np.log(2 * np.pi))
return np.sum(pre_sum, axis=1)
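    # The expression above is the log-density of a diagonal Gaussian policy:
    #     log p(a | mu, sigma) = -0.5 * sum_i [((a_i - mu_i) / sigma_i)^2 + 2*log(sigma_i) + log(2*pi)]
    # with pred = mu and sigma = exp(log_std); the 1e-8 term only guards against division by zero.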
def discount_rewards(self, reward): #gaes is better
# Compute the gamma-discounted rewards over an episode
# We apply the discount and normalize it to avoid big variability of rewards
gamma = 0.99 # discount rate
running_add = 0
discounted_r = np.zeros_like(reward)
for i in reversed(range(0, len(reward))):
running_add = running_add * gamma + reward[i]
discounted_r[i] = running_add
discounted_r -= np.mean(discounted_r) # normalizing the result
        discounted_r /= (np.std(discounted_r) + 1e-8)  # divide by standard deviation
return discounted_r
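    # Worked example (gamma = 0.99): rewards [1, 0, 1] give raw discounted returns
    #     [1 + 0.99*0 + 0.99**2*1, 0 + 0.99*1, 1] = [1.9801, 0.99, 1.0]
    # before the mean/std normalization applied above.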
def get_gaes(self,
rewards,
dones,
values,
next_values,
gamma=0.99,
lamda=0.90,
normalize=True):
deltas = [
r + gamma * (1 - d) * nv - v
for r, d, nv, v in zip(rewards, dones, next_values, values)
]
deltas = np.stack(deltas)
gaes = copy.deepcopy(deltas)
for t in reversed(range(len(deltas) - 1)):
gaes[t] = gaes[t] + (1 - dones[t]) * gamma * lamda * gaes[t + 1]
target = gaes + values
if normalize:
gaes = (gaes - gaes.mean()) / (gaes.std() + 1e-8)
return np.vstack(gaes), np.vstack(target)
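    # The recursion above is Generalized Advantage Estimation (GAE):
    #     delta_t = r_t + gamma * (1 - done_t) * V(s_{t+1}) - V(s_t)
    #     A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}
    # and the critic regression targets are A_t + V(s_t).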
def replay(self, states, actions, rewards, dones, next_states, logp_ts):
# reshape memory to appropriate shape for training
states = np.vstack(states)
next_states = np.vstack(next_states)
actions = np.vstack(actions)
logp_ts = np.vstack(logp_ts)
# Get Critic network predictions
values = self.Critic.predict(states)
next_values = self.Critic.predict(next_states)
# Compute discounted rewards and advantages
#discounted_r = self.discount_rewards(rewards)
#advantages = np.vstack(discounted_r - values)
advantages, target = self.get_gaes(rewards, dones, np.squeeze(values),
np.squeeze(next_values))
'''
pylab.plot(adv,'.')
pylab.plot(target,'-')
ax=pylab.gca()
ax.grid(True)
pylab.subplots_adjust(left=0.05, right=0.98, top=0.96, bottom=0.06)
pylab.show()
if str(episode)[-2:] == "00": pylab.savefig(self.env_name+"_"+self.episode+".png")
'''
# stack everything to numpy array
# pack all advantages, predictions and actions to y_true and when they are received
# in custom loss function we unpack it
        y_true = np.hstack([advantages, actions, logp_ts])
import yaml
import sys, math
import numpy as np
import os
import copy
from .dynamics import DynamicSystem, State
from .lookup import LookUpTable
EPS = 1e-4 # small value for divison by zero
FT2MTR = 0.3048 # ft to meter
SQRT_3 = 1.7320508075688772 # sqrt(3)
TWO_D_PI = 0.6366197723675814 # 2/pi
R2D = 180/math.pi # Rad to deg
D2R = 1/R2D
class WindDynamics(DynamicSystem):
"""Wind model for aircraft simulations with Dryden Turbulence model."""
_observations = ["WIND_N", "WIND_E", "WIND_D"]
def __init__(self, params, dt):
super(WindDynamics, self).__init__(dt)
self.eta_norm = 1.0/np.sqrt(dt)
self.turbulence_level = params['TURB_LVL']
self.wind_dir = params['WIND_DIR']*D2R
self.wind_speed = params['WIND_SPD']
self.wind_mean_ned = self.wind_speed*np.array([np.cos(self.wind_dir), np.sin(self.wind_dir), 0], dtype=np.float)
self.__register_states()
self.TEP = LookUpTable(7,12) # Turbulence Exceedence Probability Lookup Table
self.TEP << 500.0 << 1750.0 << 3750.0 << 7500.0 << 15000.0 << 25000.0 << 35000.0 << 45000.0 << 55000.0 << 65000.0 << 75000.0 << 80000.0 \
<< 1 << 3.2 << 2.2 << 1.5 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 \
<< 2 << 4.2 << 3.6 << 3.3 << 1.6 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 \
<< 3 << 6.6 << 6.9 << 7.4 << 6.7 << 4.6 << 2.7 << 0.4 << 0.0 << 0.0 << 0.0 << 0.0 << 0.0 \
<< 4 << 8.6 << 9.6 << 10.6 << 10.1 << 8.0 << 6.6 << 5.0 << 4.2 << 2.7 << 0.0 << 0.0 << 0.0 \
<< 5 << 11.8 << 13.0 << 16.0 << 15.1 << 11.6 << 9.7 << 8.1 << 8.2 << 7.9 << 4.9 << 3.2 << 2.1 \
<< 6 << 15.6 << 17.6 << 23.0 << 23.6 << 22.1 << 20.0 << 16.0 << 15.1 << 12.1 << 7.9 << 6.2 << 5.1 \
<< 7 << 18.7 << 21.5 << 28.4 << 30.2 << 30.7 << 31.0 << 25.2 << 23.1 << 17.5 << 10.7 << 8.4 << 7.2
def __register_states(self):
self._register_state('us', np.zeros(1, dtype=np.float)) # States related to u
self._register_state('vs', np.zeros(2, dtype=np.float)) # States related to v
self._register_state('ws', np.zeros(2, dtype=np.float)) # States related to w
def reset(self):
self.state['us'] = np.zeros(1, dtype=np.float)
self.state['vs'] = np.zeros(2, dtype=np.float)
self.state['ws'] = np.zeros(2, dtype=np.float)
def step_start(self):
"""Randomly generate white noise for turbulence input
"""
        self.eta = np.random.randn(3)