import numpy as np
import os
import sys
import cv2
from cython_modules import lfit_cython
import csv
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import gridplot
from bokeh.io import export_png
from scipy.io import wavfile
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
from collections import Counter
from config import * #FPS, F_0, AUDIO_RATE, FOURCC, FIND_OSCILLATING, NUM_FRAMES_IN_HISTORY, MAX_KALMAN_LEARNING_TIME
class MovingObj:
def __init__(self, center):
self.previous_centers = [center]
self.kalman = self.prepareKF()
self.updateKF()
self.num_frames_detected = 1
self.num_not_found = 0
self.is_being_tracked = False
self.tracked_frame_indices = []
self.is_oscillating = False
self.diff_at_f0=(0.0,0.0)
def prepareKF(self):
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array(
[[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = 0.3 * np.eye(4).astype(np.float32)
kalman.measurementNoiseCov = 0.3 * np.eye(2).astype(np.float32)
return kalman
def updateKF(self):
self.kalman.correct(
np.array(self.previous_centers[-1], dtype=np.float32))
def firstcenter(self):
return self.previous_centers[0]
def lastcenter(self):
return self.previous_centers[-1]
def predictnow(self):
if self.num_frames_detected < MAX_KALMAN_LEARNING_TIME or not self.is_being_tracked:
if self.num_frames_detected > NUM_FRAMES_IN_HISTORY:
#linear extrapolation
pos = 2 * \
np.array(self.previous_centers[-1]) - \
np.array(self.previous_centers[-2])
return list(pos)
else:
return list(self.lastcenter())
if self.is_being_tracked:
return self.kalman.predict()[:2][:, 0]
def addcenter(self, cen):
self.previous_centers.append((cen[0], cen[1]))
self.updateKF()
self.num_frames_detected += 1
self.num_not_found = 0
if self.num_frames_detected >= 3:
self.is_being_tracked = True
if FIND_OSCILLATING:
self.determine_oscillation(
fps=FPS, f_0=F_0, min_frames=100) # CHANGE 1000 TO 100
def drop(self):
self.num_not_found += 1
if self.num_not_found > MAX_KALMAN_LEARNING_TIME:
self.is_being_tracked = False
def track_points(self):
if self.is_being_tracked:
return (self.previous_centers[-2], self.previous_centers[-1])
def get_mean_drift(self, min_frames=100):
"""
min_frames: the minimum number of frames an object must be tracked for to be included in the calculation
"""
if self.num_frames_detected >= min_frames:
initial_center = self.firstcenter()
final_center = self.lastcenter()
this_x_drift = (
final_center[0] - initial_center[0]) / float(self.num_frames_detected)
this_y_drift = (
final_center[1] - initial_center[1]) / float(self.num_frames_detected)
self.mean_x_drift = this_x_drift
self.mean_y_drift = this_y_drift
else:
self.mean_x_drift = None
self.mean_y_drift = None
def determine_oscillation(self, fps=FPS, f_0=F_0, min_frames=100):
"""
fps: sampling frequency of the motion, i.e. the number of frames recorded per second
f_0: the frequency at which we are investigating oscillation
min_frames: the minimum number of frames an object must be tracked for to be included in the calculation
"""
if fps < 2 * f_0:
raise ValueError(
'sampling frequency does not satisfy Nyquist sampling theorem!')
if self.num_frames_detected < min_frames:
self.fft_frequencies = None
self.x_fft = None
self.y_fft = None
self.is_oscillating = False
return
initial_center = self.firstcenter()
x_pos = np.array([c[0] - initial_center[0]
for c in self.previous_centers])
y_pos = np.array([c[1] - initial_center[1]
for c in self.previous_centers])
n = len(self.previous_centers)
len_out = n // 2 + 1
maxf = fps / 2.0 if n % 2 == 0 else fps * (n - 1) / (2.0 * n)
self.fft_frequencies = np.log10(
maxf * np.arange(1, len_out) / len_out).astype(np.float32)
f_0_index = np.argmin(np.abs(self.fft_frequencies - np.log10(f_0)))
x_fft = np.fft.rfft(np.array(x_pos))
y_fft = np.fft.rfft(np.array(y_pos))
x_amp = np.abs(x_fft).astype(np.float32)
self.x_fft = np.log10(x_amp)[1:] / np.log10(x_amp.max())
y_amp = np.abs(y_fft).astype(np.float32)
self.y_fft = np.log10(y_amp)[1:] / np.log10(y_amp.max())
_iter = 20
_threshold = 0.2
good_frac = 0.5
x_res,x_osc = lfit_cython.linear_ransac1D(
self.fft_frequencies, self.x_fft, _iter, _threshold, good_frac, f_0_index)
y_res,y_osc = lfit_cython.linear_ransac1D(
self.fft_frequencies, self.y_fft, _iter, _threshold, good_frac, f_0_index)
self.is_oscillating = x_osc or y_osc
self.diff_at_f0=(x_res,y_res)
def show_fft(self, p, axis, color='red', display=False):
if axis == 'x':
p.line(self.fft_frequencies, self.x_fft, color=color)
elif axis == 'y':
p.line(self.fft_frequencies, self.y_fft, color=color)
if display:
show(p)
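# A minimal, self-contained sketch (not used by the tracker) of how
# determine_oscillation() builds its log10 frequency grid and picks the bin
# closest to the drive frequency f_0. The sample values (300 frames at 30 fps,
# f_0 = 5 Hz) are hypothetical and chosen purely for illustration.
def _demo_fft_frequency_grid(n=300, fps=30.0, f_0=5.0):
    len_out = n // 2 + 1
    # highest resolvable frequency: Nyquist for even n, slightly lower for odd n
    maxf = fps / 2.0 if n % 2 == 0 else fps * (n - 1) / (2.0 * n)
    # same grid as determine_oscillation(): DC bin dropped, log10 scale
    fft_frequencies = np.log10(maxf * np.arange(1, len_out) / len_out).astype(np.float32)
    f_0_index = np.argmin(np.abs(fft_frequencies - np.log10(f_0)))
    return fft_frequencies, f_0_index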
class Waitbar(object):
def __init__(self, winname, size=[500, 100], color=[0, 0, 255],txtsize=0.5):
self.winname = winname
self.color = np.array(color)
self.window = cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
self.winsize = size
cv2.resizeWindow(self.winname, size[0], size[1])
self.blank = 255 * np.ones((size[1], size[0], 3), dtype=np.uint8)
self.pixel_level = 0
self.start_time = time.time()
self.txtsize=txtsize
def update(self, level):
remaining = self.estimate_time_remaining(level)
image = np.copy(self.blank)
self.pixel_level = int(level * self.winsize[0])
image[int(0.3 * self.winsize[1]):-int(0.3 * self.winsize[1]),
:self.pixel_level, :] = self.color
msg = '{:.2f} % Done'.format(level * 100)
cv2.putText(image, msg, (0, int(0.2 * self.winsize[1])),
cv2.FONT_HERSHEY_COMPLEX, self.txtsize, (0, 0, 0))
sec = int(remaining - 60 * (remaining // 60))
msg = 'Time remaining: {} min, {} seconds'.format(
int(remaining // 60), sec)
cv2.putText(image, msg, (0, int(0.9 * self.winsize[1])),
cv2.FONT_HERSHEY_COMPLEX, self.txtsize, (0, 0, 0))
return image
def estimate_time_remaining(self, level):
speed = level / (time.time() - self.start_time)
remaining = (1 - level) / speed  # remaining fraction divided by fraction completed per second
return remaining
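# Hedged usage sketch for Waitbar (nothing in this module calls it directly):
# the window name and frame count below are hypothetical. update() only renders
# the progress image; displaying it is left to the caller.
def _demo_waitbar(total_frames=200):
    wb = Waitbar('progress demo', size=[500, 100], txtsize=0.5)
    for i in range(1, total_frames + 1):
        img = wb.update(i / float(total_frames))
        cv2.imshow(wb.winname, img)
        cv2.waitKey(1)
    cv2.destroyWindow(wb.winname)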
def nms(data, th=0.1, w=13):
xs = data[0]
ys = data[1]
scores = data[2]
indices = np.argsort(scores)[::-1]
idxs = indices[:]
picked = []
while(len(indices) > 0):
picked.append(indices[0])
indices = indices[1:][~np.bitwise_and(np.abs(
xs[indices[0]] - xs[indices[1:]]) < w, np.abs(ys[indices[0]] - ys[indices[1:]]) < w)]
return [xs[picked], ys[picked]]
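# Toy example of nms() on three hypothetical detections: the second peak lies
# inside the w x w suppression window of the strongest one and is dropped.
def _demo_nms():
    xs = np.array([10.0, 12.0, 50.0])
    ys = np.array([10.0, 11.0, 50.0])
    scores = np.array([0.9, 0.8, 0.7])
    kept_x, kept_y = nms([xs, ys, scores], th=0.1, w=13)
    # kept_x -> [10., 50.], kept_y -> [10., 50.]
    return kept_x, kept_y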
def computepairwise(matrix1, matrix2):
assert len(matrix1.shape) == 2, 'First argument is not 2D'
assert len(matrix2.shape) == 2, 'Second argument is not 2D'
assert matrix1.shape[1] == matrix2.shape[
1], 'Matrices have different number of features'
result = np.zeros((matrix1.shape[0], matrix2.shape[0]), dtype=np.float32)
for feature in range(matrix1.shape[1]):
diff = (np.repeat(matrix1[:, feature][:, None], matrix2.shape[
0], axis=1) - matrix2[:, feature][:, None].T) # ,axis=1
# print(diff.shape,matrix1.shape[0],matrix2.shape[0])
assert diff.shape == (matrix1.shape[0], matrix2.shape[
0]), 'there is a bug in your program'
result += diff**2
return np.sqrt(result)
def matchcentertoobj(centers, tracked_objs, frame_idx):
current_predictions = np.array(
[list(obj.lastcenter()) for obj in tracked_objs]) # list(obj.lastcenter())
# current_predictions=current_predictions[:,:,0] #obj.predictnow()
# print(current_predictions.shape)
# Nx2 array
# centers is Mx2 array
# compute pairwise distances (NxM)
# if M<N be careful
# if M >= N, possibly match existing centers to new centers if distance is below a threshold,
# maintain a list of used indices
# match existing centers to that new center with which it has minimum
# distance
centers = np.array(centers)
# print(current_predictions.shape)
distance = computepairwise(current_predictions, centers) # NxM
# print(distance)
possible_matches = np.argmin(distance, axis=1)
used_indices = []
for idx, match in enumerate(possible_matches):
# if match occurs more than once, choose the minimum distance
candidates = []
candidates.append(distance[idx, match])
for idx2 in range(len(possible_matches[idx + 1:])):
if match == possible_matches[idx + 1 + idx2]:
candidates.append(distance[idx + 1 + idx2, match])
# if len(candidates)>1:
# pass
# print('Duplicate matches found') #this happens VERY often
if np.argmin(candidates) != 0:
# this means another point has lower distance than this point, so
# this point has no matches
tracked_objs[idx].drop()
else:
# print(candidates)
if candidates[0] < 50:
if possible_matches[idx] not in used_indices:
tracked_objs[idx].addcenter(centers[possible_matches[idx]])
tracked_objs[idx].tracked_frame_indices.append(frame_idx)
used_indices.append(possible_matches[idx])
else:
tracked_objs[idx].drop()
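# Minimal sketch of the matching idea used by matchcentertoobj(): pairwise
# distances between the last known object centers (N x 2) and newly detected
# centers (M x 2), followed by a greedy nearest-neighbour assignment. All
# coordinates below are hypothetical.
def _demo_center_matching():
    last_centers = np.array([[10.0, 10.0], [40.0, 40.0]])               # N = 2 tracked objects
    new_centers = np.array([[11.0, 9.0], [80.0, 80.0], [41.0, 41.0]])   # M = 3 detections
    distance = computepairwise(last_centers, new_centers)               # N x M distance matrix
    matches = np.argmin(distance, axis=1)                               # best detection per object
    # matches -> [0, 2]: object 0 pairs with detection 0, object 1 with detection 2
    return matches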
def draw_full_paths_of_these_beads(initial_frame, beads_ids, tracked_objs, color='green'):
'''
initial_frame: A clean frame on which paths are to be drawn
beads_ids: a list containing the ids of the beads whose full paths should be drawn
'''
written_frame = initial_frame[:]
blank = np.zeros(
(initial_frame.shape[0], initial_frame.shape[1]), dtype=np.uint8)
for idx in beads_ids:
obj = tracked_objs[idx]
for cidx in range(1, len(obj.previous_centers)):
blank = cv2.line(blank, obj.previous_centers[
cidx - 1], obj.previous_centers[cidx], 255, 1)
textid = str(idx)
cv2.putText(written_frame, textid, obj.lastcenter(),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
channels = {'blue': 0, 'green': 1, 'red': 2}
idx = channels[color]
data32 = initial_frame[:, :, idx].astype(np.int32)
np.clip(data32 + blank, 0, 255, out=data32)
written_frame[:, :, idx] = data32.astype(np.uint8)
return written_frame
def drawtrajectory(previous, tracked_objs, this_frame, bead_indices, color='green'):
# previous: a dark frame-like matrix with only the trajectories drawn
# this_frame: frame on which to draw trajectory
channels = {'blue': 0, 'green': 1, 'red': 2}
for _beadidx in bead_indices:
if tracked_objs[_beadidx].is_being_tracked:
previous = cv2.line(previous, tracked_objs[_beadidx].track_points()[
0], tracked_objs[_beadidx].track_points()[1], 255, 1)
idx = channels[color]
#this_frame[:,:,:] = this_frame[:,:,:]*((previous[:,:])[:,:,np.newaxis])
data32 = this_frame[:, :, idx].astype(np.int32)
np.clip(data32 + previous, 0, 255, out=data32)
this_frame[:, :, idx] = data32.astype(np.uint8)
return previous, this_frame
def writedistances(frame, tracked_objs):
finddist = lambda tp1, tp2: np.sqrt(
(tp1[0] - tp2[0])**2 + (tp1[1] - tp2[1])**2)
copied = frame[:]
for idx, obj in enumerate(tracked_objs):
if True: # obj.num_frames_detected > 5:
center = lambda: tuple(
(np.array(obj.previous_centers[0]) + np.array(obj.previous_centers[-1])) // 2)
textid = str(idx)
cv2.putText(copied, textid, obj.lastcenter(),
cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255))
return copied
def get_mean_drift(objs, min_frames=100):
"""
objs: tracked_objs, a list of beads (MovingObj) being tracked
min_frames: the minimum number of frames an object must be tracked for to be included in the calculation
"""
x_drift = 0.0
y_drift = 0.0
num_beads_counted = 0
for obj in objs:
if obj.num_frames_detected >= min_frames:
num_beads_counted += 1
initial_center = obj.previous_centers[0]
final_center = obj.previous_centers[-1]
this_x_drift = (
final_center[0] - initial_center[0]) / float(obj.num_frames_detected)
this_y_drift = (
final_center[1] - initial_center[1]) / float(obj.num_frames_detected)
x_drift += this_x_drift
y_drift += this_y_drift
# return the average per-frame drift over all beads that met the min_frames cut
return (x_drift / num_beads_counted, y_drift / num_beads_counted) if num_beads_counted else (0.0, 0.0)
def save_beads(filename, tracked_objs):
with open(filename, 'w') as f:
pos_dict = {idx: obj.previous_centers for idx,
obj in enumerate(tracked_objs)}
time_dict = {idx: obj.tracked_frame_indices for idx,
obj in enumerate(tracked_objs)}
combined = [pos_dict, time_dict]
f.write(str(combined))
def load_beads(filename):
loaded_beads = []
with open(filename, 'r') as f:
beads_dict = eval(f.read())[0]
for bead_num in sorted(beads_dict.keys()):
_bead = MovingObj((0, 0))
_bead.previous_centers = beads_dict[bead_num]
_bead.num_frames_detected = len(_bead.previous_centers)
loaded_beads.append(_bead)
return loaded_beads
def text2csv(fname):
with open(fname, 'r') as f:
bead_positions = eval(f.read())[0]
f = open(fname[:fname.rfind('.')] + '.csv', 'w')
writer = csv.writer(f)
bead_numbers = sorted(list(bead_positions.keys()), key=lambda x: len(
bead_positions[x]), reverse=True)
duplicated = []
for b in bead_numbers:
duplicated.extend([str(b) + '-X', str(b) + '-Y'])
writer.writerow(duplicated)
max_idx = len(bead_positions[bead_numbers[0]])
for idx in range(max_idx):
beads_in_this_row = len(
[b for b in bead_numbers if len(bead_positions[b]) > idx])
row = []
for b in bead_numbers[:beads_in_this_row]:
row.extend(list(bead_positions[b][idx]))
writer.writerow(row)
f.close()
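# Example of the CSV layout produced by text2csv() for two hypothetical beads
# (bead 3 tracked for 3 frames, bead 7 for 2 frames). Beads are ordered by
# descending track length and each contributes an X and a Y column:
#
#   3-X, 3-Y, 7-X, 7-Y
#   120, 85, 240, 310
#   121, 86, 241, 309
#   122, 88
#
# Shorter tracks simply stop contributing values in later rows.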
def highlight_stopped_beads(frame, tracked_objs, total_frames, bead_radius, std_threshold=1.0, strict=True, end=-1):
n_stopped = 0
stopped_idxs = []
for idx, obj in enumerate(tracked_objs):
if len(obj.previous_centers) < 2:
is_stopped = True
elif len(obj.previous_centers) >= 0.5 * total_frames:
cen_x, cen_y = list(zip(*obj.previous_centers[end - 100:end]))
cx, cy = np.std(cen_x) <= std_threshold, np.std(
cen_y) <= std_threshold
# conditions for satisfying stopping criteria
is_stopped = (cx and cy) if strict else (cx or cy)
else:
is_stopped = False
if is_stopped:
n_stopped += 1
stopped_idxs.append(idx)
frame = cv2.circle(
frame, obj.previous_centers[-1], bead_radius, (0, 0, 255), -1)
print(('Number of stopped beads={}'.format(n_stopped)))
return frame, n_stopped, stopped_idxs
def save_to_audio(tracked_objs, obj_nums, folder):
for num in obj_nums:
bx, by = list(zip(*tracked_objs[num].previous_centers))
bx, by = np.array(bx), np.array(by)
import numpy as np
import time
import matplotlib.pyplot as plt
# -------- TASK A --------
INDEX = 175715
c = int(str(INDEX)[-2])
d = int(str(INDEX)[-1])
e = int(str(INDEX)[3])
f = int(str(INDEX)[2])
a1 = 5 + e
a2 = -1
a3 = -1
N = 9*c*d
A = np.zeros((N, N))
i, j = np.indices(A.shape)
A[i == j+2] = a3
A[i == j+1] = a2
A[i == j] = a1
A[i == j-1] = a2
A[i == j-2] = a3
b = np.fromfunction(lambda i, j: np.sin(j*(f+1)), (1, N)).T  # transpose to an N x 1 column vector so the solvers below broadcast correctly
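# For reference, the index masks above build a symmetric pentadiagonal (banded)
# matrix; for a hypothetical N = 5 it would look like:
#
#   [[a1, a2, a3,  0,  0],
#    [a2, a1, a2, a3,  0],
#    [a3, a2, a1, a2, a3],
#    [ 0, a3, a2, a1, a2],
#    [ 0,  0, a3, a2, a1]]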
# -------- TASK B GAUSS --------
def gauss(A, b, min_norm):
iter = 0
x = np.zeros((b.size, 1))
L = np.tril(A)
U = A - L
current_norm = np.inf
while current_norm > min_norm:
x = np.linalg.inv(L)@(b - U@x)
current_norm = np.linalg.norm(np.matmul(A, x) - b)
iter += 1
assert not np.isnan(current_norm), "Norm is nan"
assert not np.isinf(current_norm), "Norm is inf"
return x, iter
result, iter = gauss(A, b, 10 ** -6)
print("Gauss-Seidel iterations:", iter)
# -------- TASK B JACOBI --------
def jacobi(A, b, min_norm):
iter = 0
x = np.zeros((b.size, 1))
L = np.tril(A, -1)
U = np.triu(A, 1)
D = np.diag(A).reshape(-1, 1)  # column vector so the divisions below scale each row by its diagonal entry
fst = -(L + U) / D
snd = b / D
current_norm = np.inf
while current_norm > min_norm:
x = fst@x + snd
current_norm = np.linalg.norm(np.matmul(A, x) - b)
iter += 1
assert not np.isnan(current_norm), "Norm is nan"
assert not np.isinf(current_norm), "Norm is inf"
return x, iter
result, iter = jacobi(A, b, 10 ** -6)
print("Jacobi iterations:", iter)
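# Optional sanity check (a sketch, not part of the assignment): both iterative
# solvers should agree with np.linalg.solve on a small, diagonally dominant
# test system. The 4x4 matrix and right-hand side below are hypothetical.
def check_solvers():
    A_t = np.array([[10.0, -1.0, -1.0, 0.0],
                    [-1.0, 10.0, -1.0, -1.0],
                    [-1.0, -1.0, 10.0, -1.0],
                    [0.0, -1.0, -1.0, 10.0]])
    b_t = np.array([[1.0], [2.0], [3.0], [4.0]])   # column vector, matching b above
    x_ref = np.linalg.solve(A_t, b_t)
    x_gs, _ = gauss(A_t, b_t, 1e-10)
    x_j, _ = jacobi(A_t, b_t, 1e-10)
    assert np.allclose(x_gs, x_ref) and np.allclose(x_j, x_ref)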
# -------- TASK C --------
# I ran the code above with a1 = 3; for both methods the
# assert not np.isinf(current_norm) check fails, so the methods do not converge
# -------- TASK D --------
def lu(A):
n = A.shape[0]
U = A.copy()
L = np.eye(n, dtype=np.double)
for i in range(n):
factor = U[i + 1:, i] / U[i, i]
L[i + 1:, i] = factor
U[i + 1:] -= factor[:, np.newaxis] * U[i]
return L, U
def forward_substitution(L, b):
n = L.shape[0]
y = np.zeros_like(b, dtype=np.double);
y[0] = b[0] / L[0, 0]
for i in range(1, n):
y[i] = (b[i] - np.dot(L[i, :i], y[:i])) / L[i, i]
return y
def back_substitution(U, y):
n = U.shape[0]
x = np.zeros_like(y, dtype=np.double);
x[-1] = y[-1] / U[-1, -1]
for i in range(n - 2, -1, -1):
x[i] = (y[i] - np.dot(U[i, i:], x[i:])) / U[i, i]
return x
def lu_solve(A, b):
L, U = lu(A)
b = np.squeeze(b)
y = forward_substitution(L, b)
return back_substitution(U, y)
a1 = 3
x_test = lu_solve(A, b)
print("Residual norm for LU:", np.linalg.norm(np.matmul(A, x_test) - np.squeeze(b)))
# -------- TASK E --------
a1 = 5 + e
Ns = [100, 500, 1000, 2000, 3000]
czasy_gauss = []
czasy_jacobi = []
czasy_lu = []
for i in Ns:
N = i
A = np.zeros((N, N))
i, j = np.indices(A.shape)
"""
Filename: visualization.py
Purpose: Set of go-to plotting functions
Author: <NAME>
Date created: 28.11.2018
Possible problems:
1.
"""
import os
import numpy as np
from tractor.galaxy import ExpGalaxy
from tractor import EllipseE
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, SymLogNorm
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
from skimage.segmentation import find_boundaries
from astropy.visualization import hist
from scipy import stats
import config as conf
import matplotlib.cm as cm
import random
from time import time
from astropy.io import fits
import logging
logger = logging.getLogger('farmer.visualization')
# Random discrete color generator
colors = cm.rainbow(np.linspace(0, 1, 1000))
cidx = np.arange(0, 1000)
random.shuffle(cidx)
colors = colors[cidx]
def plot_background(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
vmin, vmax = brick.background_images[idx].min(), brick.background_images[idx].max()
vmin = -vmax
img = ax.imshow(brick.background_images[idx], cmap='RdGy', norm=SymLogNorm(linthresh=0.03))
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_background.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_mask(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
img = ax.imshow(brick.masks[idx])
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_mask.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_brick(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
backlevel, noisesigma = brick.backgrounds[idx]
vmin, vmax = np.max([backlevel + noisesigma, 1E-5]), brick.images[idx].max()
# vmin, vmax = brick.images[idx].min(), brick.images[idx].max()
if vmin > vmax:
logger.warning(f'{band} brick not plotted!')
return
vmin = -vmax
norm = SymLogNorm(linthresh=0.03)
img = ax.imshow(brick.images[idx], cmap='RdGy', origin='lower', norm=norm)
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_brick.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_blob(myblob, myfblob):
fig, ax = plt.subplots(ncols=4, nrows=1+myfblob.n_bands, figsize=(5 + 5*myfblob.n_bands, 10), sharex=True, sharey=True)
back = myblob.backgrounds[0]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myblob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
# img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
mmask = myblob.masks[0].copy()
mmask[mmask==1] = np.nan
ax[0, 0].imshow(myblob.images[0], **img_opt)
ax[0, 0].imshow(mmask, alpha=0.5, cmap='Greys')
ax[0, 1].imshow(myblob.solution_model_images[0] + noise, **img_opt)
ax[0, 2].imshow(myblob.images[0] - myblob.solution_model_images[0], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[0, 3].imshow(myblob.solution_chi_images[0], cmap='RdGy', vmin = -7, vmax = 7)
ax[0, 0].set_ylabel(f'Detection ({myblob.bands[0]})')
ax[0, 0].set_title('Data')
ax[0, 1].set_title('Model')
ax[0, 2].set_title('Data - Model')
ax[0, 3].set_title('$\chi$-map')
band = myblob.bands[0]
for j, src in enumerate(myblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myblob.solved_chisq[j]
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[0, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
objects = myblob.bcatalog[j]
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0, 0].add_artist(e)
try:
for i in np.arange(myfblob.n_bands):
back = myfblob.backgrounds[i]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myfblob.solution_tractor
# norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
# img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 0].imshow(myfblob.images[i], **img_opt)
ax[i+1, 1].imshow(myfblob.solution_model_images[i] + noise, **img_opt)
ax[i+1, 2].imshow(myfblob.images[i] - myfblob.solution_model_images[i], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 3].imshow(myfblob.solution_chi_images[i], cmap='RdGy', vmin = -7, vmax = 7)
ax[i+1, 0].set_ylabel(myfblob.bands[i])
band = myfblob.bands[i]
for j, src in enumerate(myfblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myfblob.solution_chisq[j, i]
Nres = myfblob.n_residual_sources[i]
topt = dict(color=colors[j], transform = ax[i+1, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[i+1, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
if Nres > 0:
ax[i+1, 3].text(1.05, ystart - 0.4, f'{Nres} residual sources found!', **topt)
res_x = myfblob.residual_catalog[i]['x']
res_y = myfblob.residual_catalog[i]['y']
for x, y in zip(res_x, res_y):
ax[i+1, 3].scatter(x, y, marker='+', color='r')
for s, src in enumerate(myfblob.solution_catalog):
x, y = src.pos
color = colors[s]
for i in np.arange(1 + myfblob.n_bands):
for j in np.arange(4):
ax[i,j].plot([x, x], [y - 10, y - 5], c=color)
ax[i,j].plot([x - 10, x - 5], [y, y], c=color)
except:
logger.warning('Could not plot multiwavelength diagnostic figures')
[[ax[i,j].set(xlim=(0,myfblob.dims[1]), ylim=(0,myfblob.dims[0])) for i in np.arange(myfblob.n_bands+1)] for j in np.arange(4)]
#fig.suptitle(f'Solution for {blob_id}')
fig.subplots_adjust(wspace=0.01, hspace=0, right=0.8)
if myblob._is_itemblob:
sid = myblob.bcatalog['source_id'][0]
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}_S{sid}.pdf'))
else:
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}.pdf'))
plt.close()
def plot_srcprofile(blob, src, sid, bands=None):
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
elif (len(bands) == 1) & (bands[0] == conf.MODELING_NICKNAME):
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_srcprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_srcprofile.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = conf.MODELING_NICKNAME
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
# zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
if band_name == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = band
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
rband = conf.MODELING_NICKNAME + '_' + band_name
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
rband = conf.MODELING_NICKNAME + '_' + band
# information
bid = blob.blob_id
bsrc = blob.bcatalog[blob.bcatalog['source_id'] == sid]
ra, dec = bsrc['RA'][0], bsrc['DEC'][0]
if nickname == conf.MODELING_NICKNAME:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1], bsrc['y_orig'][0] - blob.subvector[0]
else:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1] - blob.mosaic_origin[1] + conf.BRICK_BUFFER, bsrc['y_orig'][0] - blob.subvector[0] - blob.mosaic_origin[0] + conf.BRICK_BUFFER
xp, yp = src.pos[0], src.pos[1]
xps, yps = xp, yp
flux, flux_err = bsrc[f'FLUX_{band}'][0], bsrc[f'FLUXERR_{band}'][0]
mag, mag_err = bsrc[f'MAG_{band}'][0], bsrc[f'MAGERR_{band}'][0]
n_blob = bsrc['N_BLOB'][0]
chi2 = bsrc[f'CHISQ_{band}'][0]
snr = bsrc[f'SNR_{band}'][0]
is_resolved = False
if src.name not in ('PointSource', 'SimpleGalaxy'):
is_resolved = True
col = np.array(bsrc.colnames)[np.array([tcoln.startswith('REFF') for tcoln in bsrc.colnames])][0]
rband = col[len('REFF_'):]
reff, reff_err = np.exp(bsrc[f'REFF_{rband}'][0])*conf.PIXEL_SCALE, np.exp(bsrc[f'REFF_{rband}'][0])*bsrc[f'REFF_ERR_{rband}'][0]*2.303*conf.PIXEL_SCALE
ab, ab_err = bsrc[f'AB_{rband}'][0], bsrc[f'AB_ERR_{rband}'][0]
if ab == -99.0:
ab = -99
ab_err = -99
theta, theta_err = bsrc[f'THETA_{rband}'][0], bsrc[f'THETA_ERR_{rband}'][0]
if 'Sersic' in src.name:
nre, nre_err = bsrc[f'N_{rband}'][0], bsrc[f'N_ERR_{rband}'][0]
# images
img = blob.images[idx]
wgt = blob.weights[idx]
err = 1. / np.sqrt(wgt)
mask = blob.masks[idx]
seg = blob.segmap.copy()
seg[blob.segmap != sid] = 0
mod = blob.solution_model_images[idx]
chi = blob.solution_tractor.getChiImage(idx)
chi[blob.segmap != sid] = 0
res = img - mod
rms = np.median(blob.background_rms_images[idx])
xpix, ypix = np.nonzero(seg)
dx, dy = (np.max(xpix) - np.min(xpix)) / 2., (np.max(ypix) - np.min(ypix)) / 2.
buff = np.min([conf.BLOB_BUFFER, 10.])
xlim, ylim = np.array([-(dx + buff), (dx + buff)]) * conf.PIXEL_SCALE, np.array([-(dy + buff), (dy + buff)]) * conf.PIXEL_SCALE
h, w = np.shape(img)
dw, dh = w - xp - 1, h - yp - 1
extent = np.array([-xp, dw, -yp, dh]) * conf.PIXEL_SCALE
xp0, yp0 = (xp0 - xp) * conf.PIXEL_SCALE, (yp0 - yp) * conf.PIXEL_SCALE
xp, yp = 0., 0.
if is_resolved:
aeff = reff #* conf.PIXEL_SCALE
beff = reff / ab #* conf.PIXEL_SCALE
xa = xp + np.cos(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
ya = yp + np.sin(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
xb = xp + np.cos(np.deg2rad(theta)) * np.array([-1, 1]) * beff
yb = yp + np.sin(np.deg2rad(theta)) * np.array([1, -1]) * beff
# tests
res_seg = res[blob.segmap==sid].flatten()
try:
k2, p_norm = stats.normaltest(res_seg)
except:
k2, p_norm = -99, -99
chi_seg = chi[blob.segmap==sid].flatten()
chi_sig = np.std(chi_seg)
chi_mu = np.mean(chi_seg)
# plotting
fig, ax = plt.subplots(ncols=4, nrows=4, figsize=(15, 15))
# row 1 -- image, info
if rms > 0.95*np.nanmax(img):
normmin = 1.05*np.nanmin(abs(img))
else:
normmin = rms
normmax = 0.95*np.nanmax(img)
if normmin != normmax:
norm = LogNorm(normmin, normmax, clip='True')
else:
norm=None
ax[0,0].imshow(img, norm=norm, cmap='Greys', extent=extent)
ax[0,0].text(0.05, 1.03, band, transform=ax[0,0].transAxes)
ax[0,0].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[0,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[0,0].set(xlim=xlim, ylim=ylim)
ax[0,1].axis('off')
ax[0,2].axis('off')
ax[0,3].axis('off')
ax[0,1].text(0, 0.90,
s = f'Source: {sid} | Blob: {bid} | Brick: {blob.brick_id} | RA: {ra:6.6f}, Dec: {dec:6.6f}',
transform=ax[0,1].transAxes)
if is_resolved:
if 'Sersic' in src.name:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, n: {nre:3.3f}+/-{nre_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, and Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.50,
s = f'{band} | {flux:3.3f}+/-{flux_err:3.3f} uJy | {mag:3.3f}+/-{mag_err:3.3f} AB | S/N = {snr:3.3f}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.30,
s = f'Chi2/N: {chi2:3.3f} | N_blob: {n_blob} | '+r'$\mu(\chi)$'+f'={chi_mu:3.3f}, '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f} | K2-test: {k2:3.3f}',
transform=ax[0,1].transAxes)
# row 2 -- image, weights, mask, segment
ax[1,0].imshow(img, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[1,0].text(0.05, 1.03, 'Image', transform=ax[1,0].transAxes)
ax[1,0].set(xlim=xlim, ylim=ylim)
ax[1,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,1].imshow(err, cmap='Greys', extent=extent)
ax[1,1].text(0.05, 1.03, r'med($\sigma$)'+f'={rms*10**(-0.4 * (zpt - 23.9)):5.5f} uJy', transform=ax[1,1].transAxes)
ax[1,1].set(xlim=xlim, ylim=ylim)
ax[1,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,2].imshow(mask, cmap='Greys', extent=extent)
ax[1,2].text(0.05, 1.03, 'Blob', transform=ax[1,2].transAxes)
ax[1,2].set(xlim=xlim, ylim=ylim)
ax[1,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,3].imshow(~seg, cmap='Greys', extent=extent)
ax[1,3].text(0.05, 1.03, 'Segment', transform=ax[1,3].transAxes)
ax[1,3].set(xlim=xlim, ylim=ylim)
ax[1,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,0].scatter(xp, yp, c='royalblue', marker='x')
ax[1,2].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,3].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,2].plot()
# row 3 -- image, model, residual, chi
ax[2,0].imshow(img/err, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,0].text(0.05, 1.03, 'S/N', transform=ax[2,0].transAxes)
ax[2,0].set(xlim=xlim, ylim=ylim)
ax[2,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
# ax[2,1].imshow(mod, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,1].imshow(mod, norm=norm, cmap='Greys', extent=extent)
ax[2,1].text(0.05, 1.03, 'Model', transform=ax[2,1].transAxes)
ax[2,1].set(xlim=xlim, ylim=ylim)
ax[2,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,2].imshow(res, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,2].text(0.05, 1.03, 'Residual', transform=ax[2,2].transAxes)
ax[2,2].set(xlim=xlim, ylim=ylim)
ax[2,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,3].imshow(chi, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,3].text(0.05, 1.03, r'$\chi$', transform=ax[2,3].transAxes)
ax[2,3].set(xlim=xlim, ylim=ylim)
ax[2,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
if is_resolved:
ax[2,0].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,0].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,1].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,1].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,2].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,2].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,3].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,3].plot(xb, yb, c='royalblue', alpha=0.7)
else:
ax[2,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,1].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
# row 4 -- psf, x-slice, y-slice, hist
psfmodel = blob.psfimg[band]
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[3,0].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[3,0].axvline(0, ls='dotted', c='k')
ax[3,0].set(xlim=(-5, 5), yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
ax[3,0].text(0.05, 1.03, 'PSF', transform=ax[3,0].transAxes)
# x slice
imgx = blob.images[idx][:, int(xps)]
errx = 1./np.sqrt(blob.weights[idx][:, int(xps)])
modx = blob.solution_model_images[idx][:, int(xps)]
sign = 1
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modxlo = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modxhi = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(yps), :]
erry = 1./np.sqrt(blob.weights[idx][int(yps), :])
mody = blob.solution_model_images[idx][int(yps), :]
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modylo = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modyhi = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resy = imgy - mody
ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
xax = np.linspace(extent[2], extent[3]+conf.PIXEL_SCALE, len(imgx))
ax[3,2].errorbar(xax, imgx, yerr=errx, c='k')
ax[3,2].plot(xax, modx, c='r')
ax[3,2].fill_between(xax, modxlo, modxhi, color='r', alpha=0.3)
ax[3,2].plot(xax, resx, c='g')
ax[3,2].axvline(0, ls='dotted', c='k')
ax[3,2].set(ylim =ylim, xlabel='arcsec', xlim=xlim)
ax[3,2].text(0.05, 1.03, 'Y', transform=ax[3,2].transAxes)
yax = np.linspace(extent[0], extent[1]+conf.PIXEL_SCALE, len(imgy))
ax[3,1].errorbar(yax, imgy, yerr=erry, c='k')
ax[3,1].plot(yax, mody, c='r')
ax[3,1].fill_between(yax, modylo, modyhi, color='r', alpha=0.3)
ax[3,1].plot(yax, resy, c='g')
ax[3,1].axvline(0, ls='dotted', c='k')
ax[3,1].set(ylim=ylim, xlabel='arcsec', xlim=xlim)
ax[3,1].text(0.05, 1.03, 'X', transform=ax[3,1].transAxes)
hist(chi_seg, ax=ax[3,3], bins='freedman', histtype='step', density=True)
ax[3,3].axvline(0, ls='dotted', color='grey')
ax[3,3].text(0.05, 1.03, 'Residual '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f}', transform=ax[3,3].transAxes)
ax[3,3].set(xlim=(-10, 10), xlabel=r'$\chi$')
ax[3,3].axvline(chi_mu, c='royalblue', ls='dashed')
ax[3,3].axvline(0, c='grey', ls='dashed', alpha=0.3)
ax[3,3].axvline(chi_mu-chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(chi_mu+chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(-1, c='grey', ls='dotted', alpha=0.3)
ax[3,3].axvline(1, c='grey', ls='dotted', alpha=0.3)
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_apertures(blob, band=None):
pass
def plot_iterblob(blob, tr, iteration, bands=None):
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
cat = tr.getCatalog()
xp, yp = [src.pos[0] for src in cat], [src.pos[1] for src in cat]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
# image
img = blob.images[idx]
# model
mod = tr.getModelImage(idx)
# residual
res = img - mod
# chi2
chi2 = tr.getChiImage(idx)
fig, ax = plt.subplots(ncols=4)
ax[0].imshow(img, **img_opt)
ax[1].imshow(mod, **img_opt)
ax[2].imshow(res, **img_opt)
ax[3].imshow(chi2, cmap='RdGy', vmin=-5, vmax=5)
fig.suptitle(f'Blob {blob.blob_id} | {band} | iter: {iteration}')
[ax[i].scatter(xp, yp, marker='x', c='royalblue') for i in np.arange(4)]
[ax[i].set_title(title, fontsize=20) for i, title in enumerate(('Image', 'Model', 'Image-Model', '$\chi^{2}$'))]
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_modprofile(blob, band=None):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
psfmodel = blob.psfimg[band]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(mean + 3*rms, blob.images[idx].max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
xlim = (-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
fig, ax = plt.subplots(ncols = 5, nrows = 2, figsize=(20,10))
ax[1,0].imshow(blob.images[idx], **img_opt)
ax[1,1].imshow(blob.solution_model_images[idx], **img_opt)
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[1,2].imshow(residual, **img_opt)
xax = np.arange(-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
[ax[0,0].plot(xax * 0.15, blob.images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.images[idx])[0])]
ax[0,0].axvline(0, ls='dotted', c='k')
ax[0,0].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(blob.solution_model_images[idx])[1]/2, np.shape(blob.solution_model_images[idx])[1]/2)
[ax[0,1].plot(xax * 0.15, blob.solution_model_images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.solution_model_images[idx])[0])]
ax[0,1].axvline(0, ls='dotted', c='k')
ax[0,1].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(residual)[1]/2, np.shape(residual)[1]/2)
[ax[0,2].plot(xax * 0.15, residual[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(residual)[0])]
ax[0,2].axvline(0, ls='dotted', c='k')
ax[0,2].set(yscale='log', xlabel='arcsec')
norm = LogNorm(1e-5, 0.1*np.nanmax(psfmodel), clip='True')
img_opt = dict(cmap='Blues', norm=norm)
ax[1,3].imshow(psfmodel, norm=norm, extent=0.15 *np.array([-np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2, -np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2,]))
ax[1,3].set(xlim=xlim, ylim=xlim)
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[0,3].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[0,3].axvline(0, ls='dotted', c='k')
ax[0,3].set(xlim=xlim, yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
for j, src in enumerate(blob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = blob.solution_chisq[j, idx]
band = band.replace(' ', '_')
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
mag = zpt - 2.5 * np.log10(flux)
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.5
ax[0, 4].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 4].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.3, f' M({band}) = {mag:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.4, f' zpt({band}) = {zpt:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.5, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
ax[0, 4].axis('off')
ax[1, 4].axis('off')
for i in np.arange(3):
ax[0, i].set(xlim=(0.15*xlim[0], 0.15*xlim[1]), ylim=(np.nanmedian(blob.images[idx]), blob.images[idx].max()))
# ax[1, i].set(xlim=(-15, 15), ylim=(-15, 15))
ax[0, 3].set(xlim=(0.15*xlim[0], 0.15*xlim[1]))
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{band}_debugprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{band}_debugprofile.pdf')
logger.info(f'Saving figure: {outpath}')
fig.savefig(outpath)
plt.close()
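# Small reference sketch for the photometric conversions used by the plotting
# routines in this module: an image-unit flux is converted to an AB magnitude
# with the band zero-point, and to micro-Janskys by rescaling to the AB uJy
# zero-point of 23.9. The flux value and zero-point below are hypothetical.
def _demo_flux_conversions(flux_image_units=150.0, zpt=27.0):
    mag_ab = zpt - 2.5 * np.log10(flux_image_units)             # as in plot_modprofile
    flux_ujy = flux_image_units * 10 ** (0.4 * (23.9 - zpt))    # as in plot_fblob
    return mag_ab, flux_ujy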
def plot_xsection(blob, band, src, sid):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
fig, ax = plt.subplots(ncols=2)
posx, posy = src.pos[0], src.pos[1]
try:
# x slice
imgx = blob.images[idx][:, int(posx)]
errx = 1/np.sqrt(blob.weights[idx][:, int(posx)])
modx = blob.solution_model_images[idx][:, int(posx)]
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(posy), :]
erry = 1/np.sqrt(blob.weights[idx][int(posy), :])
mody = blob.solution_model_images[idx][int(posy), :]
resy = imgy - mody
except:
plt.close()
logger.warning('Could not make plot -- object may have escaped?')
return
# idea: show areas outside segment in grey
ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
xax = np.arange(-np.shape(blob.images[idx])[0]/2, np.shape(blob.images[idx])[0]/2) * conf.PIXEL_SCALE
ax[0].errorbar(xax, imgx, yerr=errx, c='k')
ax[0].plot(xax, modx, c='r')
ax[0].plot(xax, resx, c='g')
ax[0].axvline(0, ls='dotted', c='k')
ax[0].set(ylim =ylim, xlabel='arcsec')
yax = np.arange(-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2) * conf.PIXEL_SCALE
ax[1].errorbar(yax, imgy, yerr=erry, c='k')
ax[1].plot(yax, mody, c='r')
ax[1].plot(yax, resy, c='g')
ax[1].axvline(0, ls='dotted', c='k')
ax[1].set(ylim=ylim, xlabel='arcsec')
# for j, src in enumerate(blob.solution_catalog):
# try:
# mtype = src.name
# except:
# mtype = 'PointSource'
# flux = src.getBrightness().getFlux(band)
# chisq = blob.solution_chisq[j, idx]
# band = band.replace(' ', '_')
# if band == conf.MODELING_NICKNAME:
# zpt = conf.MODELING_ZPT
# else:
# zpt = conf.MULTIBAND_ZPT[idx]
# mag = zpt - 2.5 * np.log10(flux)
# topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
# ystart = 0.99 - j * 0.4
# ax[0, 4].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
# ax[0, 4].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
# ax[0, 4].text(1.05, ystart - 0.3, f' M({band}) = {mag:4.4f}', **topt)
# ax[0, 4].text(1.05, ystart - 0.4, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{band}_xsection.pdf')
logger.info(f'Saving figure: {outpath}')
fig.savefig(outpath)
plt.close()
def plot_detblob(blob, fig=None, ax=None, band=None, level=0, sublevel=0, final_opt=False, init=False):
if band is None:
idx = 0
band = ''
else:
# print(blob.bands)
# print(band)
idx = np.argwhere(np.array(blob.bands) == band)[0][0]
back = blob.background_images[idx]
rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(rms)
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), blob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
# Init
if fig is None:
plt.ioff()
fig, ax = plt.subplots(figsize=(24,48), ncols=6, nrows=13)
# Detection image
ax[0,0].imshow(blob.images[idx], **img_opt)
[ax[0,i].axis('off') for i in np.arange(1, 6)]
if blob.n_sources == 1:
objects = blob.bcatalog
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor(colors[0])
ax[0, 0].add_artist(e)
else:
for j, src in enumerate(blob.bcatalog):
objects = blob.bcatalog[j]
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor(colors[j])
ax[0, 0].add_artist(e)
ax[0,1].text(0.1, 0.9, f'Blob #{blob.blob_id}', transform=ax[0,1].transAxes)
ax[0,1].text(0.1, 0.8, f'{blob.n_sources} source(s)', transform=ax[0,1].transAxes)
[ax[1,i+1].set_title(title, fontsize=20) for i, title in enumerate(('Model', 'Model+Noise', 'Image-Model', '$\chi^{2}$', 'Residuals'))]
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{band}.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{band}.pdf')
fig.savefig(outpath)
logger.info(f'Saving figure: {outpath}')
elif final_opt:
nrow = 4 * level + 2* sublevel + 2
[[ax[i,j].axis('off') for i in np.arange(nrow+1, 11)] for j in np.arange(0, 6)]
ax[11,0].axis('off')
residual = blob.images[idx] - blob.pre_solution_model_images[idx]
ax[11,1].imshow(blob.pre_solution_model_images[idx], **img_opt)
ax[11,2].imshow(blob.pre_solution_model_images[idx] + noise, **img_opt)
ax[11,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[11,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
res_seg = residual[blob.segmap==src['source_id']].flatten()
ax[11,5].hist(res_seg, bins=20, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
ax[11,5].set_xlim(minx, maxx)
ax[11,5].axvline(0, c='grey', ls='dotted')
ax[11,5].set_ylim(bottom=0)
ax[12,0].axis('off')
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[12,1].imshow(blob.solution_model_images[idx], **img_opt)
ax[12,2].imshow(blob.solution_model_images[idx] + noise, **img_opt)
ax[12,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[12,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
ax[12,1].set_ylabel('Solution')
bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
res_seg = residual[blob.segmap==src['source_id']].flatten()
ax[12,5].hist(res_seg, bins=20, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
ax[12,5].set_xlim(minx, maxx)
ax[12,5].axvline(0, c='grey', ls='dotted')
ax[12,5].set_ylim(bottom=0)
# Solution params
for i, src in enumerate(blob.solution_catalog):
ax[1,0].text(0.1, 0.8 - 0.4*i, f'#{blob.bcatalog[i]["source_id"]} Model:{src.name}', color=colors[i], transform=ax[1,0].transAxes)
ax[1,0].text(0.1, 0.8 - 0.4*i - 0.1, f' Flux: {src.brightness[0]:3.3f}', color=colors[i], transform=ax[1,0].transAxes)
ax[1,0].text(0.1, 0.8 - 0.4*i - 0.2, ' '+r'$\chi^{2}$/N:'+f'{blob.solution_chisq[i,0]:3.3f}', color=colors[i], transform=ax[1,0].transAxes)
#fig.subplots_adjust(wspace=0, hspace=0)
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{band}.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{band}.pdf')
fig.savefig(outpath)
plt.close()
logger.info(f'Saving figure: {outpath}')
else:
if conf.DECISION_TREE == 1:
if init:
nrow = 4 * level + 2 * sublevel + 1
else:
nrow = 4 * level + 2 * sublevel + 2
elif conf.DECISION_TREE == 2:
if level == 0:
if sublevel == 0:
nrow = 1
if sublevel == 1:
nrow = 3
if level == 1:
if sublevel == 0:
nrow = 5
if level == 2:
if sublevel == 0:
nrow = 7
if level == 3:
if sublevel == 0:
nrow = 9
if not init:
nrow += 1
residual = blob.images[idx] - blob.tr.getModelImage(idx)
ax[nrow,0].axis('off')
ax[nrow,1].imshow(blob.tr.getModelImage(idx), **img_opt)
ax[nrow,2].imshow(blob.tr.getModelImage(idx) + noise, **img_opt)
ax[nrow,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[nrow,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
if conf.DECISION_TREE == 1:
models = {1:'PointSource', 3:'SimpleGalaxy', 5:'ExpGalaxy', 7:'DevGalaxy', 9:'CompositeGalaxy'}
elif conf.DECISION_TREE == 2:
models = {1:'PointSource', 3:'SimpleGalaxy', 5:'SersicGalaxy', 7:'SersicCoreGalaxy', 9:'CompositeGalaxy'}
if init:
ax[nrow,1].set_ylabel(models[nrow])
bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
if np.shape(residual) != np.shape(blob.segmap):
plt.figure()
plt.imshow(blob.segmap, cmap='Greys', norm=LogNorm())
plt.savefig(os.path.join(conf.PLOT_DIR,'debug_segmap.pdf'))
plt.figure()
plt.imshow(residual, cmap='Greys', norm=LogNorm())
plt.savefig(os.path.join(conf.PLOT_DIR,'debug_residual.pdf'))
res_seg = residual[blob.segmap==src['source_id']].flatten()
ax[nrow,5].hist(res_seg, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
if not init:
ax[nrow,4].text(0.02, 0.9 - 0.1*i, r'$\chi^{2}$/N'+f'={blob.rchisq[i, level, sublevel]:2.2f} | BIC={blob.bic[i, level, sublevel]:2.2f}',
color=colors[i], transform=ax[nrow,4].transAxes)
ax[nrow,5].set_xlim(minx, maxx)
ax[nrow,5].axvline(0, c='grey', ls='dotted')
ax[nrow,5].set_ylim(bottom=0)
for i, src in enumerate(blob.bcatalog):
x, y = src['x'], src['y']
color = colors[i]
if not blob._solved[i]:
ax[nrow,1].plot([x, x], [y - 10, y - 5], ls='dotted', c=color)
ax[nrow,1].plot([x - 10, x - 5], [y, y], ls='dotted', c=color)
else:
ax[nrow,1].plot([x, x], [y - 10, y - 5], c=color)
ax[nrow,1].plot([x - 10, x - 5], [y, y], c=color)
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{band}.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{band}.pdf')
fig.savefig(outpath)
logger.info(f'Saving figure: {outpath}')
return fig, ax
def plot_fblob(blob, band, fig=None, ax=None, final_opt=False, debug=False):
idx = np.argwhere(blob.bands == band)[0][0]
back = blob.backgrounds[idx]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), 0.98*blob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
if final_opt:
ax[2,0].axis('off')
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[2,1].imshow(blob.solution_model_images[idx], **img_opt)
ax[2,2].imshow(blob.solution_model_images[idx] + noise, **img_opt)
ax[2,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[2,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
ax[2,1].set_ylabel('Solution')
bins = np.arange(np.nanpercentile(residual, 5), np.nanpercentile(residual, 95), 0.1)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
img_seg = blob.images[idx][blob.segmap==src['source_id']].flatten()
ax[2,5].hist(img_seg, bins=bins, linestyle='dotted', histtype='step', color=colors[i], density=True)
res_seg = residual[blob.segmap==src['source_id']].flatten()
ax[2,5].hist(res_seg, bins=bins, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanpercentile(res_seg, 5), np.nanpercentile(res_seg, 95)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
ax[2,5].set_xlim(-5, 5) #minx, maxx)
ax[2,5].axvline(0, c='grey', ls='dotted')
ax[2,5].set_ylim(bottom=0)
dof = '/N'
# Solution params
for i, src in enumerate(blob.solution_catalog):
original_zpt = np.array(conf.MULTIBAND_ZPT)[idx]
target_zpt = 23.9
flux_ujy = src.getBrightness().getFlux(band) * 10 ** (0.4 * (target_zpt - original_zpt))
flux_var = blob.forced_variance
fluxerr_ujy = np.sqrt(flux_var[i].brightness.getParams()[idx]) * 10**(0.4 * (target_zpt - original_zpt))
ax[1,0].text(0.1, 0.8 - 0.4*i, f'#{blob.bcatalog[i]["source_id"]} Model:{src.name}', color=colors[i], transform=ax[1,0].transAxes)
ax[1,0].text(0.1, 0.8 - 0.4*i - 0.1, f' Flux: {flux_ujy:3.3f}+/-{fluxerr_ujy:3.3f} uJy', color=colors[i], transform=ax[1,0].transAxes)
ax[1,0].text(0.1, 0.8 - 0.4*i - 0.2, f' Chi2{dof}: {blob.solution_chisq[i,idx]:3.3f}', color=colors[i], transform=ax[1,0].transAxes)
#fig.subplots_adjust(wspace=0, hspace=0)
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{blob.bands[idx]}.pdf')
fig.savefig(outpath)
plt.close()
logger.info(f'Saving figure: {outpath}')
else:
# Init
if fig is None:
plt.ioff()
fig, ax = plt.subplots(figsize=(24,8), ncols=6, nrows=3)
# Detection image
ax[0,0].imshow(blob.images[idx], **img_opt)
[ax[0,i].axis('off') for i in np.arange(1, 6)]
for j, src in enumerate(blob.bcatalog):
objects = blob.bcatalog[j]
position = [src[f'x'], src[f'y']]
position[0] -= (blob.subvector[1] + blob.mosaic_origin[1] - conf.BRICK_BUFFER)
position[1] -= (blob.subvector[0] + blob.mosaic_origin[0] - conf.BRICK_BUFFER)
e = Ellipse(xy=(position[0], position[1]),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor(colors[j])
ax[0, 0].add_artist(e)
ax[0,1].text(0.1, 0.9, f'{band} | Blob #{blob.blob_id}', transform=ax[0,1].transAxes)
ax[0,1].text(0.1, 0.8, f'{blob.n_sources} source(s)', transform=ax[0,1].transAxes)
[ax[0,j].axis('off') for j in np.arange(1, 6)]
ax[1,0].axis('off')
residual = blob.images[idx] - blob.tr.getModelImage(idx)
ax[1,0].axis('off')
ax[1,1].imshow(blob.tr.getModelImage(idx), **img_opt)
ax[1,2].imshow(blob.tr.getModelImage(idx) + noise, **img_opt)
ax[1,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[1,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
res_seg = residual[blob.segmap==src['source_id']].flatten()
try:
k2, p_norm = stats.normaltest(res_seg)
except:
k2, p_norm = -99, -99
ax[1,5].hist(res_seg, bins=20, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
ax[1,5].text(0.05, 0.9 - 0.1*i, s=f"k={k2:3.3f} (p={p_norm:2.2f})", transform=ax[1,5].transAxes)
ax[1,5].set_xlim(minx, maxx)
ax[1,5].axvline(0, c='grey', ls='dotted')
ax[1,5].set_ylim(bottom=0)
[ax[1,i+1].set_title(title, fontsize=20) for i, title in enumerate(('Model', 'Model+Noise', 'Image-Model', '$\chi^{2}$', 'Residuals'))]
if debug:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{blob.bands[idx]}_DEBUG.pdf')
fig.savefig(outpath)
plt.close()
logger.info(f'DEBUG | Saving figure: {outpath}')
return fig, ax
def plot_blobmap(brick, image=None, band=None, catalog=None, mode='rms'):
if image is None:
image = brick.images[0]
if band is None:
band = brick.bands[0]
if catalog is None:
catalog = brick.catalog
fig, ax = plt.subplots(figsize=(20,20))
# imgs_marked = mark_boundaries(brick.images[0], brick.blobmap, color='red')[:,:,0]
imgs_marked = find_boundaries(brick.blobmap, mode='thick').astype(int)
imgs_marked[imgs_marked==0] = -99
backlevel, noisesigma = brick.backgrounds[0]
if mode == 'log':
vmin, vmax = np.max([backlevel + noisesigma, 1E-5]), brick.images[0].max()
norm = LogNorm(np.max([backlevel + noisesigma, 1E-5]), 0.9*np.max(image), clip=True)
ax.imshow(image, cmap='Greys', origin='lower', norm=norm)
elif mode == 'rms':
ax.imshow(image - backlevel, cmap='RdGy', origin='lower', vmin=-3*noisesigma, vmax=3*noisesigma)
mycmap = plt.cm.Greens
mycmap.set_under('k', alpha=0)
ax.imshow(imgs_marked, alpha=0.9, cmap=mycmap, vmin=0, zorder=2, origin='lower')
ax.scatter(catalog['x'], catalog['y'], marker='+', color='limegreen', s=0.1)
ax.add_patch(Rectangle((conf.BRICK_BUFFER, conf.BRICK_BUFFER), conf.BRICK_HEIGHT, conf.BRICK_WIDTH, fill=False, alpha=0.3, edgecolor='purple', linewidth=1))
for i in np.arange(brick.n_blobs):
idx, idy = np.nonzero(brick.blobmap == i+1)
xlo, xhi = np.min(idx) - conf.BLOB_BUFFER, np.max(idx) + 1 + conf.BLOB_BUFFER
ylo, yhi = np.min(idy) - conf.BLOB_BUFFER, np.max(idy) + 1 + conf.BLOB_BUFFER
w = xhi - xlo #+ 2 * conf.BLOB_BUFFER
h = yhi - ylo #+ 2 * conf.BLOB_BUFFER
rect = Rectangle((ylo, xlo), h, w, fill=False, alpha=0.3,
edgecolor='red', zorder=3, linewidth=1)
ax.add_patch(rect)
ax.annotate(str(i+1), (ylo, xlo), color='r', fontsize=2)
#ax.scatter(x + width/2., y + height/2., marker='+', c='r')
# Add collection to axes
#ax.axis('off')
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_{mode}_blobmaster.pdf')
ax.axis('off')
ax.margins(0,0)
fig.suptitle(brick.bands[0])
fig.savefig(out_path, dpi=300, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
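# A minimal usage sketch for plot_blobmap. It assumes `mybrick` is a brick object that has
# already been segmented, i.e. brick.images, brick.bands, brick.catalog, brick.blobmap and
# brick.backgrounds are populated:
#
#   plot_blobmap(mybrick, band=mybrick.bands[0], mode='rms')
#
# With only the brick given, the first image, the first band and the full catalog are used.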
def plot_ldac(tab_ldac, band, xlims=None, ylims=None, box=False, sel=None):
fig, ax = plt.subplots()
xbin = np.arange(0, 15, 0.1)
ybin = np.arange(12, 26, 0.1)
ax.hist2d(tab_ldac['FLUX_RADIUS'], tab_ldac['MAG_AUTO'], bins=(xbin, ybin), cmap='Greys', norm=LogNorm())
if box:
rect = Rectangle((xlims[0], ylims[0]), xlims[1] - xlims[0], ylims[1] - ylims[0], fill=False, alpha=0.3,
edgecolor='r', facecolor=None, zorder=3, linewidth=1)
ax.add_patch(rect)
if (sel is not None) & box:
ax.scatter(tab_ldac['FLUX_RADIUS'][sel], tab_ldac['MAG_AUTO'][sel], s=0.1, c='r')
fig.subplots_adjust(bottom = 0.15)
ax.set(xlabel='Flux Radius (px)', xlim=(0, 15),
ylabel='Mag Auto (AB)', ylim=(26, 12))
ax.grid()
if sel is not None:
nsel = np.sum(sel)
ax.text(x=0.05, y=0.95, s=f'N = {nsel}', transform=ax.transAxes)
fig.savefig(os.path.join(conf.PLOT_DIR, f'{band}_box_{box}_ldac.pdf'))
plt.close()
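# A minimal usage sketch for plot_ldac. It assumes `tab` is an LDAC table with FLUX_RADIUS
# and MAG_AUTO columns (e.g. a SExtractor catalogue); the band label and the selection
# limits below are only illustrative:
#
#   sel = (tab['FLUX_RADIUS'] > 2) & (tab['FLUX_RADIUS'] < 5) & (tab['MAG_AUTO'] < 22)
#   plot_ldac(tab, 'i_band', xlims=(2, 5), ylims=(18, 22), box=True, sel=sel)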
def plot_psf(psfmodel, band, show_gaussian=False):
fig, ax = plt.subplots(ncols=3, figsize=(30,10))
norm = LogNorm(1e-8, 0.1*np.nanmax(psfmodel), clip=True)
img_opt = dict(cmap='Blues', norm=norm)
ax[0].imshow(psfmodel, **img_opt, extent=conf.PIXEL_SCALE *np.array([-np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2, -np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2,]))
ax[0].set(xlim=(-15,15), ylim=(-15, 15))
ax[0].axvline(0, color='w', ls='dotted')
ax[0].axhline(0, color='w', ls='dotted')
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2+0.5)
[ax[1].plot(xax * conf.PIXEL_SCALE, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[1])]
ax[1].axvline(0, ls='dotted', c='k')
ax[1].set(xlim=(-15, 15), yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
if show_gaussian:
from scipy.optimize import curve_fit
def gaus(x,a,x0,sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
mean = 0
sigma = 1
ax[1].plot(xax * conf.PIXEL_SCALE,psfmodel[int(np.shape(psfmodel)[1]/2)], 'r')
popt,pcov = curve_fit(gaus,xax * conf.PIXEL_SCALE,psfmodel[int(np.shape(psfmodel)[1]/2)],p0=[1,mean,sigma])
ax[1].plot(xax*conf.PIXEL_SCALE,gaus(xax*conf.PIXEL_SCALE,*popt),'green')
x = xax
y = x.copy()
xv, yv = np.meshgrid(x, y)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
DATA_PATH = pth.join(pth.dirname(__file__), "data")
# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
fuego_color_map = colors.LinearSegmentedColormap.from_list(
"fuego",
(
(0.25, 0, 0),
(0.5, 0, 0),
(1, 0, 0),
(1, 0.5, 0),
(1, 0.75, 0),
(1, 1, 0),
(1, 1, 1),
),
N=256,
gamma=1.0,
)
fuego_color_map.set_bad("lightgray")
plt.register_cmap(cmap=fuego_color_map)
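# A quick sketch of using the colour map registered above (any 2D array works):
#   plt.imshow(np.random.rand(50, 50), cmap="fuego", origin="lower")
# NaNs are displayed in light gray, as set with set_bad() above.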
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
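Example
----------
A minimal sketch; in practice an RSS object is filled by a reader, here we only
illustrate the expected attribute shapes:
rss = RSS()
rss.wavelength   # 1D array of length n_wave
rss.intensity    # 2D array of shape (n_spectra, n_wave)
rss.variance     # same shape as intensity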
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def compute_integrated_fibre(
self,
list_spectra="all",
valid_wave_min=0,
valid_wave_max=0,
min_value=0.1,
plot=False,
title=" - Integrated values",
warnings=True,
text="...",
correct_negative_sky=False,
):
"""
Compute the integrated flux of a fibre in a particular range, valid_wave_min to valid_wave_max.
Parameters
----------
list_spectra: list of integers or "all" (default "all")
list with the number of fibres for computing integrated value
if using "all" it does all fibres
valid_wave_min, valid_wave_max : float
the integrated flux value will be computed in the range [valid_wave_min, valid_wave_max]
(default = 0; if both are 0 we use [self.valid_wave_min, self.valid_wave_max])
min_value: float (default 0.1)
For values lower than min_value, we set them as min_value
plot : Boolean (default = False)
Plot
title : string
Title for the plot
text: string
A bit of extra text
warnings : Boolean (default = True)
Write warnings, e.g. when the integrated flux is negative
correct_negative_sky : Boolean (default = False)
Corrects negative values making 0 the integrated flux of the lowest fibre
Example
----------
integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
title = " - [6500,6600]", plot = True)
"""
print("\n Computing integrated fibre values {}".format(text))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if valid_wave_min == 0:
valid_wave_min = self.valid_wave_min
if valid_wave_max == 0:
valid_wave_max = self.valid_wave_max
self.integrated_fibre = np.zeros(self.n_spectra)
region = np.where(
(self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
)
waves_in_region = len(region[0])
n_negative_fibres = 0
negative_fibres = []
for i in range(self.n_spectra):
self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
if self.integrated_fibre[i] < 0:
if warnings:
print(
" WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
i, self.integrated_fibre[i]/waves_in_region
))
n_negative_fibres = n_negative_fibres + 1
# self.integrated_fibre[i] = min_value
negative_fibres.append(i)
if len(negative_fibres) != 0:
print("\n> Number of fibres with integrated flux < 0 : {:4}, that is the {:5.2f} % of the total !".format(
n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
))
negative_fibres_sorted = []
integrated_intensity_sorted = np.argsort(
self.integrated_fibre/waves_in_region
)
for fibre_ in range(n_negative_fibres):
negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
# print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
# print " which are :",negative_fibres_sorted
if correct_negative_sky:
min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
min_sky_value_per_wave = min_sky_value/waves_in_region
print(
"\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
negative_fibres_sorted[0], min_sky_value_per_wave
))
# print self.integrated_fibre[negative_fibres_sorted[0]]
self.integrated_fibre = self.integrated_fibre - min_sky_value
for i in range(self.n_spectra):
self.intensity_corrected[i] = (
self.intensity_corrected[i] - min_sky_value_per_wave
)
else:
print(
"\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
min_value
))
for i in negative_fibres_sorted:
self.integrated_fibre[i] = min_value
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0:
# if warnings: print " WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
if plot:
# print"\n Plotting map with integrated values:"
self.RSS_map(
self.integrated_fibre,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
title=title,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def identify_el(
self,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
fibre=0,
broad=1.0,
verbose=True,
plot=True,
):
"""
Identify fibres with highest intensity (high_fibres=10).
Add all in a single spectrum.
Identify emission features.
These emission features should be those expected in all the cube!
Also, choosing fibre=number, it identifies el in a particular fibre.
Parameters
----------
high_fibres: float (default 10)
use the high_fibres highest intensity fibres for identifying
brightest_line : string (default "Ha")
string name with the emission line that is expected to be the brightest in integrated spectrum
cut: float (default 1.5)
The peak has to have a cut higher than cut to be considered as emission line
fibre: integer (default 0)
If fibre is given, it identifies emission lines in the given fibre
broad: float (default 1.0)
Broad (FWHM) of the expected emission lines
verbose : boolean (default = True)
Write results
plot : boolean (default = False)
Plot results
Example
----------
self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
"""
if fibre == 0:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
if verbose:
print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
else:
combined_high_spectrum = self.intensity_corrected[fibre]
if verbose:
print("\n> Identifying emission lines in fibre {}".format(fibre))
# Search peaks
peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
self.wavelength,
combined_high_spectrum,
plot=plot,
cut=cut,
brightest_line=brightest_line,
verbose=False,
)
p_peaks_l = []
p_peaks_fwhm = []
# Do Gaussian fit and provide center & FWHM (flux could be also included, not at the moment as not abs. flux-cal done)
if verbose:
print("\n Emission lines identified:")
for eline in range(len(peaks)):
lowlow = continuum_limits[0][eline]
lowhigh = continuum_limits[1][eline]
highlow = continuum_limits[2][eline]
highhigh = continuum_limits[3][eline]
resultado = fluxes(
self.wavelength,
combined_high_spectrum,
peaks[eline],
verbose=False,
broad=broad,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
plot=plot,
fcal=False,
)
p_peaks_l.append(resultado[1])
p_peaks_fwhm.append(resultado[5])
if verbose:
print(" {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
eline + 1,
peaks_name[eline],
peaks_rest[eline],
p_peaks_l[eline],
p_peaks_fwhm[eline],
))
return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
fibre_p=0,
remove_5578=False, # if fibre_p=fibre plots the corrections in that fibre
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
correct_high_cosmics: boolean (default = False)
Correct ONLY CCD defects
fibre_p: integer (default = 0)
Plots the corrections in fibre fibre_p
remove_5578: boolean (default = False)
Removes skyline 5578 (blue spectrum) using Gaussian fit
ND CHECK: This also MODIFIES the throughput correction correcting for flux_5578_medfilt /median_flux_5578_medfilt
step: integer (default = 50)
Number of points for calculating median value
clip_high : float (default = 100)
Minimum value of flux/median in a pixel to be consider as a cosmic
if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
verbose: boolean (default = False)
Write results
warnings: boolean (default = False)
Write warnings
plot: boolean (default = False)
Plot results
plot_suspicious_fibres: boolean (default = False)
Plots fibre(s) that could have a cosmic left (but it could be OK)
IF self.integrated_fibre[fibre]/median_running[fibre] > max_value -> SUSPICIOUS FIBRE
Example
----------
self.correct_high_cosmics_and_defects(correct_high_cosmics=False, step=40, remove_5578 = True,
clip_high=120, plot_suspicious_fibres=True, warnings=True, verbose=False, plot=True)
"""
print("\n> Correcting for high cosmics and CCD defects...")
wave_min = self.valid_wave_min # CHECK ALL OF THIS...
wave_max = self.valid_wave_max
wlm = self.wavelength
if correct_high_cosmics == False:
print(" Only CCD defects (nan and negative values) are considered.")
else:
print(" Using clip_high = {} for high cosmics".format(clip_high))
print(" IMPORTANT: Be sure that any emission or sky line is fainter than clip_high/continuum !! ")
flux_5578 = [] # For correcting sky line 5578 if requested
if wave_min < 5578 and remove_5578:
print(" Sky line 5578 will be removed using a Gaussian fit...")
integrated_fibre_uncorrected = self.integrated_fibre
print(" ")
output_every_few = np.sqrt(self.n_spectra) + 1
next_output = -1
max_ratio_list = []
for fibre in range(self.n_spectra):
if fibre > next_output:
sys.stdout.write("\b" * 30)
sys.stdout.write(
" Cleaning... {:5.2f}% completed".format(
fibre * 100.0 / self.n_spectra
)
)
sys.stdout.flush()
next_output = fibre + output_every_few
s = self.intensity_corrected[fibre]
running_wave = []
running_step_median = []
cuts = np.int(self.n_wave/step) # using np.int instead of // for improved readability
for cut in range(cuts):
if cut == 0:
next_wave = wave_min
else:
next_wave = np.nanmedian(
(wlm[np.int(cut * step)] + wlm[np.int((cut + 1) * step)])/2
)
if next_wave < wave_max:
running_wave.append(next_wave)
# print("SEARCHFORME1", step, running_wave[cut])
region = np.where(
(wlm > running_wave[cut] - np.int(step/2)) # step/2 doesn't need to be an int, but probably
& (wlm < running_wave[cut] + np.int(step/2)) # want it to be so the cuts are uniform.
)
# print('SEARCHFORME3', region)
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
running_wave.append(wave_max)
region = np.where((wlm > wave_max - step) & (wlm < wave_max))
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
for i in range(len(running_step_median)):
if np.isnan(running_step_median[i]) == True:
if i < 10:
running_step_median[i] = np.nanmedian(running_step_median[0:9])
if i > 10:
running_step_median[i] = np.nanmedian(
running_step_median[-9:-1]
)
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
running_wave, running_step_median, 7
)
fit_median = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
)
if fibre == fibre_p:
espectro_old = copy.copy(self.intensity_corrected[fibre, :])
espectro_fit_median = fit_median
for wave in range(self.n_wave): # (1,self.n_wave-3):
if s[wave] < 0:
s[wave] = fit_median[wave] # Negative values for median values
if np.isnan(s[wave]) == True:
s[wave] = fit_median[wave] # nan for median value
if (
correct_high_cosmics and fit_median[wave] > 0
): # NEW 15 Feb 2019, v7.1 2dFdr takes well cosmic rays
if s[wave] > clip_high * fit_median[wave]:
if verbose:
print(" "
"CLIPPING HIGH = {} in fibre {} w = {} value= {} v/median= {}".format(clip_high, fibre, wlm[wave], s[wave], s[wave]/fit_median[wave])) # " median=",fit_median[wave]
s[wave] = fit_median[wave]
if fibre == fibre_p:
espectro_new = copy.copy(s)
max_ratio_list.append(np.nanmax(s/fit_median))
self.intensity_corrected[fibre, :] = s
# Removing Skyline 5578 using Gaussian fit if requested
if wave_min < 5578 and remove_5578:
resultado = fluxes(
wlm, s, 5578, plot=False, verbose=False
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre] = resultado[11]
flux_5578.append(resultado[3])
sys.stdout.write("\b" * 30)
sys.stdout.write(" Cleaning... 100.00 completed")
sys.stdout.flush()
max_ratio = np.nanmax(max_ratio_list)
print("\n Maximum value found of flux/continuum = {}".format(max_ratio))
if correct_high_cosmics:
print(" Recommended value for clip_high = {} , here we used {}".format(int(max_ratio + 1), clip_high))
# Plot correction in fibre p_fibre
if fibre_p > 0:
plot_correction_in_fibre_p_fibre(fig_size,
wlm,
espectro_old,
espectro_fit_median,
espectro_new,
fibre_p,
clip_high)
# print" "
if correct_high_cosmics == False:
text = "for spectra corrected for defects..."
title = " - Throughput + CCD defects corrected"
else:
text = "for spectra corrected for high cosmics and defects..."
title = " - Throughput + high-C & D corrected"
self.compute_integrated_fibre(
valid_wave_min=wave_min,
valid_wave_max=wave_max,
text=text,
plot=plot,
title=title,
)
if plot:
print(" Plotting integrated fibre values before and after correcting for high cosmics and CCD defects:\n")
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(integrated_fibre_uncorrected, "r", label="Uncorrected", alpha=0.5)
plt.ylabel("Integrated Flux")
plt.xlabel("Fibre")
plt.ylim(
[np.nanmin(self.integrated_fibre), np.nanmax(self.integrated_fibre)]
)
plt.title(self.description)
# Check if integrated value is high
median_running = []
step_f = 10
max_value = 2.0 # For stars this is not accurate, as i/m might be between 5 and 100 in the fibres with the star
skip = 0
suspicious_fibres = []
for fibre in range(self.n_spectra):
if fibre < step_f:
median_value = np.nanmedian(
self.integrated_fibre[0: np.int(step_f)]
)
skip = 1
if fibre > self.n_spectra - step_f:
median_value = np.nanmedian(
self.integrated_fibre[-1 - np.int(step_f): -1]
)
skip = 1
if skip == 0:
median_value = np.nanmedian(
self.integrated_fibre[
fibre - np.int(step_f/2): fibre + np.int(step_f/2) # np.int is used instead of // of readability
]
)
median_running.append(median_value)
if self.integrated_fibre[fibre]/median_running[fibre] > max_value:
print(" Fibre {} has a integrated/median ratio of {} -> Might be a cosmic left!".format(fibre, self.integrated_fibre[fibre]/median_running[fibre]))
label = np.str(fibre)
plt.axvline(x=fibre, color="k", linestyle="--")
plt.text(fibre, self.integrated_fibre[fibre] / 2.0, label)
suspicious_fibres.append(fibre)
skip = 0
plt.plot(self.integrated_fibre, label="Corrected", alpha=0.6)
plt.plot(median_running, "k", label="Median", alpha=0.6)
plt.legend(frameon=False, loc=1, ncol=3)
plt.minorticks_on()
#plt.show()
#plt.close()
if plot_suspicious_fibres == True and len(suspicious_fibres) > 0:
# Plotting suspicious fibres..
figures = plot_suspicious_fibres_graph(
self,
suspicious_fibres,
fig_size,
wave_min,
wave_max,
intensity_corrected_fiber=self.intensity_corrected)
if remove_5578 and wave_min < 5578:
print(" Skyline 5578 has been removed. Checking throughput correction...")
flux_5578_medfilt = sig.medfilt(flux_5578, np.int(5))
median_flux_5578_medfilt = np.nanmedian(flux_5578_medfilt)
extra_throughput_correction = flux_5578_medfilt/median_flux_5578_medfilt
# plt.plot(extra_throughput_correction)
# plt.show()
# plt.close()
if plot:
fig = plot_skyline_5578(fig_size, flux_5578, flux_5578_medfilt)
print(" Variations in throughput between {} and {} ".format(
np.nanmin(extra_throughput_correction), np.nanmax(extra_throughput_correction)
))
print(" Applying this extra throughtput correction to all fibres...")
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/extra_throughput_correction[i]
)
self.relative_throughput = (
self.relative_throughput * extra_throughput_correction
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def clean_sky_residuals(
self,
extra_w=1.3,
step=25,
dclip=3.0,
wave_min=0,
wave_max=0,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
This task HAS TO BE USED WITH EXTREME CARE
as it has not been properly tested!!!
It CAN DELETE REAL (faint) ABSORPTION/EMISSION features in spectra!!!
Use the "1dfit" option for getting a better sky subtraction
ANGEL is keeping this here just in case it is eventually useful...
Parameters
----------
extra_w
step
dclip
wave_min
wave_max
verbose
plot
fig_size
fibre
Returns
-------
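Example
----------
A sketch only, given the warnings above. It assumes self.el has already been
filled by identify_el so that emission-line ranges can be excluded:
self.clean_sky_residuals(extra_w=1.3, step=25, dclip=3.0, fibre=500,
verbose=True, plot=True)
Using fibre=500 checks and corrects only that fibre; fibre=0 (default) does all fibres.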
"""
# verbose=True
wlm = self.wavelength
if wave_min == 0:
wave_min = self.valid_wave_min
if wave_max == 0:
wave_max = self.valid_wave_max
# Exclude ranges with emission lines if needed
exclude_ranges_low = []
exclude_ranges_high = []
exclude_ranges_low_ = []
exclude_ranges_high_ = []
if self.el[1][0] != 0:
# print " Emission lines identified in the combined spectrum:"
for el in range(len(self.el[0])):
# print " {:3}. - {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(el+1,self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el])
if (
self.el[0][el] == "Ha" or self.el[1][el] == 6583.41
): # Extra extend for Ha and [N II] 6583
extra = extra_w * 1.6
else:
extra = extra_w
exclude_ranges_low_.append(
self.el[2][el] - self.el[3][el] * extra
) # center-1.3*FWHM/2
exclude_ranges_high_.append(
self.el[2][el] + self.el[3][el] * extra
) # center+1.3*FWHM/2
# print self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el],exclude_ranges_low[el],exclude_ranges_high[el],extra
# Check overlapping ranges
skip_next = 0
for i in range(len(exclude_ranges_low_) - 1):
if skip_next == 0:
if exclude_ranges_high_[i] > exclude_ranges_low_[i + 1]:
# Ranges overlap, now check if next range also overlaps
if i + 2 < len(exclude_ranges_low_):
if exclude_ranges_high_[i + 1] > exclude_ranges_low_[i + 2]:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 2])
skip_next = 2
if verbose:
print("Double overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 1])
skip_next = 1
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i])
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
if skip_next == 1:
skip_next = 0
if skip_next == 2:
skip_next = 1
if verbose:
print(exclude_ranges_low_[i], exclude_ranges_high_[i], skip_next)
if skip_next == 0:
exclude_ranges_low.append(exclude_ranges_low_[-1])
exclude_ranges_high.append(exclude_ranges_high_[-1])
if verbose:
print(exclude_ranges_low_[-1], exclude_ranges_high_[-1], skip_next)
# print "\n> Cleaning sky residuals in range [",wave_min,",",wave_max,"] avoiding emission lines... "
print("\n> Cleaning sky residuals avoiding emission lines... ")
if verbose:
print(" Excluded ranges using emission line parameters:")
for i in range(len(exclude_ranges_low_)):
print(exclude_ranges_low_[i], exclude_ranges_high_[i])
print(" Excluded ranges considering overlaps: ")
for i in range(len(exclude_ranges_low)):
print(exclude_ranges_low[i], exclude_ranges_high[i])
print(" ")
else:
exclude_ranges_low.append(20000.0)
exclude_ranges_high.append(30000.0)
print("\n> Cleaning sky residuals...")
say_status = 0
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {} ...".format(fibre))
say_status = say_status + 100
s = self.intensity_corrected[fibre]
fit_median = smooth_spectrum(
wlm,
s,
step=step,
wave_min=wave_min,
wave_max=wave_max,
weight_fit_median=1.0,
plot=False,
)
old = []
if plot:
for i in range(len(s)):
old.append(s[i])
disp = s - fit_median
dispersion = np.nanmedian(np.abs(disp))
rango = 0
imprimir = 1
for i in range(len(wlm) - 1):
# if wlm[i] > wave_min and wlm[i] < wave_max : # CLEAN ONLY IN VALID WAVELENGTHS
if (
wlm[i] >= exclude_ranges_low[rango]
and wlm[i] <= exclude_ranges_high[rango]
):
if verbose == True and imprimir == 1:
print(" Excluding range [ {} , {} ] as it has an emission line".format(
exclude_ranges_low[rango], exclude_ranges_high[rango]))
if imprimir == 1:
imprimir = 0
# print " Checking ", wlm[i]," NOT CORRECTED ",s[i], s[i]-fit_median[i]
else:
if np.isnan(s[i]) == True:
s[i] = fit_median[i] # nan for median value
if (
disp[i] > dispersion * dclip
and disp[i + 1] < -dispersion * dclip
):
s[i] = fit_median[i]
s[i + 1] = fit_median[i + 1] # "P-Cygni-like structures
if verbose:
print(" Found P-Cygni-like feature in {}".format(wlm[i]))
if disp[i] > dispersion * dclip or disp[i] < -dispersion * dclip:
s[i] = fit_median[i]
if verbose:
print(" Clipping feature in {}".format(wlm[i]))
if wlm[i] > exclude_ranges_high[rango] and imprimir == 0:
if verbose:
print(" Checked {} End range {} {} {}".format(
wlm[i], rango,
exclude_ranges_low[rango],
exclude_ranges_high[rango]
)
)
rango = rango + 1
imprimir = 1
if rango == len(exclude_ranges_low):
rango = len(exclude_ranges_low) - 1
# print " Checking ", wlm[i]," CORRECTED IF NEEDED",s[i], s[i]-fit_median[i]
# if plot:
# for i in range(6):
# plt.figure(figsize=(fig_size, fig_size/2.5))
# plt.plot(wlm,old-fit_median, "r-", alpha=0.4)
# plt.plot(wlm,fit_median-fit_median,"g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
# plt.plot(wlm,s-fit_median, "b-", alpha=0.7)
#
# for exclude in range(len(exclude_ranges_low)):
# plt.axvspan(exclude_ranges_low[exclude], exclude_ranges_high[exclude], facecolor='g', alpha=0.15,zorder=3)
#
# plt.ylim(-100,200)
# if i == 0: plt.xlim(wlm[0]-10,wlm[-1]+10)
# if i == 1: plt.xlim(wlm[0],6500) # THIS IS FOR 1000R
# if i == 2: plt.xlim(6500,6700)
# if i == 3: plt.xlim(6700,7000)
# if i == 4: plt.xlim(7000,7300)
# if i == 5: plt.xlim(7300,wlm[-1])
# plt.minorticks_on()
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
if plot:
for i in range(6):
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(wlm, old, "r-", alpha=0.4)
plt.plot(wlm, fit_median, "g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
plt.plot(wlm, s, "b-", alpha=0.7)
for exclude in range(len(exclude_ranges_low)):
plt.axvspan(
exclude_ranges_low[exclude],
exclude_ranges_high[exclude],
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.ylim(-300, 300)
if i == 0:
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
if i == 1:
plt.xlim(wlm[0], 6500) # THIS IS FOR 1000R
if i == 2:
plt.xlim(6500, 6700)
if i == 3:
plt.xlim(6700, 7000)
if i == 4:
plt.xlim(7000, 7300)
if i == 5:
plt.xlim(7300, wlm[-1])
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
self.intensity_corrected[fibre, :] = s
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_and_substract_sky_spectrum(
self,
sky,
w=1000,
spectra=1000,
# If rebin == True, it fits all wavelengths to be at the same wavelengths that SKY spectrum...
rebin=False,
brightest_line="Ha",
brightest_line_wavelength=6563.0,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=False,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
Given a 1D sky spectrum, this task fits
the sky lines of each spectrum individually and subtracts the sky.
It needs the observed wavelength (brightest_line_wavelength) of the brightest emission line (brightest_line).
w is the wavelength array
spectra is the 2D array of spectra
Parameters
----------
sky
w
spectra
rebin
brightest_line
brightest_line_wavelength
maxima_sigma
ymin
ymax
wmin
wmax
auto_scale_sky
warnings
verbose
plot
fig_size
fibre
Returns
-------
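Example
----------
A sketch; it assumes self.sky_emission holds a 1D sky spectrum (e.g. obtained with
find_sky_emission) and that the OBSERVED wavelength of the brightest emission line
is known (the value 6640. below is only illustrative; do not leave the 6563 default,
see the warning in the code):
self.fit_and_substract_sky_spectrum(self.sky_emission, brightest_line="Ha",
brightest_line_wavelength=6640.,
maxima_sigma=3.0, auto_scale_sky=True,
fibre=0, plot=False)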
"""
if brightest_line_wavelength == 6563:
print("\n\n> WARNING: This is going to FAIL as the wavelength of the brightest emission line has not been included !!!")
print(" USING brightest_line_wavelength = 6563 as default ...\n\n")
brightest_line_wavelength_rest = 6562.82
if brightest_line == "O3" or brightest_line == "O3b":
brightest_line_wavelength_rest = 5006.84
if brightest_line == "Hb" or brightest_line == "hb":
brightest_line_wavelength_rest = 4861.33
print(" Using {:3} at rest wavelength {:6.2f} identified by the user at {:6.2f} to avoid fitting emission lines...".format(
brightest_line, brightest_line_wavelength_rest, brightest_line_wavelength
))
redshift = brightest_line_wavelength/brightest_line_wavelength_rest - 1.0
if w == 1000:
w = self.wavelength
if spectra == 1000:
spectra = copy.deepcopy(self.intensity_corrected)
if wmin == 0:
wmin = w[0]
if wmax == 0:
wmax = w[-1]
# Read file with sky emission lines
sky_lines_file = "sky_lines.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"])
number_sl = len(sl_center)
# MOST IMPORTANT EMISSION LINES IN RED
# 6300.30 [OI] -0.263 30.0 15.0 20.0 40.0
# 6312.10 [SIII] -0.264 30.0 18.0 5.0 20.0
# 6363.78 [OI] -0.271 20.0 4.0 5.0 30.0
# 6548.03 [NII] -0.296 45.0 15.0 55.0 75.0
# 6562.82 Ha -0.298 50.0 25.0 35.0 60.0
# 6583.41 [NII] -0.300 62.0 42.0 7.0 35.0
# 6678.15 HeI -0.313 20.0 6.0 6.0 20.0
# 6716.47 [SII] -0.318 40.0 15.0 22.0 45.0
# 6730.85 [SII] -0.320 50.0 30.0 7.0 35.0
# 7065.28 HeI -0.364 30.0 7.0 7.0 30.0
# 7135.78 [ArIII] -0.374 25.0 6.0 6.0 25.0
# 7318.39 [OII] -0.398 30.0 6.0 20.0 45.0
# 7329.66 [OII] -0.400 40.0 16.0 10.0 35.0
# 7751.10 [ArIII] -0.455 30.0 15.0 15.0 30.0
# 9068.90 [S-III] -0.594 30.0 15.0 15.0 30.0
el_list_no_z = [
6300.3,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7318.39,
7329.66,
7751.1,
9068.9,
]
el_list = (redshift + 1) * np.array(el_list_no_z)
# [OI] [SIII] [OI] Ha+[NII] HeI [SII] HeI [ArIII] [OII] [ArIII] [SIII]
el_low_list_no_z = [
6296.3,
6308.1,
6359.8,
6544.0,
6674.2,
6712.5,
7061.3,
7131.8,
7314.4,
7747.1,
9063.9,
]
el_high_list_no_z = [
6304.3,
6316.1,
6367.8,
6590.0,
6682.2,
6736.9,
7069.3,
7139.8,
7333.7,
7755.1,
9073.9,
]
el_low_list = (redshift + 1) * np.array(el_low_list_no_z)
el_high_list = (redshift + 1) * np.array(el_high_list_no_z)
# Double Skylines
dsky1 = [
6257.82,
6465.34,
6828.22,
6969.70,
7239.41,
7295.81,
7711.50,
7750.56,
7853.391,
7913.57,
7773.00,
7870.05,
8280.94,
8344.613,
9152.2,
9092.7,
9216.5,
8827.112,
8761.2,
0,
] # 8760.6, 0]#
dsky2 = [
6265.50,
6470.91,
6832.70,
6978.45,
7244.43,
7303.92,
7715.50,
7759.89,
7860.662,
7921.02,
7780.43,
7879.96,
8288.34,
8352.78,
9160.9,
9102.8,
9224.8,
8836.27,
8767.7,
0,
] # 8767.2, 0] #
say_status = 0
# plot=True
# verbose = True
# warnings = True
self.wavelength_offset_per_fibre = []
self.sky_auto_scale = []
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre,
fibre * 100.0 / self.n_spectra
)
)
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
skip_sl_fit = [] # True emission line, False no emission line
j_lines = 0
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
sky_sl_gaussian_fitted = copy.deepcopy(sky)
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in sky spectrum...")
for i in range(number_sl):
if sl_center[i] > el_high:
while sl_center[i] > el_high:
j_lines = j_lines + 1
if j_lines < len(el_low_list) - 1:
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
# print "Change to range ",el_low,el_high
else:
el_low = w[-1] + 1
el_high = w[-1] + 2
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=2.1 * 2.355,
broad2=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
) # Broad is FWHM for Gaussian sigm a= 1,
di = di + 1
else:
resultado = fluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigm a= 1,
sl_gaussian_flux.append(resultado[3])
sky_sl_gaussian_fitted = resultado[11]
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
if el_low < sl_center[i] < el_high:
if verbose:
print(" SKY line {} in EMISSION LINE !".format(sl_center[i]))
skip_sl_fit.append(True)
else:
skip_sl_fit.append(False)
# print " Fitted wavelength for sky line ",sl_center[i]," : ",resultado[1]," ",resultado[5]
if plot_fit:
if verbose:
print(" Fitted wavelength for sky line {} : {} sigma = {}".format(
sl_center[i], sl_gauss_center[i], sl_gaussian_sigma[i])
)
wmin = sl_lmin[i]
wmax = sl_lmax[i]
# Gaussian fit to object spectrum
object_sl_gaussian_flux = []
object_sl_gaussian_sigma = []
ratio_object_sky_sl_gaussian = []
dif_center_obj_sky = []
spec = spectra[fibre]
object_sl_gaussian_fitted = copy.deepcopy(spec)
object_sl_gaussian_center = []
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in fibre {} of object data...".format(fibre))
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if skip_sl_fit[i]:
if verbose:
print(" SKIPPING SKY LINE {} as located within the range of an emission line!".format(
sl_center[i]))
object_sl_gaussian_flux.append(
float("nan")
) # The value of the SKY SPECTRUM
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
else:
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=sl_gaussian_sigma[i] * 2.355,
broad2=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
)
di = di + 1
if (
resultado[3] > 0
and resultado[5] / 2.355 < maxima_sigma
and resultado[13] > 0
and resultado[14] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
use_sigma = resultado[5] / 2.355
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(use_sigma)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
else:
resultado = fluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigma= 1,
# print sl_center[i],sl_gaussian_sigma[i], resultado[5]/2.355, maxima_sigma
if (
resultado[3] > 0 and resultado[5] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(resultado[5] / 2.355)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
ratio_object_sky_sl_gaussian.append(
old_div(object_sl_gaussian_flux[i], sl_gaussian_flux[i])
) # TODO: to remove once sky_line_fitting is active and we can do 1Dfit
# Scale sky lines that are located in emission lines or provided negative values in fit
# reference_sl = 1 # Position in the file! Position 1 is sky line 6363.4
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
if verbose:
print("\n> Correcting skylines for which we couldn't get a Gaussian fit...\n")
for i in range(number_sl):
if skip_sl_fit[i] == True:
# Use known center, sigma of the sky and peak
gauss_fix = sl_gaussian_sigma[i]
small_center_correction = 0.0
# Check if center of previous sky line has a small difference in wavelength
small_center_correction = np.nanmedian(dif_center_obj_sky[0:i])
if verbose:
print("- Small correction of center wavelength of sky line {} : {}".format(
sl_center[i], small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
sl_center[i] + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# Substract second Gaussian if needed !!!!!
for di in range(len(dsky1) - 1):
if sl_center[i] == dsky1[di]:
if verbose:
print(" This was a double sky line, also substracting {} at {}".format(
dsky2[di], np.array(dsky2[di]) + small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
np.array(dsky2[di]) + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# wmin,wmax = 6100,6500
# ymin,ymax= -100,400
#
# wmin,wmax = 6350,6700
# wmin,wmax = 7100,7700
# wmin,wmax = 7600,8200
# wmin,wmax = 8200,8500
# wmin,wmax = 7350,7500
# wmin,wmax=6100, 8500 #7800, 8000#6820, 6850 #6700,7000 #6300,6450#7500
# wmin,wmax = 8700,9300
# ymax=800
if plot:
plt.figure(figsize=(11, 4))
plt.plot(w, spec, "y", alpha=0.7, label="Object")
plt.plot(
w,
object_sl_gaussian_fitted,
"k",
alpha=0.5,
label="Obj - sky fitted",
)
plt.plot(w, sky_sl_gaussian_fitted, "r", alpha=0.5, label="Sky fitted")
plt.plot(w, spec - sky, "g", alpha=0.5, label="Obj - sky")
plt.plot(
w,
object_sl_gaussian_fitted - sky_sl_gaussian_fitted,
"b",
alpha=0.9,
label="Obj - sky fitted - rest sky",
)
plt.xlim(wmin, wmax)
plt.ylim(ymin, ymax)
ptitle = "Fibre " + np.str(fibre) # +" with rms = "+np.str(rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux [counts]")
plt.legend(frameon=True, loc=2, ncol=5)
plt.minorticks_on()
for i in range(len(el_list)):
plt.axvline(x=el_list[i], color="k", linestyle="--", alpha=0.5)
for i in range(number_sl):
if sl_fnl[i] == 1:
plt.axvline(
x=sl_center[i], color="brown", linestyle="-", alpha=1
)
else:
plt.axvline(
x=sl_center[i], color="y", linestyle="--", alpha=0.6
)
for i in range(len(dsky2) - 1):
plt.axvline(x=dsky2[i], color="orange", linestyle="--", alpha=0.6)
# plt.show()
# plt.close()
offset = np.nanmedian(
np.array(object_sl_gaussian_center) - np.array(sl_gauss_center)
)
if verbose:
# reference_sl = 1 # Position in the file!
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
# print "\n n line fsky fspec fspec/fsky l_obj-l_sky fsky/6363.4 sigma_sky sigma_fspec"
# #print "\n n c_object c_sky c_obj-c_sky"
# for i in range(number_sl):
# if skip_sl_fit[i] == False: print "{:2} {:6.1f} {:8.2f} {:8.2f} {:7.4f} {:5.2f} {:6.3f} {:6.3f} {:6.3f}" .format(i+1,sl_center[i],sl_gaussian_flux[i],object_sl_gaussian_flux[i],ratio_object_sky_sl_gaussian[i],object_sl_gaussian_center[i]-sl_gauss_center[i],sl_ref_ratio[i],sl_gaussian_sigma[i],object_sl_gaussian_sigma[i])
# #if skip_sl_fit[i] == False: print "{:2} {:9.3f} {:9.3f} {:9.3f}".format(i+1, object_sl_gaussian_center[i], sl_gauss_center[i], dif_center_obj_sky[i])
#
print("\n> Median center offset between OBJ and SKY : {} A\n> Median gauss for the OBJECT {} A".format(offset, np.nanmedian(object_sl_gaussian_sigma)))
print("> Median flux OBJECT / SKY = {}".format(np.nanmedian(ratio_object_sky_sl_gaussian)))
self.wavelength_offset_per_fibre.append(offset)
# plt.plot(object_sl_gaussian_center, ratio_object_sky_sl_gaussian, "r+")
if auto_scale_sky:
if verbose:
print("\n> As requested, using this value to scale sky spectrum before substraction... ")
auto_scale = np.nanmedian(ratio_object_sky_sl_gaussian)
self.sky_auto_scale.append(np.nanmedian(ratio_object_sky_sl_gaussian))
# self.sky_emission = auto_scale * self.sky_emission
else:
auto_scale = 1.0
self.sky_auto_scale.append(1.0)
if rebin:
if verbose:
print("\n> Rebinning the spectrum of fibre {} to match sky spectrum...".format(fibre))
f = object_sl_gaussian_fitted
f_new = rebin_spec_shift(w, f, offset)
else:
f_new = object_sl_gaussian_fitted
self.intensity_corrected[fibre] = (
f_new - auto_scale * sky_sl_gaussian_fitted
)
# check offset center wavelength
# good_sl_center=[]
# good_sl_center_dif=[]
# plt.figure(figsize=(14, 4))
# for i in range(number_sl):
# if skip_sl_fit[i] == False:
# plt.plot(sl_center[i],dif_center_obj_sky[i],"g+", alpha=0.7, label="Object")
# good_sl_center.append(sl_center[i])
# good_sl_center_dif.append(dif_center_obj_sky[i])
#
# a1x,a0x = np.polyfit(good_sl_center, good_sl_center_dif, 1)
# fx = a0x + a1x*w
# #print a0x, a1x
# offset = np.nanmedian(good_sl_center_dif)
# print "median =",offset
# plt.plot(w,fx,"b", alpha=0.7, label="Fit")
# plt.axhline(y=offset, color='r', linestyle='--')
# plt.xlim(6100,9300)
# #plt.ylim(ymin,ymax)
# ptitle = "Fibre "+np.str(fibre)#+" with rms = "+np.str(rms[i])
# plt.title(ptitle)
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("c_obj - c_sky")
# #plt.legend(frameon=True, loc=2, ncol=4)
# plt.minorticks_on()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def do_extinction_curve(
self, observatory_file=pth.join(DATA_PATH, "ssoextinct.dat"), plot=True
):
"""
Parameters
----------
observatory_file
plot
Returns
-------
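Example
----------
A minimal sketch; it assumes self.airmass was set when the RSS was read:
self.do_extinction_curve(plot=True)
This uses the default SSO extinction file "ssoextinct.dat" in DATA_PATH.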
"""
print("\n> Computing extinction at given airmass...")
# Read data
data_observatory = np.loadtxt(observatory_file, unpack=True)
extinction_curve_wavelengths = data_observatory[0]
extinction_curve = data_observatory[1]
extinction_corrected_airmass = 10 ** (0.4 * self.airmass * extinction_curve)
# Make fit
tck = interpolate.splrep(
extinction_curve_wavelengths, extinction_corrected_airmass, s=0
)
self.extinction_correction = interpolate.splev(self.wavelength, tck, der=0)
# Plot
if plot:
plt.figure(figsize=(10, 5))
plt.plot(extinction_curve_wavelengths, extinction_corrected_airmass, "+")
plt.xlim(np.min(self.wavelength), np.max(self.wavelength))
cinco_por_ciento = 0.05 * (
np.max(self.extinction_correction) - np.min(self.extinction_correction)
)
plt.ylim(
np.min(self.extinction_correction) - cinco_por_ciento,
np.max(self.extinction_correction) + cinco_por_ciento,
)
plt.plot(self.wavelength, self.extinction_correction, "g")
plt.minorticks_on()
plt.title("Correction for extinction using airmass = {}".format(self.airmass))
plt.ylabel("Flux correction")
plt.xlabel("Wavelength [$\AA$]")
# plt.show()
# plt.close()
# Correct for extinction at given airmass
print(" Airmass = {}".format(self.airmass))
print(" Observatory file with extinction curve : {}".format(observatory_file))
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * self.extinction_correction
)
print(" Intensities corrected for extinction stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def find_sky_emission(
self,
intensidad=[0, 0],
plot=True,
n_sky=200,
sky_fibres=[1000],
sky_wave_min=0,
sky_wave_max=0,
norm=colors.LogNorm(),
):
"""
Parameters
----------
intensidad
plot
n_sky
sky_fibres
sky_wave_min
sky_wave_max
norm
Returns
-------
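Example
----------
A sketch: use the n_sky fibres with the lowest integrated flux in the given range
as sky, or alternatively provide an explicit list of sky fibres (the fibre numbers
below are only illustrative):
self.find_sky_emission(n_sky=200, sky_wave_min=6300, sky_wave_max=6500)
self.find_sky_emission(sky_fibres=[870, 871, 872, 900, 901])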
"""
if sky_wave_min == 0:
sky_wave_min = self.valid_wave_min
if sky_wave_max == 0:
sky_wave_max = self.valid_wave_max
if np.nanmedian(intensidad) == 0:
intensidad = self.intensity_corrected
ic = 1
else:
ic = 0
if sky_fibres[0] == 1000: # As it was original
# sorted_by_flux = np.argsort(flux_ratio) ORIGINAL till 21 Jan 2019
# NEW 21 Jan 2019: Assuming cleaning of cosmics and CCD defects, we just use the spaxels with the LOWEST INTEGRATED VALUES
self.compute_integrated_fibre(
valid_wave_min=sky_wave_min, valid_wave_max=sky_wave_max, plot=False
)
sorted_by_flux = np.argsort(
self.integrated_fibre
) # (self.integrated_fibre)
print("\n> Identifying sky spaxels using the lowest integrated values in the [ {} , {}] range ...".format(sky_wave_min, sky_wave_max))
# if plot:
# # print "\n Plotting fluxes and flux ratio: "
# plt.figure(figsize=(10, 4))
# plt.plot(flux_ratio[sorted_by_flux], 'r-', label='flux ratio')
# plt.plot(flux_sky[sorted_by_flux], 'c-', label='flux sky')
# plt.plot(flux_object[sorted_by_flux], 'k-', label='flux object')
# plt.axvline(x=n_sky)
# plt.xlabel("Spaxel")
# plt.ylabel("Flux")
# plt.yscale('log')
# plt.legend(frameon=False, loc=4)
# plt.show()
# Angel routine: just take n lowest spaxels!
optimal_n = n_sky
print(" We use the lowest {} fibres for getting sky. Their positions are:".format(optimal_n))
# Compute sky spectrum and plot it
self.sky_fibres = sorted_by_flux[:optimal_n]
self.sky_emission = np.nanmedian(
intensidad[sorted_by_flux[:optimal_n]], axis=0
)
print(" List of fibres used for sky saved in self.sky_fibres")
else: # We provide a list with sky positions
print(" We use the list provided to get the sky spectrum")
print(" sky_fibres = {}".format(sky_fibres))
self.sky_fibres = np.array(sky_fibres)
self.sky_emission = np.nanmedian(intensidad[self.sky_fibres], axis=0)
if plot:
self.RSS_map(
self.integrated_fibre, None, self.sky_fibres, title=" - Sky Spaxels"
) # flux_ratio
# print " Combined sky spectrum:"
plt.figure(figsize=(10, 4))
plt.plot(self.wavelength, self.sky_emission, "c-", label="sky")
plt.yscale("log")
plt.ylabel("FLux")
plt.xlabel("Wavelength [$\AA$]")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=self.valid_wave_min, color="k", linestyle="--")
plt.axvline(x=self.valid_wave_max, color="k", linestyle="--")
plt.ylim([np.nanmin(intensidad), np.nanmax(intensidad)])
plt.minorticks_on()
plt.title("{} - Combined Sky Spectrum".format(self.description))
plt.legend(frameon=False)
# plt.show()
# plt.close()
# Subtract sky in all intensities
self.intensity_sky_corrected = np.zeros_like(self.intensity)
for i in range(self.n_spectra):
if ic == 1:
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
)
if ic == 0:
self.intensity_sky_corrected[i, :] = (
self.intensity_corrected[i, :] - self.sky_emission
)
last_sky_fibre = self.sky_fibres[-1]
# Plot median value of fibre vs. fibre
if plot:
median_sky_corrected = np.zeros(self.n_spectra)
for i in range(self.n_spectra):
if ic == 1:
median_sky_corrected[i] = np.nanmedian(
self.intensity_corrected[i, :], axis=0
)
if ic == 0:
median_sky_corrected[i] = np.nanmedian(
self.intensity_sky_corrected[i, :], axis=0
)
plt.figure(figsize=(10, 4))
plt.plot(median_sky_corrected)
plt.plot(
[0, 1000],
[
median_sky_corrected[last_sky_fibre],
median_sky_corrected[last_sky_fibre],
],
"r",
)
plt.minorticks_on()
plt.ylabel("Median Flux")
plt.xlabel("Fibre")
plt.yscale("log")
plt.ylim([np.nanmin(median_sky_corrected), np.nanmax(median_sky_corrected)])
plt.title(self.description)
plt.legend(frameon=False)
# plt.show()
# plt.close()
print(" Sky spectrum obtained and stored in self.sky_emission !! ")
print(" Intensities corrected for sky emission and stored in self.intensity_corrected !")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def find_relative_throughput(
self,
ymin=10000,
ymax=200000, # nskyflat=False,
kernel_sky_spectrum=5,
wave_min_scale=0,
wave_max_scale=0,
plot=True,
):
"""
Determine the relative transmission of each spectrum
using a skyflat.
Parameters
----------
ymin
ymax
kernel_sky_spectrum
wave_min_scale
wave_max_scale
plot
Returns
-------
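Example
----------
A sketch, meant to be run on a skyflat exposure read in as an RSS object
(skyflat_red is just an illustrative variable name):
skyflat_red.find_relative_throughput(ymin=10000, ymax=200000, plot=True)
The wavelength range for the median is chosen automatically from self.grating
(e.g. [6600, 6800] for 1000R) unless wave_min_scale / wave_max_scale are given.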
"""
# These are for the normalized flat:
# fit_skyflat_degree=0, step=50, wave_min_flat=0, wave_max_flat=0):
print("\n> Using this skyflat to find relative throughput (a scale per fibre)...")
# Check grating to chose wavelength range to get median values
if wave_min_scale == 0 and wave_max_scale == 0:
if self.grating == "1000R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 1000R, we use the median value in the [6600, 6800] range.")
if self.grating == "1500V":
wave_min_scale = 5100.0
wave_max_scale = 5300.0
print(" For 1500V, we use the median value in the [5100, 5300] range.")
if self.grating == "580V":
wave_min_scale = 4700.0
wave_max_scale = 4800.0
print(" For 580V, we use the median value in the [4700, 4800] range.")
if self.grating == "385R":
wave_min_scale = 6600.0
wave_max_scale = 6800.0
print(" For 385R, we use the median value in the [6600, 6800] range.")
# print(" For {}, we use the median value in the [{}, {}] range.".format(
# self.grating, wave_min_scale, wave_max_scale))
else:
if wave_min_scale == 0:
wave_min_scale = self.wavelength[0]
if wave_max_scale == 0:
wave_max_scale = self.wavelength[-1]
print(" As given by the user, we use the median value in the [{} , {}] range.".format(wave_min_scale, wave_max_scale))
median_region = np.zeros(self.n_spectra)
for i in range(self.n_spectra):
region = np.where(
(self.wavelength > wave_min_scale) & (self.wavelength < wave_max_scale)
)
median_region[i] = np.nanmedian(self.intensity[i, region])
median_value_skyflat = np.nanmedian(median_region)
self.relative_throughput = median_region/median_value_skyflat
print(" Median value of skyflat in the [ {} , {}] range = {}".format(wave_min_scale, wave_max_scale, median_value_skyflat))
print(" Individual fibre corrections: min = {} max = {}".format(np.nanmin(self.relative_throughput), np.nanmax(self.relative_throughput)))
if plot:
plt.figure(figsize=(10, 4))
x = list(range(self.n_spectra))
plt.plot(x, self.relative_throughput)
# plt.ylim(0.5,4)
plt.minorticks_on()
plt.xlabel("Fibre")
plt.ylabel("Throughput using scale")
plt.title("Throughput correction using scale")
# plt.show()
# plt.close()
# print "\n Plotting spectra WITHOUT considering throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
plt.plot(self.wavelength, self.intensity[i, ])
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra WITHOUT considering any throughput correction")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.ylim(ymin, ymax)
plt.minorticks_on()
# plt.show()
# plt.close()
# print " Plotting spectra CONSIDERING throughput correction..."
plt.figure(figsize=(10, 4))
for i in range(self.n_spectra):
# self.intensity_corrected[i,] = self.intensity[i,] * self.relative_throughput[i]
plot_this = self.intensity[i, ]/self.relative_throughput[i]
plt.plot(self.wavelength, plot_this)
plt.ylim(ymin, ymax)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.title("Spectra CONSIDERING throughput correction (scale)")
plt.xlim(self.wavelength[0] - 10, self.wavelength[-1] + 10)
plt.axvline(x=wave_min_scale, color="k", linestyle="--")
plt.axvline(x=wave_max_scale, color="k", linestyle="--")
# plt.show()
# plt.close()
print("\n> Using median value of skyflat considering a median filter of {} ...".format(kernel_sky_spectrum)) # LUKE
median_sky_spectrum = np.nanmedian(self.intensity, axis=0)
self.response_sky_spectrum = np.zeros_like(self.intensity)
rms = np.zeros(self.n_spectra)
plot_fibres = [100, 500, 501, 900]
pf = 0
for i in range(self.n_spectra):
self.response_sky_spectrum[i] = (
(self.intensity[i]/self.relative_throughput[i])/median_sky_spectrum
)
filter_response_sky_spectrum = sig.medfilt(
self.response_sky_spectrum[i], kernel_size=kernel_sky_spectrum
)
rms[i] = np.nansum(
np.abs(self.response_sky_spectrum[i] - filter_response_sky_spectrum)
)/np.nansum(self.response_sky_spectrum[i])
if plot:
if i == plot_fibres[pf]:
plt.figure(figsize=(10, 4))
plt.plot(
self.wavelength,
self.response_sky_spectrum[i],
alpha=0.3,
label="Response Sky",
)
plt.plot(
self.wavelength,
filter_response_sky_spectrum,
alpha=0.7,
linestyle="--",
label="Filtered Response Sky",
)
plt.plot(
self.wavelength,
self.response_sky_spectrum[i]/filter_response_sky_spectrum,
alpha=1,
label="Normalized Skyflat",
)
plt.xlim(self.wavelength[0] - 50, self.wavelength[-1] + 50)
plt.ylim(0.95, 1.05)
ptitle = "Fibre {} with rms = {}".format(i, rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.legend(frameon=False, loc=3, ncol=1)
# plt.show()
# plt.close()
if pf < len(plot_fibres) - 1:
pf = pf + 1
print(" median rms = {} min rms = {} max rms = {}".format(np.nanmedian(rms), np.nanmin(rms),np.nanmax(rms)))
# if plot:
# plt.figure(figsize=(10, 4))
# for i in range(self.n_spectra):
# #plt.plot(self.wavelength,self.intensity[i,]/median_sky_spectrum)
# plot_this = self.intensity[i,] / self.relative_throughput[i] /median_sky_spectrum
# plt.plot(self.wavelength, plot_this)
# plt.xlabel("Wavelength [$\AA$]")
# plt.title("Spectra CONSIDERING throughput correction (scale) / median sky spectrum")
# plt.xlim(self.wavelength[0]-10,self.wavelength[-1]+10)
# plt.ylim(0.7,1.3)
# plt.minorticks_on()
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum, color='r',alpha=0.7)
# plt.plot(self.wavelength, filter_median_sky_spectrum, color='blue',alpha=0.7)
# plt.show()
# plt.close()
#
# plt.plot(self.wavelength, median_sky_spectrum/filter_median_sky_spectrum, color='r',alpha=0.7)
# plt.show()
# plt.close()
# for i in range(2):
# response_sky_spectrum_ = self.intensity[500+i,] / self.relative_throughput[500+i] /median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
# rms=np.nansum(np.abs(response_sky_spectrum_ - filter_response_sky_spectrum))/np.nansum(response_sky_spectrum_)
# for i in range(5):
# filter_response_sky_spectrum_ = (self.intensity[500+i,] / self.relative_throughput[500+i] ) / median_sky_spectrum
# filter_response_sky_spectrum = sig.medfilt(filter_response_sky_spectrum_,kernel_size=kernel_sky_spectrum)
#
# plt.plot(self.wavelength, filter_response_sky_spectrum,alpha=0.7)
# plt.ylim(0.95,1.05)
# plt.show()
# plt.close()
print("\n> Relative throughput using skyflat scaled stored in self.relative_throughput !!")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def get_telluric_correction(
self,
n_fibres=10,
correct_from=6850.0,
correct_to=10000.0,
apply_tc=False,
step=10,
combined_cube=False,
weight_fit_median=0.5,
exclude_wlm=[
[6450, 6700],
[6850, 7050],
[7130, 7380],
], # This is range for 1000R
wave_min=0,
wave_max=0,
plot=True,
fig_size=12,
verbose=False,
):
""" # TODO BLAKE: always use false, use plots to make sure it's good. prob just save as a different file.
Get telluric correction using a spectrophotometric star
Parameters
----------
n_fibres: integer
number of fibers to add for obtaining spectrum
correct_from : float
wavelength from which telluric correction is applied (default = 6850)
apply_tc : boolean (default = False)
apply telluric correction to data
exclude_wlm=[[6450,6700],[6850,7050], [7130,7380]]:
            Wavelength ranges not considered when normalising the stellar continuum
Example
----------
telluric_correction_star1 = star1r.get_telluric_correction(n_fibres=15)
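        A possible follow-up (hedged sketch; np.savetxt and "science_rss" are
        illustrative, not part of this class):
        >>> np.savetxt("telluric_correction.dat", telluric_correction_star1)
        >>> # science_rss.intensity_corrected *= telluric_correction_star1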
"""
print("\n> Obtaining telluric correction using spectrophotometric star...")
if combined_cube:
wlm = self.combined_cube.wavelength
else:
wlm = self.wavelength
if wave_min == 0:
wave_min = wlm[0]
if wave_max == 0:
wave_max = wlm[-1]
if combined_cube:
if self.combined_cube.seeing == 0:
self.combined_cube.half_light_spectrum(
5, plot=plot, min_wave=wave_min, max_wave=wave_max
)
estrella = self.combined_cube.integrated_star_flux
else:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
intensidad = self.intensity_corrected
region = []
for fibre in range(n_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
estrella = np.nansum(intensidad[region], axis=0)
smooth_med_star = smooth_spectrum(
wlm,
estrella,
wave_min=wave_min,
wave_max=wave_max,
step=step,
weight_fit_median=weight_fit_median,
exclude_wlm=exclude_wlm,
plot=plot,
verbose=verbose,
)
telluric_correction = np.ones(len(wlm))
for l in range(len(wlm)):
if wlm[l] > correct_from and wlm[l] < correct_to:
telluric_correction[l] = smooth_med_star[l]/estrella[l] # TODO: should be float, check when have star data
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
if combined_cube:
print(" Telluric correction for this star ({}) :".format(self.combined_cube.object))
plt.plot(wlm, estrella, color="b", alpha=0.3)
plt.plot(wlm, estrella * telluric_correction, color="g", alpha=0.5)
plt.ylim(np.nanmin(estrella), np.nanmax(estrella))
else:
print(" Example of telluric correction using fibres {} and {} :".format(region[0], region[1]))
plt.plot(wlm, intensidad[region[0]], color="b", alpha=0.3)
plt.plot(
wlm,
intensidad[region[0]] * telluric_correction,
color="g",
alpha=0.5,
)
plt.plot(wlm, intensidad[region[1]], color="b", alpha=0.3)
plt.plot(
wlm,
intensidad[region[1]] * telluric_correction,
color="g",
alpha=0.5,
)
plt.ylim(
np.nanmin(intensidad[region[1]]), np.nanmax(intensidad[region[0]])
) # CHECK THIS AUTOMATICALLY
plt.axvline(x=wave_min, color="k", linestyle="--")
plt.axvline(x=wave_max, color="k", linestyle="--")
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
plt.xlabel("Wavelength [$\AA$]")
if exclude_wlm[0][0] != 0:
for i in range(len(exclude_wlm)):
plt.axvspan(
exclude_wlm[i][0], exclude_wlm[i][1], color="r", alpha=0.1
)
plt.minorticks_on()
# plt.show()
# plt.close()
if apply_tc: # Check this
print(" Applying telluric correction to this star...")
if combined_cube:
self.combined_cube.integrated_star_flux = (
self.combined_cube.integrated_star_flux * telluric_correction
)
for i in range(self.combined_cube.n_rows):
for j in range(self.combined_cube.n_cols):
self.combined_cube.data[:, i, j] = (
self.combined_cube.data[:, i, j] * telluric_correction
)
else:
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :] * telluric_correction
)
return telluric_correction
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectrum(self, spectrum_number, sky=True, xmin=0, xmax=0, ymax=0, ymin=0):
"""
Plot spectrum of a particular spaxel.
Parameters
----------
        spectrum_number:
            index of the spaxel (fibre) whose spectrum is shown.
        sky:
            if True, the sky-subtracted spectrum is shown;
            if False, the sky emission is added back.
Example
-------
>>> rss1.plot_spectrum(550, sky=True)
"""
if sky:
spectrum = self.intensity_corrected[spectrum_number]
else:
spectrum = self.intensity_corrected[spectrum_number] + self.sky_emission
plt.plot(self.wavelength, spectrum)
# error = 3*np.sqrt(self.variance[spectrum_number])
# plt.fill_between(self.wavelength, spectrum-error, spectrum+error, alpha=.1)
if xmin != 0 or xmax != 0 or ymax != 0 or ymin != 0:
if xmin == 0:
xmin = self.wavelength[0]
if xmax == 0:
xmax = self.wavelength[-1]
if ymin == 0:
ymin = np.nanmin(spectrum)
if ymax == 0:
ymax = np.nanmax(spectrum)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_spectra(
self,
list_spectra="all",
wavelength_range=[0],
xmin="",
xmax="",
ymax=1000,
ymin=-100,
fig_size=10,
save_file="",
sky=True,
):
"""
        Plot the spectra of a list of spaxels.
Parameters
----------
list_spectra:
spaxels to show spectrum. Default is all.
save_file:
(Optional) Save plot in file "file.extension"
fig_size:
Size of the figure (in x-axis), default: fig_size=10
Example
-------
>>> rss1.plot_spectra([1200,1300])
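        Saving the figure to a file instead of showing it (file name is illustrative):
        >>> rss1.plot_spectra([1200, 1300], save_file="spectra.png")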
"""
plt.figure(figsize=(fig_size, fig_size / 2.5))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if len(wavelength_range) == 2:
plt.xlim(wavelength_range[0], wavelength_range[1])
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
# title = "Spectrum of spaxel {} in {}".format(spectrum_number, self.description)
# plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
for i in list_spectra:
self.plot_spectrum(i, sky)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def plot_combined_spectrum(
self,
list_spectra="all",
sky=True,
median=False,
xmin="",
xmax="",
ymax="",
ymin="",
fig_size=10,
save_file="",
plot=True,
):
"""
Plot combined spectrum of a list and return the combined spectrum.
Parameters
----------
        list_spectra:
            spaxels to combine. Default is all.
        sky:
            if True, the sky-subtracted spectra are used;
            if False, the sky emission is added back.
        median:
            if True, the spectra are combined with a median instead of a sum.
        Example
        -------
        >>> combined_spectrum = rss1.plot_combined_spectrum([550, 551, 552], median=True)
"""
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
spectrum = np.zeros_like(self.intensity_corrected[list_spectra[0]])
value_list = []
# Note: spectrum of fibre is located in position fibre-1, e.g., spectrum of fibre 1 -> intensity_corrected[0]
if sky:
for fibre in list_spectra:
value_list.append(self.intensity_corrected[fibre - 1])
else:
for fibre in list_spectra:
value_list.append(
self.intensity_corrected[fibre - 1] + self.sky_emission
)
if median:
spectrum = np.nanmedian(value_list, axis=0)
else:
spectrum = np.nansum(value_list, axis=0)
if plot:
plt.figure(figsize=(fig_size, fig_size / 2.5))
if xmin == "":
xmin = np.nanmin(self.wavelength)
if xmax == "":
xmax = np.nanmax(self.wavelength)
if ymin == "":
ymin = np.nanmin(spectrum)
if ymax == "":
ymax = np.nanmax(spectrum)
plt.plot(self.wavelength, spectrum)
if len(list_spectra) == list_spectra[-1] - list_spectra[0] + 1:
title = "{} - Combined spectrum in range [{},{}]".format(
self.description, list_spectra[0], list_spectra[-1]
)
else:
title = "Combined spectrum using requested fibres"
plt.title(title)
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Relative Flux")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if save_file == "":
#plt.show()
pass
else:
plt.savefig(save_file)
plt.close()
return spectrum
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def flux_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
fluxes = np.empty(n_spectra)
variance = np.empty(n_spectra)
for i in range(n_spectra):
fluxes[i] = np.nanmean(self.intensity[list_spectra[i], index_min:index_max])
variance[i] = np.nanmean(
self.variance[list_spectra[i], index_min:index_max]
)
return fluxes * (lambda_max - lambda_min), variance * (lambda_max - lambda_min)
# WARNING: Are we overestimating errors?
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def median_between(self, lambda_min, lambda_max, list_spectra=[]):
"""
Parameters
----------
lambda_min
lambda_max
list_spectra
Returns
-------
"""
index_min = np.searchsorted(self.wavelength, lambda_min)
index_max = np.searchsorted(self.wavelength, lambda_max) + 1
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
n_spectra = len(list_spectra)
medians = np.empty(n_spectra)
for i in range(n_spectra):
medians[i] = np.nanmedian(
self.intensity[list_spectra[i], index_min:index_max]
)
return medians
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def line_flux(
self,
left_min,
left_max,
line_min,
line_max,
right_min,
right_max,
list_spectra=[],
):
"""
Parameters
----------
left_min
left_max
line_min
line_max
right_min
right_max
list_spectra
Returns
-------
"""
        # Note: this function is currently not called anywhere else in this module.
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
        line, var_line = self.flux_between(line_min, line_max, list_spectra)
        # flux_between() returns integrated fluxes; divide by the band width to get
        # the mean continuum flux density on each side of the line
        left, var_left = self.flux_between(left_min, left_max, list_spectra)
        left, var_left = left / (left_max - left_min), var_left / (left_max - left_min)
        right, var_right = self.flux_between(right_min, right_max, list_spectra)
        right, var_right = right / (right_max - right_min), var_right / (right_max - right_min)
        wavelength_left = (left_min + left_max) / 2
        wavelength_line = (line_min + line_max) / 2
        wavelength_right = (right_min + right_max) / 2
        # linear interpolation of the continuum at the line wavelength
        continuum = left + (right - left) * (wavelength_line - wavelength_left) / (
            wavelength_right - wavelength_left
        )
        var_continuum = (var_left + var_right) / 2
return (
line - continuum * (line_max - line_min),
var_line + var_continuum * (line_max - line_min),
)
# WARNING: Are we overestimating errors?
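    # Usage sketch for line_flux (wavelength values in Angstrom are illustrative,
    # e.g. an H-alpha band with continuum windows on both sides of the line):
    #     flux, var = rss.line_flux(6540, 6550, 6555, 6575, 6580, 6590)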
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_map(
self,
variable,
norm=colors.LogNorm(),
list_spectra=[],
title=" - RSS map",
color_bar_text="Integrated Flux [Arbitrary units]",
):
"""
Plot map showing the offsets, coloured by variable.
Parameters
----------
variable
norm
list_spectra
title
color_bar_text
Returns
-------
"""
if len(list_spectra) == 0:
list_spectra = list(range(self.n_spectra))
plt.figure(figsize=(10, 10))
plt.scatter(
self.offset_RA_arcsec[list_spectra],
self.offset_DEC_arcsec[list_spectra],
c=variable[list_spectra],
cmap=fuego_color_map,
norm=norm,
s=260,
marker="h",
)
plt.title(self.description + title)
plt.xlim(
np.nanmin(self.offset_RA_arcsec) - 0.7,
np.nanmax(self.offset_RA_arcsec) + 0.7,
)
plt.ylim(
np.nanmin(self.offset_DEC_arcsec) - 0.7,
np.nanmax(self.offset_DEC_arcsec) + 0.7,
)
plt.xlabel("$\Delta$ RA [arcsec]")
plt.ylabel("$\Delta$ DEC [arcsec]")
plt.minorticks_on()
plt.grid(which="both")
plt.gca().invert_xaxis()
cbar = plt.colorbar()
plt.clim(np.nanmin(variable[list_spectra]), np.nanmax(variable[list_spectra]))
cbar.set_label(color_bar_text, rotation=90, labelpad=40)
cbar.ax.tick_params()
# plt.show()
# plt.close()
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def RSS_image(
self,
image=[0],
norm=colors.Normalize(),
cmap="seismic_r",
clow=0,
chigh=0,
labelpad=10,
title=" - RSS image",
color_bar_text="Integrated Flux [Arbitrary units]",
fig_size=13.5,
):
"""
Plot RSS image coloured by variable.
cmap = "binary_r" nice greyscale
Parameters
----------
image
norm
cmap
clow
chigh
labelpad
title
color_bar_text
fig_size
Returns
-------
"""
        if np.nanmedian(image)
import os
import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.contrib.slim as slim
from PIL import Image
from pyntcloud import PyntCloud
from sklearn.metrics import auc, roc_curve
# matplotlib.use('TkAgg')  # uncomment this line if you are using macOS (must be set before importing pyplot)
def show_all_trainable_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def display_point(pts, color, color_label=None, title=None, fname=None, axis="on", marker_size=5):
pts = np.squeeze(pts)
if isinstance(color, np.ndarray):
color = np_color_to_hex_str(np.squeeze(color))
DPI = 300
PIX_h = 1000
MARKER_SIZE = marker_size
if color_label is None:
PIX_w = PIX_h
else:
PIX_w = PIX_h * 2
X = pts[:, 0]
Y = pts[:, 2]
Z = pts[:, 1]
max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
fig = plt.figure()
fig.set_size_inches(PIX_w / DPI, PIX_h / DPI)
if axis == "off":
plt.subplots_adjust(top=1.2, bottom=-0.2, right=1.5, left=-0.5, hspace=0, wspace=-0.7)
plt.margins(0, 0)
if color_label is None:
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, - Y, Z, c=color, edgecolors="none", s=MARKER_SIZE, depthshade=True)
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
plt.axis(axis)
# ax.set_yticklabels([])
# ax.set_xticklabels([])
# ax.set_zticklabels([])
# ax.set_axis_off()
if title is not None:
ax.set_title(title, fontdict={'fontsize': 30})
ax.set_aspect("equal")
# ax.grid("off")
if fname:
plt.savefig(fname, transparent=True, dpi=DPI)
plt.close(fig)
else:
plt.show()
else:
ax = fig.add_subplot(121, projection='3d')
bx = fig.add_subplot(122, projection='3d')
ax.scatter(X, Y, Z, c=color, edgecolors="none", s=MARKER_SIZE, depthshade=True)
bx.scatter(X, Y, Z, c=color_label, edgecolors="none", s=MARKER_SIZE, depthshade=True)
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
bx.set_xlim(mid_x - max_range, mid_x + max_range)
bx.set_ylim(mid_y - max_range, mid_y + max_range)
bx.set_zlim(mid_z - max_range, mid_z + max_range)
bx.patch.set_alpha(0)
ax.set_aspect("equal")
ax.grid("off")
bx.set_aspect("equal")
bx.grid("off")
ax.axis('off')
bx.axis("off")
plt.axis('off')
if fname:
plt.savefig(fname, transparent=True, dpi=DPI)
plt.close(fig)
else:
plt.show()
def int16_to_hex_str(color):
hex_str = ""
color_map = {i: str(i) for i in range(10)}
color_map.update({10: "A", 11: "B", 12: "C", 13: "D", 14: "E", 15: "F", 16: "F"})
# print(color_map)
hex_str += color_map[color // 16]
hex_str += color_map[color % 16]
return hex_str
def horizontal_concatnate_pic(fout, *fnames):
images = [Image.open(i).convert('RGB') for i in fnames]
# images = map(Image.open, fnames)
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset, 0))
x_offset += im.size[0]
new_im.save(fout)
def vertical_concatnate_pic(fout, *fnames):
images = [Image.open(i).convert('RGB') for i in fnames]
# images = map(Image.open, fnames)
widths, heights = zip(*(i.size for i in images))
max_widths = max(widths)
width_ratio = [max_widths / width for width in widths]
    new_height = [int(width_ratio[idx] * height) for idx, height in enumerate(heights)]
new_images = [i.resize((max_widths, new_height[idx])) for idx, i in enumerate(images)]
total_heights = sum(new_height)
new_im = Image.new('RGB', (max_widths, total_heights))
x_offset = 0
for im in new_images:
new_im.paste(im, (0, x_offset))
x_offset += im.size[1]
new_im.save(fout)
def rgb_to_hex_str(*rgb):
hex_str = "#"
for item in rgb:
hex_str += int16_to_hex_str(item)
return hex_str
def np_color_to_hex_str(color):
"""
:param color: an numpy array of shape (N, 3)
:return: a list of hex color strings
"""
hex_list = []
for rgb in color:
hex_list.append(rgb_to_hex_str(rgb[0], rgb[1], rgb[2]))
return hex_list
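# Example of the colour helpers above (values worked out from the code):
#   rgb_to_hex_str(255, 0, 0)                      -> "#FF0000"
#   np_color_to_hex_str(np.array([[0, 128, 255]])) -> ["#0080FF"]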
def load_h5(path, *kwd):
    f = h5py.File(path, "r")
list_ = []
for item in kwd:
list_.append(f[item][:])
print("{0} of shape {1} loaded!".format(item, f[item][:].shape))
if item == "ndata" or item == "data":
pass # print(np.mean(f[item][:], axis=1))
if item == "color":
print("color is of type {}".format(f[item][:].dtype))
return list_
def load_single_cat_h5(cat, num_pts, type, *kwd):
fpath = os.path.join("./data/category_h5py", cat, "PTS_{}".format(num_pts), "ply_data_{}.h5".format(type))
return load_h5(fpath, *kwd)
def printout(flog, data): # follow up
print(data)
flog.write(data + '\n')
def save_ply(data, color, fname):
color = color.astype(np.uint8)
df1 = pd.DataFrame(data, columns=["x", "y", "z"])
df2 = pd.DataFrame(color, columns=["red", "green", "blue"])
pc = PyntCloud(pd.concat([df1, df2], axis=1))
pc.to_file(fname)
def label2onehot(labels, m):
    # keep the identity matrix under its own name so the loop index does not shadow it
    eye = np.eye(m)
    onehot_labels = np.zeros(shape=(labels.shape[0], m))
    for idx, i in enumerate(labels):
        onehot_labels[idx] = eye[i]
    return onehot_labels
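# Example (with the corrected indexing above): label2onehot(np.array([0, 2, 1]), 3)
#   -> [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]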
def multiclass_AUC(y_true, y_pred):
"""
:param y_true: shape (num_instance,)
:param y_pred: shape (num_instance, num_class)
:return:
"""
num_classes = np.unique(y_true).shape[0]
print(num_classes)
tpr = dict()
fpr = dict()
roc_auc = dict()
num_instance = dict()
total_roc_auc = 0
for i in range(num_classes):
binary_label = np.where(y_true == i, 1, 0)
class_score = y_pred[:, i]
num_instance[i] = np.sum(y_true == i)
fpr[i], tpr[i], _ = roc_curve(binary_label, class_score)
roc_auc[i] = auc(fpr[i], tpr[i])
total_roc_auc += roc_auc[i]
    return total_roc_auc / num_classes  # average AUC over the classes actually present
# print(roc_auc, num_instance)
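# Hedged usage sketch for multiclass_AUC (random data, illustrative only;
# assumes every class appears at least once in y_true):
#   y_true = np.random.randint(0, 16, size=2000)
#   y_score = softmax(np.random.randn(2000, 16))
#   print(multiclass_AUC(y_true, y_score))   # close to 0.5 for random scores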
def softmax(logits):
"""
:param logits: of shape (num_instance, num_classes)
:return: prob of the same shape as that of logits, with each row summed up to 1
"""
assert logits.shape[-1] == 16
regulazation = np.max(logits, axis=-1) # (num_instance,)
logits -= regulazation[:, np.newaxis]
prob = np.exp(logits) / np.sum(np.exp(logits), axis=-1)[:, np.newaxis]
assert prob.shape == logits.shape
return prob
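# Sanity check (illustrative): each row of the returned probabilities sums to ~1.
#   probs = softmax(np.random.randn(4, 16))
#   probs.sum(axis=-1)   # -> array close to [1., 1., 1., 1.]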
def construct_label_weight(label, weight):
"""
:param label: a numpy ndarray of shape (batch_size,) with each entry in [0, num_class)
:param weight: weight list of length num_class
:return: a numpy ndarray of the same shape as that of label
"""
return [weight[i] for i in label]
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
B, N, C = batch_data.shape
assert (clip > 0)
jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)
jittered_data += batch_data
return jittered_data
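# Example: add clipped Gaussian jitter to a batch of 2 clouds with 1024 points each.
#   noisy = jitter_point_cloud(np.random.rand(2, 1024, 3), sigma=0.01, clip=0.05)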
def generate_sphere(num_pts, radius):
# http://electron9.phys.utk.edu/vectors/3dcoordinates.htm
r = np.random.rand(num_pts, 1) * radius
f = np.random.rand(num_pts, 1) * np.pi * 2
q = np.random.rand(num_pts, 1) * np.pi
x = r * np.sin(q) * np.cos(f)
y = r * np.sin(q) * np.sin(f)
    z = r * np.cos(q)
import gym
# Create the simulation environment
env = gym.make('Taxi-v3')
# Reset the environment
obs = env.reset()
# Render the current state of the environment
#env.render()
m = env.observation_space.n # size of the state space
n = env.action_space.n # size of action space
print(m,n)
print("出租车问题状态数量为{:d},动作数量为{:d}。".format(m, n))
import numpy as np
# Initialize the Q-table and hyperparameters
# Q-table of size m*n
Q = np.zeros([m,n])
Q2 = np.zeros([m,n])
# Discount factor for future rewards
gamma = 0.97
# Maximum number of episodes in the episodic training
max_episode = 1000
# Maximum number of steps per episode
max_steps = 1000
# Learning rate
alpha = 0.7
# Probability of random exploration
epsilon = 0
for i in range(max_episode):
# Start with new environment
s = env.reset()
done = False
counter = 0
for _ in range(max_steps):
# Choose an action using epsilon greedy policy
p = np.random.rand()
if p>epsilon or np.any(Q[s,:])==False:
a = env.action_space.sample()
else:
a = np.argmax(Q[s, :])
            # Choose action a with the epsilon-greedy rule:
            # when p > epsilon, or no value has been learned for this state yet, explore randomly
            # otherwise, greedily pick the action with the highest learned value (np.argmax)
            # ======= Add your code here
            # ======= End of added code
            #env.step(a)  # take one step using the selected action
            # returns the new state, the reward, and whether the episode is done
s_new, r, done, _ = env.step(a)
            # r is the reward obtained after taking a; s is the state before the step
            # Update the Q-table according to the Bellman equation (np.max)
            # ======= Add your code here
Q[s,a] =(1-alpha)*Q[s,a]+alpha*(r+gamma*np.max(Q[s_new,:]))
            # ======= End of added code
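            # i.e. the Q-learning update: Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s_new, a'))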
# print(Q[s,a],r)
s = s_new
if done:
break
print(Q)
s = env.reset()
done = False
env.render()
#Test the learned Agent
for i in range(max_steps):
a = np.argmax(Q[s,:])
s, _, done, _ = env.step(a)
#env.render()
if done:
break
rewards = []  # ======= Add your code here
rewards2=[]
for _ in range(100):
s = env.reset()
done = False
#env.render()
rprestep = []
#Test the learned Agent
for i in range(max_steps):
a = np.argmax(Q[s, :])
s, reward0, done, _ = env.step(a)
rprestep.append(reward0)
env.render()
if done:
break
print('----------- ')
    rewards.append(np.sum(rprestep))
#!/usr/bin/env python
"""
MagPy-General: Standard pymag package containing the following classes:
Written by <NAME>, <NAME> 2011/2012/2013/2014
Written by <NAME>, <NAME>, <NAME> 2015/2016
Version 0.3 (starting May 2016)
License:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import logging
import os
import sys
import tempfile
# ----------------------------------------------------------------------------
# Part 1: Import routines for packages
# ----------------------------------------------------------------------------
logpygen = '' # temporary logger variable
badimports = [] # List of missing packages
nasacdfdir = "c:\CDF Distribution\cdf33_1-dist\lib"
# Logging
# ---------
# Select the user's home directory (platform independent) or environment path
if "MAGPY_LOG_PATH" in os.environ:
path_to_log = os.environ["MAGPY_LOG_PATH"]
if not os.path.exists(path_to_log):
os.makedirs(path_to_log)
else:
path_to_log = tempfile.gettempdir()
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
# Check file permission/existance
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
except:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.addHandler(console)
return logger
# Package loggers to identify info/problem source
logger = setup_logger(__name__)
# DEPRECATED: replaced by individual module loggers, delete these when sure they're no longer needed:
loggerabs = logging.getLogger('abs')
loggertransfer = logging.getLogger('transf')
loggerdatabase = logging.getLogger('db')
loggerstream = logging.getLogger('stream')
loggerlib = logging.getLogger('lib')
loggerplot = logging.getLogger('plot')
# Special loggers for event notification
stormlogger = logging.getLogger('stream')
logger.info("Initiating MagPy...")
from magpy.version import __version__
logger.info("MagPy version "+str(__version__))
magpyversion = __version__
# Standard packages
# -----------------
try:
import csv
import pickle
import types
import struct
import re
import time, string, os, shutil
#import locale
import copy as cp
import fnmatch
import dateutil.parser as dparser
from tempfile import NamedTemporaryFile
import warnings
from glob import glob, iglob, has_magic
from itertools import groupby
import operator # used for stereoplot legend
from operator import itemgetter
# The following packages are not identically available for python3
try: # python2
import copy_reg as copyreg
except ImportError: # python3
import copyreg as copyreg
# Python 2 and 3: alternative 4
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, ProxyHandler, install_opener, build_opener
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, ProxyHandler, install_opener, build_opener
"""
try: # python2
import urllib2
except ImportError: # python3
import urllib.request
"""
try: # python2
import thread
except ImportError: # python3
import _thread
try: # python2
from StringIO import StringIO
pyvers = 2
except ImportError: # python 3
from io import StringIO
pyvers = 3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: standard packages.\n"
badimports.append(e)
# operating system
try:
PLATFORM = sys.platform
logger.info("Running on platform: {}".format(PLATFORM))
except:
PLATFORM = 'unkown'
# Matplotlib
# ----------
try:
import matplotlib
gui_env = ['TKAgg','GTKAgg','Qt4Agg','WXAgg','Agg']
try:
if not os.isatty(sys.stdout.fileno()): # checks if stdout is connected to a terminal (if not, cron is starting the job)
logger.info("No terminal connected - assuming cron job and using Agg for matplotlib")
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
matplotlib.use('Agg') # For using cron
except:
logger.warning("Problems with identfying cron job - windows system?")
pass
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: problem with matplotlib.\n"
badimports.append(e)
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").split("."))
MATPLOTLIB_VERSION = list(version)
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
logger.info("Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION))
for gui in gui_env:
try:
logger.info("Testing backend {}".format(gui))
try: # will be important from matplotlib3.3 onwards
matplotlib.use(gui, force=True)
except:
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
logger.info("Using backend: {}".format(matplotlib.get_backend()))
from matplotlib.colors import Normalize
from matplotlib.widgets import RectangleSelector, RadioButtons
#from matplotlib.colorbar import ColorbarBase
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError with matplotlib package. Please install to proceed.\n"
logpygen += " ... if installed please check the permissions on .matplotlib in your homedirectory.\n"
badimports.append(e)
# Numpy & SciPy
# -------------
try:
logger.info("Loading Numpy and SciPy...")
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy import stats
from scipy import signal
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import filters
import scipy.optimize as op
import math
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: Python numpy-scipy required - please install to proceed.\n"
badimports.append(e)
# NetCDF
# ------
try:
#print("Loading Netcdf4 support ...")
from netCDF4 import Dataset
except ImportError as e:
#logpygen += "MagPy initiation ImportError: NetCDF not available.\n"
#logpygen += "... if you want to use NetCDF format support please install a current version.\n"
#badimports.append(e)
pass
# NASACDF - SpacePy
# -----------------
def findpath(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
try:
logger.info("Loading SpacePy package cdf support ...")
try:
# check for windows
nasacdfdir = findpath('libcdf.dll','C:\CDF_Distribution') ## new path since nasaCDF3.6
if not nasacdfdir:
nasacdfdir = findpath('libcdf.dll','C:\CDF Distribution')
if nasacdfdir:
os.environ["CDF_LIB"] =str(nasacdfdir)
logger.info("Using CDF lib in %s" % nasacdfdir)
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
else:
# create exception and try linux
x=1/0
except:
os.putenv("CDF_LIB", "/usr/local/cdf/lib")
logger.info("using CDF lib in /usr/local/cdf")
### If files (with tt_2000) have been generated with an outdated leapsecondtable
### an exception will occur - to prevent that:
### 1. make sure to use a actual leapsecond table - update cdf regularly
### 2. temporarly set cdf_validate environment variable to no
# This is how option 2 is included TODO -- add this to initialization options
# as an update of cdf is the way to go and not just deactivating the error message
os.putenv("CDF_VALIDATE", "no")
logger.info("... deactivating cdf validation")
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
except ImportError as e:
logpygen += "MagPy initiation ImportError: NASA cdf not available.\n"
logpygen += "... if you want to use NASA CDF format support please install a current version.\n"
badimports.append(e)
if logpygen == '':
logpygen = "OK"
else:
logger.info(logpygen)
logger.info("Missing packages:")
for item in badimports:
logger.info(item)
logger.info("Moving on anyway...")
### Some Python3/2 compatibility code
### taken from http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
unicode = unicode
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
# Storing function - http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
# by <NAME>
# Used here to pickle baseline functions from header and store it in a cdf key.
# Not really a transparent method but working nicely. Underlying functional parameters to reconstruct the fit
# are stored as well but would require a link to the absolute data.
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
# ----------------------------------------------------------------------------
# Part 2: Define Dictionaries
# ----------------------------------------------------------------------------
# Keys available in DataStream Object:
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
'sectime':float('nan')}
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time
# Formats supported by MagPy read function:
PYMAG_SUPPORTED_FORMATS = {
'IAGA':['rw','IAGA 2002 text format'],
'WDC':['rw','World Data Centre format'],
'IMF':['rw', 'Intermagnet Format'],
'IAF':['rw', 'Intermagnet archive Format'],
'BLV':['rw','Baseline format Intermagnet'],
'IYFV':['rw','Yearly mean format Intermagnet'],
'DKA':['rw', 'K value format Intermagnet'],
'DIDD':['rw','Output format from MinGeo DIDD'],
'GSM19':['r', 'Output format from GSM19 magnetometer'],
'COVJSON':['rw', 'Coverage JSON'],
'JSON':['rw', 'JavaScript Object Notation'],
'LEMIHF':['r', 'LEMI text format data'],
'LEMIBIN':['r','Current LEMI binary data format'],
'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
'OPT':['r', 'Optical hourly data from WIK'],
'PMAG1':['r','Deprecated ELSEC from WIK'],
'PMAG2':['r', 'Current ELSEC from WIK'],
'GDASA1':['r', 'GDAS binary format'],
'GDASB1':['r', 'GDAS text format'],
'RMRCS':['r', 'RCS data output from Richards perl scripts'],
'RCS':['r', 'RCS raw output'],
'METEO':['r', 'Winklbauer METEO files'],
'NEIC':['r', 'WGET data from USGS - NEIC'],
'LNM':['r', 'Thies Laser-Disdrometer'],
'IWT':['r', 'IWT Tiltmeter data'],
'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
'GRAVSG':['r', 'GWR TSF data'],
'CR800':['r', 'CR800 datalogger'],
'IONO':['r', 'IM806 Ionometer'],
'RADON':['r', 'single channel analyser gamma data'],
'USBLOG':['r', 'USB temperature logger'],
#'SERSIN':['r', '?'],
#'SERMUL':['r', '?'],
'PYSTR':['rw', 'MagPy full ascii'],
'AUTODIF':['r', 'Deprecated - AutoDIF ouput data'],
'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
'PYBIN':['r', 'MagPy own binary format'],
'PYASCII':['rw', 'MagPy basic ASCII'],
'POS1TXT':['r', 'POS-1 text format output data'],
'POS1':['r', 'POS-1 binary output at WIC'],
'PMB':['r', 'POS pmb file'],
'QSPIN':['r', 'QSPIN ascii output'],
#'PYNC':['r', 'MagPy NetCDF variant (too be developed)'],
#'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
#'BDV1':['r', 'Budkov GDAS data variant'],
'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
'CSV':['rw','comma-separated CSV data'],
'IMAGCDF':['rw','Intermagnet CDF Format'],
'PYCDF':['rw', 'MagPy CDF variant'],
'NOAAACE':['r', 'NOAA ACE satellite data format'],
'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
'LATEX':['w','LateX data'],
'CS':['r','Cesium G823'],
#'SFDMI':['r', 'San Fernando variometer'],
#'SFGSM':['r', 'San Fernando GSM90'],
'UNKOWN':['-','Unknown']
}
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly mean format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full ascii
'AUTODIF', # AutoDIF ouput data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
'PYNC', # MagPy NetCDF variant (too be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip') #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf') #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt') #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf') #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec') #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt') #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt') #DI file
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
Creates a list object from input files /url data
data is organized in columns
keys are column identifier:
key in keys: see KEYLIST
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_max(self, key, returntime=False):
- stream._get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailymeans(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.mean(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! filled with aic values)
- stream.baseline() -- calculates baseline correction for input stream (datastream)
- stream.dailymeans() -- for DI stream - obtains variometer corrected means fo basevalues
- stream.date_offset() -- Corrects the time column of the selected stream by the offst
- stream.delta_f() -- Calculates the difference of x+y+z to f
- stream.differentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! filled by derivatives)
- stream.extrapolate() -- read absolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add, subtract, etc)
- stream.func_add() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the dominant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the dominant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
- stream.linestruct2ndarray() -- converts linestrcut data to ndarray. should be avoided
- stream.mean() -- Calculates mean values for the specified key, Nan's are regarded for
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() -- Calculating the power spectrum following the numpy fft example
- stream.remove_flagged() -- returns stream (removes data from stream according to flags)
- stream.resample(period) -- Resample stream to given sampling period.
- stream.rotation() -- Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs
- stream.selectkeys(keys) -- ndarray: remove all data except for provided keys (and flag/comment)
- stream.smooth(key) -- smooth the data using a window with requested size
- stream.spectrogram() -- Creates a spectrogram plot of selected keys
- stream.stream2flaglist() -- make flaglist out of stream
- stream.trim() -- returns stream within new time frame
- stream.use_sectime() -- Swap between primary and secondary time (if sectime is available)
- stream.variometercorrection() -- Obtain average DI values at certain timestep(s)
- stream.write() -- Writing Stream to a file
Supporting INTERNAL methods:
----------------------------
A. Standard functions and overrides for list like objects
- self.clear_header(self) -- Clears headers
- self.extend(self,datlst,header) -- Extends stream object
- self.sorting(self) -- Sorts object
B. Internal Methods I: Line & column functions
- self._get_column(key) -- returns a numpy array of selected columns from Stream
- self._put_column(key) -- adds a column to a Stream
- self._move_column(key, put2key) -- moves one column to another key
- self._clear_column(key) -- clears a column to a Stream
- self._get_line(self, key, value) -- returns a LineStruct element corresponding to the first occurence of value within the selected key
- self._reduce_stream(self) -- Reduces stream below a certain limit.
- self._remove_lines(self, key, value) -- removes lines with value within the selected key
- self.findtime(self,time) -- returns index and line for which time equals self.time
B. Internal Methods II: Data manipulation functions
- self._aic(self, signal, k, debugmode=None) -- returns float -- determines Akaki Information Criterion for a specific index k
- self._get_k(self, **kwargs) -- Calculates the k value according to the Bartels scale
- self._get_k_float(self, value, **kwargs) -- Like _get_k, but for testing single values and not full stream keys (used in filtered function)
- self._gf(self, t, tau): -- Gauss function
- self._hf(self, p, x) -- Harmonic function
- self._residual_func(self, func, y) -- residual of the harmonic function
- self._tau(self, period) -- low pass filter with -3db point at period in sec (e.g. 120 sec)
B. Internal Methods III: General utility & NaN handlers
- self._convertstream(self, coordinate, **kwargs) -- Convert coordinates of x,y,z columns in stream
- self._det_trange(self, period) -- starting with coefficients above 1%
- self._find_t_limits(self) -- return times of first and last stream data points
- self._testtime(time) -- returns datetime object
- self._get_min(key) -- returns float
- self._get_max(key) -- returns float
- self._normalize(column) -- returns list,float,float -- normalizes selected column to range 0,1
- nan_helper(self, y) -- Helper to handle indices and logical indices of NaNs
- self._print_key_headers(self) -- Prints keys in datastream with variable and unit.
- self._get_key_headers(self) -- Returns keys in datastream.
- self._drop_nans(self, key) -- Helper to drop lines with NaNs in any of the selected keys.
- self._is_number(self, s) -- ?
Supporting EXTERNAL methods:
----------------------------
Useful functions:
- array2stream -- returns a data stream -- converts a list of arrays to a datastream
- linestruct2ndarray -- returns a data ndarray -- converts a old linestruct format
- denormalize -- returns list -- (column,startvalue,endvalue) denormalizes selected column from range 0,1 ro sv,ev
- find_nearest(array, value) -- find point in array closest to value
- maskNAN(column) -- Tests for NAN values in array and usually masks them
- nearestPow2(x) -- Find power of two nearest to x
*********************************************************************
Standard function description format:
DEFINITION:
Description of function purpose and usage.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- variable: (type) Description.
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> alldata = mergeStreams(pos_stream, lemi_stream, keys=['<KEY>'])
APPLICATION:
Code for simple application.
*********************************************************************
Standard file description format:
Path: *path* (magpy.acquisition.pos1protocol)
Part of package: *package* (acquisition)
Type: *type* (type of file/package)
PURPOSE:
Description...
CONTAINS:
*ThisClass: (Class)
What is this class for?
thisFunction: (Func) Description
DEPENDENCIES:
List all non-standard packages required for file.
+ paths of all MagPy package dependencies.
CALLED BY:
Path to magpy packages that call this part, e.g. magpy.bin.acquisition
*********************************************************************
"""
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
def __init__(self, container=None, header={},ndarray=None):
if container is None:
container = []
self.container = container
if ndarray is None:
ndarray = np.array([np.asarray([]) for elem in KEYLIST])
self.ndarray = ndarray ## Test this! -> for better memory efficiency
#if header is None:
# header = {'Test':'Well, it works'}
#header = {}
self.header = header
#for key in KEYLIST:
# setattr(self,key,np.asarray([]))
#self.header = {'Test':'Well, it works'}
self.progress = 0
# ------------------------------------------------------------------------
# A. Standard functions and overrides for list like objects
# ------------------------------------------------------------------------
def ext(self, columnstructure): # new version of extend function for column operations
"""
the extend and add functions must be replaced in case of
speed optimization
"""
for key in KEYLIST:
self.container.key = np.append(self.container.key, columnstructure.key, 1)
def add(self, datlst):
#try:
assert isinstance(self.container, (list, tuple))
self.container.append(datlst)
#except:
# print list(self.container).append(datlst)
def length(self):
#try:
if len(self.ndarray[0]) > 0:
ll = [len(elem) for elem in self.ndarray]
return ll
else:
try: ## might fail if LineStruct is empty (no time)
if len(self) == 1 and np.isnan(self[0].time):
return [0]
else:
return [len(self)]
except:
return [0]
def replace(self, datlst):
# Replace in stream
# - replace value with existing data
# Method was used by K calc - replaced by internal method there
newself = DataStream()
assert isinstance(self.container, (list, tuple))
ti = list(self._get_column('time'))
try:
ind = ti.index(datlst.time)
except ValueError:
self = self.add(datlst)
return self
except:
return self
li = [elem for elem in self]
del li[ind]
del ti[ind]
li.append(datlst)
return DataStream(li,self.header)
def copy(self):
"""
DESCRIPTION:
method for copying content of a stream to a new stream
APPLICATION:
for non-destructive methods
"""
#print self.container
#assert isinstance(self.container, (list, tuple))
co = DataStream()
#co.header = self.header
newheader = {}
for el in self.header:
newheader[el] = self.header[el]
array = [[] for el in KEYLIST]
if len(self.ndarray[0])> 0:
for ind, key in enumerate(KEYLIST):
liste = []
for val in self.ndarray[ind]: ## This is necessary to really copy the content
liste.append(val)
array[ind] = np.asarray(liste)
co.container = [LineStruct()]
else:
for el in self:
li = LineStruct()
for key in KEYLIST:
if key == 'time':
li.time = el.time
else:
#exec('li.'+key+' = el.'+key)
elkey = getattr(el,key)
setattr(li, key, elkey)
co.add(li)
return DataStream(co.container,newheader,np.asarray(array, dtype=object))
def __str__(self):
return str(self.container)
def __repr__(self):
return str(self.container)
def __getitem__(self, var):
try:
if var in NUMKEYLIST:
return self.ndarray[self.KEYLIST.index(var)].astype(np.float64)
else:
return self.ndarray[self.KEYLIST.index(var)]
except:
return self.container.__getitem__(var)
def __setitem__(self, var, value):
self.ndarray[self.KEYLIST.index(var)] = value
def __len__(self):
return len(self.container)
def clear_header(self):
"""
Remove header information
"""
self.header = {}
def extend(self,datlst,header,ndarray):
array = [[] for key in KEYLIST]
self.container.extend(datlst)
self.header = header
# Some initial check if any data set except timecolumn is contained
datalength = len(ndarray)
#t1 = datetime.utcnow()
if pyvers and pyvers == 2:
ch1 = '-'.encode('utf-8') # not working with py3
ch2 = ''.encode('utf-8')
else:
ch1 = '-'
ch2 = ''
try:
test = []
for col in ndarray:
col = np.array(list(col))
#print (np.array(list(col)).dtype)
if col.dtype in ['float64','float32','int32','int64']:
try:
x = np.asarray(col)[~np.isnan(col)]
except: # fallback 1 -> should not needed any more
#print ("Fallback1")
x = np.asarray([elem for elem in col if not np.isnan(elem)])
else:
#y = np.asarray(col)[col!='-']
#x = np.asarray(y)[y!='']
y = np.asarray(col)[col!=ch1]
x = np.asarray(y)[y!=ch2]
test.append(x)
test = np.asarray(test,dtype=object)
except:
# print ("Fallback -- pretty slowly")
#print ("Fallback2")
test = [[elem for elem in col if not elem in [ch1,ch2]] for col in ndarray]
#t2 = datetime.utcnow()
#print (t2-t1)
emptycnt = [len(el) for el in test if len(el) > 0]
if self.ndarray.size == 0:
self.ndarray = ndarray
elif len(emptycnt) == 1:
print("Tyring to extend with empty data set")
#self.ndarray = np.asarray((list(self.ndarray)).extend(list(ndarray)))
else:
for idx,elem in enumerate(self.ndarray):
if len(ndarray[idx]) > 0:
if len(self.ndarray[idx]) > 0 and len(self.ndarray[0]) > 0:
array[idx] = np.append(self.ndarray[idx], ndarray[idx]).astype(object)
#array[idx] = np.append(self.ndarray[idx], ndarray[idx],1).astype(object)
elif len(self.ndarray[0]) > 0: # only time axis present so far but no data within this elem
fill = ['-']
key = KEYLIST[idx]
if key in NUMKEYLIST or key=='sectime':
fill = [float('nan')]
nullvals = np.asarray(fill * len(self.ndarray[0]))
#array[idx] = np.append(nullvals, ndarray[idx],1).astype(object)
array[idx] = np.append(nullvals, ndarray[idx]).astype(object)
else:
array[idx] = ndarray[idx].astype(object)
self.ndarray = np.asarray(array, dtype=object)
def union(self,column):
seen = set()
seen_add = seen.add
return [ x for x in column if not (x in seen or seen_add(x))]
def removeduplicates(self):
"""
DESCRIPTION:
            Identify duplicate time stamps and remove the affected lines.
            Lines with the first occurrence are kept.
"""
# get duplicates in time column
def list_duplicates(seq):
seen = set()
seen_add = seen.add
return [idx for idx,item in enumerate(seq) if item in seen or seen_add(item)]
if not len(self.ndarray[0]) > 0:
print ("removeduplicates: works only with ndarrays")
return
duplicateindicies = list_duplicates(self.ndarray[0])
array = [[] for key in KEYLIST]
for idx, elem in enumerate(self.ndarray):
if len(elem) > 0:
newelem = np.delete(elem, duplicateindicies)
array[idx] = newelem
return DataStream(self, self.header, np.asarray(array,dtype=object))
def start(self, dateformt=None):
st,et = self._find_t_limits()
return st
def end(self, dateformt=None):
st,et = self._find_t_limits()
return et
def findtime(self,time,**kwargs):
"""
DEFINITION:
Find a line within the container which contains the selected time step
or the first line following this timestep (since 0.3.99 using mode 'argmax')
VARIABLES:
startidx (int) index to start search with (speeding up)
endidx (int) index to end search with (speeding up)
mode (string) define search mode (fastest would be 'argmax')
RETURNS:
The index position of the line and the line itself
"""
startidx = kwargs.get('startidx')
endidx = kwargs.get('endidx')
mode = kwargs.get('mode')
#try:
# from bisect import bisect
#except ImportError:
# print("Import error")
st = date2num(self._testtime(time))
if len(self.ndarray[0]) > 0:
if startidx and endidx:
ticol = self.ndarray[0][startidx:endidx]
elif startidx:
ticol = self.ndarray[0][startidx:]
elif endidx:
ticol = self.ndarray[0][:endidx]
else:
ticol = self.ndarray[0]
try:
if mode =='argmax':
## much faster since 0.3.99 (used in flag_stream)
indexes = [np.argmax(ticol>=st)]
else:
## the following method is used until 0.3.98
indexes = [i for i,x in enumerate(ticol) if x == st] ### FASTER
# Other methods
# #############
#indexes = [i for i,x in enumerate(ticol) if np.allclose(x,st,rtol=1e-14,atol=1e-17)] # if the two time equal within about 0.7 milliseconds
#indexes = [bisect(ticol, st)] ## SELECTS ONLY INDEX WHERE VALUE SHOULD BE inserted
#indexes = [ticol.index(st)]
#print("findtime", indexes)
if not len(indexes) == 0:
if startidx:
retindex = indexes[0] + startidx
else:
retindex = indexes[0]
#print("Findtime index:",retindex)
return retindex, LineStruct()
else:
return 0, []
#return list(self.ndarray[0]).index(st), LineStruct()
except:
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
for index, line in enumerate(self):
if line.time == st:
return index, line
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
def _find_t_limits(self):
"""
DEFINITION:
Find start and end times in stream.
RETURNS:
Two datetime objects, start and end.
"""
if len(self.ndarray[0]) > 0:
t_start = num2date(np.min(self.ndarray[0].astype(float))).replace(tzinfo=None)
t_end = num2date(np.max(self.ndarray[0].astype(float))).replace(tzinfo=None)
else:
try: # old type
t_start = num2date(self[0].time).replace(tzinfo=None)
t_end = num2date(self[-1].time).replace(tzinfo=None)
except: # empty
t_start,t_end = None,None
return t_start, t_end
def _print_key_headers(self):
print("%10s : %22s : %28s" % ("MAGPY KEY", "VARIABLE", "UNIT"))
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
except:
header = None
try:
unit = self.header['unit-col-'+key]
except:
unit = None
print("%10s : %22s : %28s" % (key, header, unit))
def _get_key_headers(self,**kwargs):
"""
        DEFINITION:
            get a list of existing keys in stream (optionally restricted to numerical keys).
        PARAMETERS:
        kwargs:
            - limit:        (int)  limit the length of the list
            - numerical:    (bool) if True, select only numerical keys
RETURNS:
- keylist: (list) a list like ['x','y','z']
EXAMPLE:
>>> data_stream._get_key_headers(limit=1)
"""
limit = kwargs.get('limit')
numerical = kwargs.get('numerical')
if numerical:
TESTLIST = FLAGKEYLIST
else:
TESTLIST = KEYLIST
keylist = []
"""
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
try:
unit = self.header['unit-col-'+key]
except:
unit = None
keylist.append(key)
except:
header = None
"""
if not len(keylist) > 0: # e.g. Testing ndarray
for ind,elem in enumerate(self.ndarray): # use the long way
if len(elem) > 0 and ind < len(TESTLIST):
if not TESTLIST[ind] == 'time':
keylist.append(TESTLIST[ind])
if not len(keylist) > 0: # e.g. header col-? does not contain any info
#for key in FLAGKEYLIST[1:]: # use the long way
for key in TESTLIST[1:]: # use the long way
col = self._get_column(key)
if len(col) > 0:
#if not len(col) == 1 and not ( # maybe add something to prevent reading empty LineStructs)
if len(col) == 1:
                        if col[0] in ['-', ''] or (isinstance(col[0], float) and np.isnan(col[0])):
pass
else:
keylist.append(key)
if limit and len(keylist) > limit:
keylist = keylist[:limit]
return keylist
def _get_key_names(self):
"""
DESCRIPTION:
get the variable names for each key
APPLICATION:
keydict = self._get_key_names()
"""
keydict = {}
for key in KEYLIST:
kname = self.header.get('col-'+key)
keydict[kname] = key
return keydict
def dropempty(self):
"""
DESCRIPTION:
Drop empty arrays from ndarray and store their positions
"""
if not len(self.ndarray[0]) > 0:
return self.ndarray, np.asarray([])
newndarray = []
indexarray = []
for ind,elem in enumerate(self.ndarray):
if len(elem) > 0:
newndarray.append(np.asarray(elem).astype(object))
indexarray.append(ind)
keylist = [el for ind,el in enumerate(KEYLIST) if ind in indexarray]
return np.asarray(newndarray), keylist
def fillempty(self, ndarray, keylist):
"""
DESCRIPTION:
Fills empty arrays into ndarray at all position of KEYLIST not provided in keylist
"""
if not len(ndarray[0]) > 0:
return self
        if len(self.ndarray) == len(KEYLIST):
return self
lst = list(ndarray)
for i,key in enumerate(KEYLIST):
if not key in keylist:
lst.insert(i,[])
newndarray = np.asarray(lst,dtype=object)
return newndarray
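    # Round-trip sketch: dropempty/fillempty are meant to be used as a pair, e.g.
    #   reduced, keys = stream.dropempty()
    #   stream.ndarray = stream.fillempty(reduced, keys)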
def sorting(self):
"""
Sorting data according to time (maybe generalize that to some key)
"""
try: # old LineStruct part
liste = sorted(self.container, key=lambda tmp: tmp.time)
        except:
            liste = self.container  # fall back to the unsorted container if elements lack a time attribute
if len(self.ndarray[0]) > 0:
self.ndarray, keylst = self.dropempty()
#self.ndarray = self.ndarray[:, np.argsort(self.ndarray[0])] # does not work if some rows have a different length)
ind = np.argsort(self.ndarray[0])
for i,el in enumerate(self.ndarray):
if len(el) == len(ind):
self.ndarray[i] = el[ind]
else:
#print("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("len(t-axis)=%d len(%s)=%d" % (len(self.ndarray[0]), KEYLIST[i], len(self.ndarray[i])))
self.ndarray[i] = np.empty(len(self.ndarray[0])) * np.nan
self.ndarray = self.fillempty(self.ndarray,keylst)
for idx,el in enumerate(self.ndarray):
self.ndarray[idx] = np.asarray(self.ndarray[idx]).astype(object)
else:
self.ndarray = self.ndarray
return DataStream(liste, self.header, self.ndarray)
# ------------------------------------------------------------------------
# B. Internal Methods: Line & column functions
# ------------------------------------------------------------------------
def _get_line(self, key, value):
"""
        returns a LineStruct element corresponding to the first occurrence of value within the selected key
e.g.
st = st._get_line('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
        lines = [elem for elem in self if getattr(elem, key) == value]
return lines[0]
def _take_columns(self, keys):
"""
DEFINITION:
            extract selected columns of the given keys (old LineStruct format - deprecated)
"""
resultstream = DataStream()
for elem in self:
line = LineStruct()
line.time = elem.time
resultstream.add(line)
resultstream.header = {}
for key in keys:
if not key in KEYLIST:
pass
elif not key == 'time':
col = self._get_column(key)
#print key, len(col)
try:
resultstream.header['col-'+key] = self.header['col-'+key]
except:
pass
try:
resultstream.header['unit-col-'+key] = self.header['unit-col-'+key]
except:
pass
resultstream = resultstream._put_column(col,key)
return resultstream
def _remove_lines(self, key, value):
"""
removes lines with value within the selected key
e.g.
st = st._remove_lines('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
        lst = [elem for elem in self if not getattr(elem, key) == value]
return DataStream(lst, self.header)
def _get_column(self, key):
"""
Returns a numpy array of selected column from Stream
Example:
columnx = datastream._get_column('x')
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
# Speeded up this technique:
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
col = self[key]
except:
col = self.ndarray[ind]
return col
# Check for initialization value
#testval = self[0][ind]
# if testval == KEYINITDICT[key] or isnan(testval):
# return np.asarray([])
try:
col = np.asarray([row[ind] for row in self])
#get the first ten elements and test whether nan is there -- why ??
"""
try: # in case of string....
novalfound = True
for ele in col[:10]:
if not isnan(ele):
novalfound = False
if novalfound:
return np.asarray([])
except:
return col
"""
return col
except:
return np.asarray([])
def _put_column(self, column, key, **kwargs):
"""
DEFINITION:
adds a column to a Stream
PARAMETERS:
column: (array) single list with data with equal length as stream
key: (key) key to which the data is written
Kwargs:
columnname: (string) define a name
columnunit: (string) define a unit
RETURNS:
- DataStream object
EXAMPLE:
>>> stream = stream._put_column(res, 't2', columnname='Rain',columnunit='mm in 1h')
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
columnname = kwargs.get('columnname')
columnunit = kwargs.get('columnunit')
if not key in KEYLIST:
raise ValueError("Column key not valid")
if len(self.ndarray[0]) > 0:
ind = KEYLIST.index(key)
self.ndarray[ind] = np.asarray(column)
else:
if not len(column) == len(self):
raise ValueError("Column length does not fit Datastream")
for idx, elem in enumerate(self):
setattr(elem, key, column[idx])
        if not columnname:
            if self.header.get('col-%s' % key) == '':
                self.header['col-%s' % key] = key
        else:
            self.header['col-%s' % key] = columnname
        if not columnunit:
            if self.header.get('unit-col-%s' % key) == '':
                self.header['unit-col-%s' % key] = 'arb'
        else:
            self.header['unit-col-%s' % key] = columnunit
return self
def _move_column(self, key, put2key):
'''
DEFINITION:
Move column of key "key" to key "put2key".
Simples.
PARAMETERS:
Variables:
- key: (str) Key to be moved.
- put2key: (str) Key for 'key' to be moved to.
RETURNS:
- stream: (DataStream) DataStream object.
EXAMPLE:
>>> data_stream._move_column('f', 'var1')
'''
if not key in KEYLIST:
logger.error("_move_column: Column key %s not valid!" % key)
if key == 'time':
logger.error("_move_column: Cannot move time column!")
if not put2key in KEYLIST:
logger.error("_move_column: Column key %s (to move %s to) is not valid!" % (put2key,key))
if len(self.ndarray[0]) > 0:
col = self._get_column(key)
self =self._put_column(col,put2key)
return self
try:
            for i, elem in enumerate(self):
                setattr(elem, put2key, getattr(elem, key))
                if key in NUMKEYLIST:
                    setattr(elem, key, float("NaN"))
                else:
                    setattr(elem, key, "-")
            try:
                self.header['col-%s' % put2key] = self.header['col-%s' % key]
                self.header['unit-col-%s' % put2key] = self.header['unit-col-%s' % key]
                self.header['col-%s' % key] = None
                self.header['unit-col-%s' % key] = None
except:
logger.error("_move_column: Error updating headers.")
logger.info("_move_column: Column %s moved to column %s." % (key, put2key))
except:
logger.error("_move_column: It's an error.")
return self
def _drop_column(self,key):
"""
remove a column of a Stream
"""
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
self.ndarray[ind] = np.asarray([])
except:
# Some array don't allow that, shape error e.g. PYSTRING -> then use this
                array = [np.asarray(el) if idx != ind else np.asarray([]) for idx,el in enumerate(self.ndarray)]
self.ndarray = np.asarray(array,dtype=object)
colkey = "col-%s" % key
colunitkey = "unit-col-%s" % key
try:
self.header.pop(colkey, None)
self.header.pop(colunitkey, None)
except:
print("_drop_column: Error while dropping header info")
else:
print("No data available or LineStruct type (not supported)")
return self
def _clear_column(self, key):
"""
remove a column to a Stream
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
if not key in KEYLIST:
raise ValueError("Column key not valid")
for idx, elem in enumerate(self):
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
return self
def _reduce_stream(self, pointlimit=100000):
"""
DEFINITION:
Reduces size of stream by picking for plotting methods to save memory
when plotting large data sets.
Does NOT filter or smooth!
This function purely removes data points (rows) in a
periodic fashion until size is <100000 data points.
(Point limit can also be defined.)
PARAMETERS:
Kwargs:
- pointlimit: (int) Max number of points to include in stream. Default is 100000.
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
EXAMPLE:
>>> lessdata = ten_Hz_data._reduce_stream(pointlimit=500000)
"""
size = len(self)
div = size/pointlimit
divisor = math.ceil(div)
count = 0.
lst = []
if divisor > 1.:
for elem in self:
if count%divisor == 0.:
lst.append(elem)
count += 1.
else:
logger.warning("_reduce_stream: Stream size (%s) is already below pointlimit (%s)." % (size,pointlimit))
return self
logger.info("_reduce_stream: Stream size reduced from %s to %s points." % (size,len(lst)))
return DataStream(lst, self.header)
def _remove_nancolumns(self):
"""
        DEFINITION:
            Remove any columns solely filled with nan values
        APPLICATION:
            called by plot methods in mpplot
        RETURNS:
        - DataStream:   (DataStream) New stream with all-nan columns removed.
"""
array = [[] for key in KEYLIST]
if len(self.ndarray[0]) > 0:
for idx, elem in enumerate(self.ndarray):
if len(self.ndarray[idx]) > 0 and KEYLIST[idx] in NUMKEYLIST:
lst = list(self.ndarray[idx])
#print KEYLIST[idx],lst[0]
if lst[1:] == lst[:-1] and np.isnan(float(lst[0])):
                        array[idx] = np.asarray([])
from mikkel_tools.MiClass import MiClass
import mikkel_tools.utility as mt_util
import matplotlib.pyplot as plt
import pyshtools
import scipy.linalg as spl
import pickle
import numpy as np
import mikkel_tools.GMT_tools as gt
import os
#import utility as sds_util
class SDSS(MiClass):
""" Class for performing spherical direct sequential simulation """
def __init__(self, comment, N_SH = 60, sim_type = "core", sat_height = 350, N_SH_secondary = None):
super().__init__(sat_height = sat_height)
self.comment = comment
self.class_abs_path = os.path.dirname(__file__)
# Initial constants related to spherical harmonics and Earth system size.
self.N_SH = N_SH
self.N_SH_secondary = N_SH_secondary
self.sim_type = sim_type
def make_grid(self, r_grid, grid, calc_sph_d = False, N_grid = 1000):
# Initialize
self.r_grid = r_grid
self.grid = grid
self.sph_d = None
# Generate equal area grid
if isinstance(grid,str):
self.N_grid = N_grid
N_grid_orig = self.N_grid
check_flag = False
if grid == "equal_area":
while check_flag is False:
points_polar = mt_util.eq_point_set_polar(self.N_grid) # Compute grid with equal area grid functions
# Set lat and lon from estimated grid
self.lon = points_polar[:,0]*180/np.pi
self.lat = 90 - points_polar[:,1]*180/np.pi
# Determine equal area grid specifics used for defining the integration area
s_cap, n_regions = mt_util.eq_caps(self.N_grid)
self.n_regions = n_regions.T
self.s_cap = s_cap
if self.N_grid == int(np.sum(n_regions)):
check_flag = True
if N_grid_orig - self.N_grid != 0:
print("")
print("___ CHANGES TO GRID ___")
print("N = {}, not compatible for equal area grid".format(N_grid_orig))
print("N has been set to {}".format(self.N_grid))
else:
self.N_grid -= 1
self.handle_poles()
# Generate Gauss-Legendre quadrature grid
elif grid == "gauss_leg":
self.gauss_leg_n_from_N = int(np.ceil(np.sqrt(self.N_grid/2))) # Approximate required Gauss-Legendre grid size from defined N_grid
gauss_leg = np.polynomial.legendre.leggauss(self.gauss_leg_n_from_N) # Use built-in numpy function to generate grid
# Set lat and lon range from estimated grid
lat = 90-np.flipud(np.arccos(gauss_leg[0]).reshape(-1,1))*180/np.pi
lon = np.arange(0,2*np.pi,np.pi/self.gauss_leg_n_from_N)*180/np.pi
weights, none = np.meshgrid(gauss_leg[1],lon,indexing='ij') # Get weights for quadrature on grid
self.weights = np.ravel(weights)
# Compute full lat/lon grid
lat, lon = np.meshgrid(lat,lon,indexing='ij')
self.lon = lon.ravel()
self.lat = lat.ravel()
self.N_grid = 2*self.gauss_leg_n_from_N**2 # Update N_grid
# Generate Lebedev quadrature grid
elif grid == "lebedev":
import quadpy
# Lebedev grid generation from quadpy is limited to the following two choices
if self.N_grid >= 5000:
scheme = quadpy.sphere.lebedev_131()
else:
scheme = quadpy.sphere.lebedev_059()
# Set lat and lon from estimated grid
coords = scheme.azimuthal_polar
self.lon = 180+coords[:,0]*180/np.pi
self.lat = 90-coords[:,1]*180/np.pi
self.weights = np.ravel(scheme.weights) # Get weights for quadrature on grid
self.N_grid = len(self.weights) # Update N_grid according to Lebedev grid
else:
self.lon = grid[:,0]
self.lat = grid[:,1]
self.N_grid = len(self.lon)
# Compute spherical distances between all points on grid if required
if calc_sph_d is True:
lon_mesh, lat_mesh = np.meshgrid(self.lon, self.lat, indexing='ij')
self.sph_d = mt_util.haversine(self.r_grid, lon_mesh, lat_mesh, lon_mesh.T, lat_mesh.T)
def handle_poles(self):
import numpy as np
# Remove the first and last grid points (the poles) and the corresponding structure related components
idx_end_core = self.N_grid-1
self.lat = np.delete(self.lat,[0,idx_end_core],0)
self.lon = np.delete(self.lon,[0,idx_end_core],0)
self.N_grid = idx_end_core-1
self.n_regions = np.delete(self.n_regions,-1,1)
self.n_regions = np.delete(self.n_regions,0,1)
self.s_cap = np.delete(self.s_cap,-1,0)
self.s_cap = np.delete(self.s_cap,0,0)
self.N_grid = idx_end_core-1
if self.sph_d is not None:
self.sph_d = np.delete(self.sph_d,[0,idx_end_core],0)
self.sph_d = np.delete(self.sph_d,[0,idx_end_core],1)
def data(self, *args):
# Generate design matrix for grid
A_r, A_theta, A_phi = gt.design_SHA(self.r_grid/self.a, (90.0-self.lat)*self.rad, self.lon*self.rad, self.N_SH)
G = np.vstack((A_r, A_theta, A_phi))
# Load Gauss coefficients from data files
if np.logical_or(self.sim_type == "core", self.sim_type == "sat"):
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
elif self.sim_type == "surface":
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
else:
Gauss_in = np.loadtxt(args[0], comments='%')
# Compute Gauss coefficients as vector
g = mt_util.gauss_vector(Gauss_in, self.N_SH, i_n = 2, i_m = 3)
# Generate field data
#data_dynamo = np.matrix(G)*np.matrix(g).T
data_dynamo = np.matmul(G,g.T)
data = np.array(data_dynamo[:len(A_r)]).ravel()
self.data = np.zeros((self.N_grid,))
self.data = data.copy()
self.r_grid_repeat = np.ones(self.N_grid,)*self.r_grid
# Target statistics
self.target_var = np.var(self.data)
self.target_mean = 0.0
def load_swarm(self, dataset, use_obs = False, target_var = None, target_var_factor = None):
# Load swarm samples
data_swarm = {"SW_A":np.loadtxt("swarm_data/SW_A_AprilMayJune18_dark_quiet_NEC.txt",comments="%"), "SW_B":np.loadtxt("swarm_data/SW_B_AprilMayJune18_dark_quiet_NEC.txt",comments="%"), "SW_C":np.loadtxt("swarm_data/SW_C_AprilMayJune18_dark_quiet_NEC.txt",comments="%")}
if dataset == "A":
data_swarm = {"obs":data_swarm["SW_A"][:,13], "radius":data_swarm["SW_A"][:,1], "theta":(data_swarm["SW_A"][:,2]), "phi":data_swarm["SW_A"][:,3], "N":data_swarm["SW_A"][:,13].shape[0]}
elif dataset == "B":
data_swarm = {"obs":data_swarm["SW_B"][:,13], "radius":data_swarm["SW_B"][:,1], "theta":(data_swarm["SW_B"][:,2]), "phi":data_swarm["SW_B"][:,3], "N":data_swarm["SW_B"][:,13].shape[0]}
elif dataset == "C":
data_swarm = {"obs":data_swarm["SW_C"][:,13], "radius":data_swarm["SW_C"][:,1], "theta":(data_swarm["SW_C"][:,2]), "phi":data_swarm["SW_C"][:,3], "N":data_swarm["SW_C"][:,13].shape[0]}
elif dataset == "ABC":
data_swarm = {"obs":np.hstack((data_swarm["SW_A"][:,13],data_swarm["SW_B"][:,13],data_swarm["SW_C"][:,13])),
"radius":np.hstack((data_swarm["SW_A"][:,1],data_swarm["SW_B"][:,1],data_swarm["SW_C"][:,1])),
"theta":np.hstack(((data_swarm["SW_A"][:,2]),(data_swarm["SW_B"][:,2]),(data_swarm["SW_C"][:,2]))),
"phi":np.hstack((data_swarm["SW_A"][:,3],data_swarm["SW_B"][:,3],data_swarm["SW_C"][:,3])),
"N":np.hstack((data_swarm["SW_A"][:,13],data_swarm["SW_B"][:,13],data_swarm["SW_C"][:,13])).shape[0]}
self.grid_theta = data_swarm["theta"]
self.grid_phi = data_swarm["phi"]
self.grid_radial = data_swarm["radius"]
self.grid_obs = data_swarm["obs"]
self.grid_N = data_swarm["N"]
if use_obs == True:
self.data = self.grid_obs
# Target statistics
if target_var_factor is not None:
self.target_var = target_var_factor*np.var(self.data)
elif target_var == None:
self.target_var = np.var(self.data)
else:
self.target_var = target_var
self.target_mean_true = np.mean(self.data)
self.target_mean = 0.0
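    # Example (sketch): load the combined Swarm A/B/C samples and use them as data
    #   self.load_swarm("ABC", use_obs=True)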
def generate_map(self, grid_type = "glq", target_var = None, target_var_factor = None, *args):
# Load Gauss coefficients from data files
if np.logical_or(self.sim_type == "core", self.sim_type == "sat"):
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
elif self.sim_type == "core_alt":
import hdf5storage
#g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
g_ens = -(hdf5storage.loadmat("mikkel_tools/models_shc/Gauss_Bsurf_2021.mat")["gnm"].T)[:,:].copy()
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.r_cmb, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()[:,200:]
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.9995*np.mean(var_ens), var_ens<1.0005*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
elif self.sim_type == "core_ens":
g_ens = np.genfromtxt("mikkel_tools/models_shc/gnm_midpath.dat").T*10**9
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.r_cmb, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()[:,200:]
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.9995*np.mean(var_ens), var_ens<1.0005*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
#self.g_ens = g_ens
elif self.sim_type == "lith_ens":
#g_ens = np.load("mikkel_tools/models_shc/lithosphere_g_in_rotated.npy")
g_ens = np.load("mikkel_tools/models_shc/LiP_ensemble_N500_n120_p05_vary_crust.npy")
#self.lith_ens_cut = 100
#g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),::self.lith_ens_cut]
g_ens = g_ens[:mt_util.shc_vec_len(self.N_SH),:]
#R = mt_util.lowe_shspec(self.N_SH, self.a, self.a, g_ens)
#g_ens = g_ens[:,np.mean(R,axis=0)>5]
self.ensemble_B(g_ens, nmax = self.N_SH, r_at = self.a, grid_type = "glq")
self.m_ens = self.B_ensemble[:,0,:].copy()
var_ens = np.var(self.m_ens, axis=0)
idx_close_to_var = np.argwhere(np.logical_and(var_ens>0.95*np.mean(var_ens), var_ens<1.05*np.mean(var_ens)))
g = np.ravel(g_ens[:,idx_close_to_var[-1]])
N_SH_max = self.N_SH
self.ens_idx = int(idx_close_to_var[-1])
elif self.sim_type == "surface":
Gauss_in = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
elif self.sim_type == "separation":
Gauss_in_core = np.loadtxt('mikkel_tools/models_shc/Julien_Gauss_JFM_E-8_snap.dat')
Gauss_in_lithos = np.loadtxt('mikkel_tools/models_shc/Masterton_13470_total_it1_0.glm')
g_c = mt_util.gauss_vector(Gauss_in_core, self.N_SH, i_n = 2, i_m = 3)
g_l = mt_util.gauss_vector(Gauss_in_lithos, self.N_SH_secondary, i_n = 2, i_m = 3)
g_zip = (g_c,g_l)
idx_zip_min = np.argmin((g_c.shape[0],g_l.shape[0]))
idx_zip_max = np.argmax((g_c.shape[0],g_l.shape[0]))
g = g_zip[idx_zip_max].copy()
g[:g_zip[idx_zip_min].shape[0]] += g_zip[idx_zip_min]
N_SH_max = np.max((self.N_SH, self.N_SH_secondary))
else:
Gauss_in = np.loadtxt(args[0], comments='%')
if np.logical_and.reduce((self.sim_type != "separation", self.sim_type != "core_ens", self.sim_type != "lith_ens", self.sim_type != "core_alt")):
# Compute Gauss coefficients as vector
g = mt_util.gauss_vector(Gauss_in, self.N_SH, i_n = 2, i_m = 3)
N_SH_max = self.N_SH
# Generate field
self.ensemble_B(g, nmax = N_SH_max, N_mf = 2, mf = True, nmf = False, r_at = self.r_grid, grid_type = grid_type)
self.data = self.B_ensemble[:,0]
del self.B_ensemble
"""
if grid_type == "glq":
self.data = self.B_ensemble_glq[:,0]
del self.B_ensemble_glq
elif grid_type == "even":
self.data = self.B_ensemble_even[:,0]
del self.B_ensemble_even
elif grid_type == "eqa":
self.data = self.B_ensemble_eqa[:,0]
del self.B_ensemble_eqa
elif grid_type == "swarm":
self.data = self.B_ensemble_swarm[:,0]
"""
if grid_type != "swarm":
self.r_grid_repeat = np.ones(self.N_grid,)*self.r_grid
# Target statistics
if target_var_factor is not None:
self.target_var = target_var_factor*np.var(self.data)
elif target_var == None:
self.target_var = np.var(self.data)
else:
self.target_var = target_var
self.target_mean_true = np.mean(self.data)
self.target_mean = 0.0
self.g_prior = g
def condtab(self, normsize = 1001, model_hist = False, table = 'rough', quantiles = None,
rangn_lim = 3.5, rangn_N = 501, rangv_lim = 2.0, rangv_N = 101, rangn_geomspace = False):
"""
Conditional distribution table
"""
import numpy as np
from scipy.stats import norm, laplace
from sklearn.preprocessing import QuantileTransformer
# Linearly spaced value array with start/end very close to zero/one
start = 1e-16 #Python min
#start = 0.001
linspace = np.linspace(start,1-start,normsize)
# Possible model target histogram cdf/ccdf
if isinstance(model_hist, str) is False:
data_sorted = np.ravel(model_hist)
elif model_hist == True:
ag,bg = laplace.fit(self.data)
mod_data = np.random.laplace(ag,bg,size=100000)
#data_sorted = np.sort(mod_data)
data_sorted = mod_data
elif model_hist == "laplace":
rv = laplace()
self.data = laplace.rvs(loc = 0, scale=1, size=self.N_grid)
self.target_var = np.var(self.data)
self.target_mean = 0.0
#data_sorted = np.sort(self.data)
data_sorted = self.data
set_nmax = self.grid_nmax
C_cilm = pyshtools.expand.SHExpandGLQ(self.data.reshape(self.grid_nmax+1,2*self.grid_nmax+1), self.grid_w_shtools, self.grid_zero, [1, 1, set_nmax])
C_index = np.transpose(pyshtools.shio.SHCilmToCindex(C_cilm))
self.g_prior = mt_util.gauss_vector_zeroth(C_index, set_nmax, i_n = 0, i_m = 1)
self.g_cilm = C_cilm.copy()
elif model_hist == "ensemble":
data_sorted = np.ravel(self.m_ens)
data_sorted = data_sorted[0.5*np.max(np.abs(data_sorted))>np.abs(data_sorted)]
#data_sorted = np.delete(data_sorted, np.abs(data_sorted)>np.max(np.abs(data_sorted))*0.5)
else:
#data_sorted = np.sort(self.data)
data_sorted = self.data
if rangn_geomspace == False:
rangn = np.linspace(-rangn_lim,rangn_lim,rangn_N)
else:
rangn = np.vstack((np.geomspace(-rangn_lim,-start,int(rangn_N/2)).reshape(-1,1),np.zeros((1,1)),np.geomspace(start,rangn_lim,int(rangn_N/2)).reshape(-1,1)))
rangv = np.linspace(start,rangv_lim,rangv_N)
# Normscored local conditional distributions
# Initialize matrices
CQF_dist = np.zeros((len(rangn),len(rangv),len(linspace)))
CQF_mean = np.zeros((len(rangn),len(rangv)))
CQF_var = np.zeros((len(rangn),len(rangv)))
# Perform quantile transformation
if quantiles == None:
quantiles = int(0.1*len(data_sorted))
# QuantileTransformer setup
qt = QuantileTransformer(n_quantiles=quantiles, random_state=None, output_distribution='normal',subsample=10e8)
qt.fit(data_sorted.reshape(-1,1))
#vrg = qt.transform(data_sorted.reshape(-1,1))
# Generate CQF distributions, means, and variances
print("")
for i in range(0,len(rangn)):
for j in range(0,len(rangv)):
#CQF_dist[i,j,:] = np.sort(qt.inverse_transform((norm.ppf(linspace,loc=rangn[i],scale=np.sqrt(rangv[j]))).reshape(-1,1)).ravel(),axis=0)
CQF_dist[i,j,:] = qt.inverse_transform((norm.ppf(linspace,loc=rangn[i],scale=np.sqrt(rangv[j]))).reshape(-1,1)).ravel()
CQF_mean[i,j] = np.mean(CQF_dist[i,j,:],axis=0,dtype=np.float64)
CQF_var[i,j] = np.var(CQF_dist[i,j,:],axis=0,ddof=1,dtype=np.float64)
#CQF_var[i,j] = np.var(CQF_dist[i,j,:],axis=0,ddof=0,dtype=np.float64)
self.CQF_dist = CQF_dist
self.CQF_mean = CQF_mean
self.CQF_var = CQF_var
self.rangv = rangv
self.rangn = rangn
self.condtab_normsize = normsize
self.condtab_model_hist = model_hist
self.condtab_table = table
#condtab = {"target variance":target_var, "target variance_dat":target_var_dat, "target mean":target_mean, "target mean_dat":target_mean_dat, "QF norm range":rangn, "QF var range":rangv, "CQF dist":CQF_dist, "CQF mean":CQF_mean, "CQF var":CQF_var, "target normscore":vrg, "compiler":setup["condtab_compiler"], "normsize":normsize, "start":start}
def find_sort_d(self, max_dist = 2000):
import numpy as np
sph_d_ravel = self.sph_d.ravel()
range_d = sph_d_ravel < max_dist
idx_range = np.array(np.where(range_d == True)).ravel()
val_range = sph_d_ravel[idx_range]
idx_sort_val_range = np.argsort(val_range)
self.sort_d = idx_range[idx_sort_val_range]
def data_variogram(self, max_dist = 11000):
"""
Function for calculating variogram from data
"""
import numpy as np
self.find_sort_d(max_dist = max_dist)
cloud_all = np.zeros([self.N_grid, self.N_grid])
for i in range(0,self.N_grid):
#cloud = (self.data[i]-self.data)**2
cloud = 0.5*(self.data[i]-self.data)**2
cloud_all[i,:] = cloud
self.cloud_sorted = cloud_all.ravel()[self.sort_d]
self.sph_d_sorted = self.sph_d.ravel()[self.sort_d]
def data_semivariogram(self, max_cloud, n_lags):
"""
Function for calculating semivariogram from data by taking the mean of
equidistant lags
"""
import numpy as np
pics = np.zeros(n_lags-1)
lags = np.zeros(n_lags-1)
#pic_zero = 0.5*np.mean(self.cloud_sorted[:self.N_grid])
pic_zero = np.mean(self.cloud_sorted[:self.N_grid])
lag_zero = np.mean(self.sph_d_sorted[:self.N_grid])
pics[0] = pic_zero
lags[0] = lag_zero
lags_geom = np.linspace(self.N_grid+2, max_cloud, n_lags, dtype=int)
for n in np.arange(0,n_lags-2):
#pic = 0.5*np.mean(self.cloud_sorted[lags_geom[n]:lags_geom[n+1]:1])
pic = np.mean(self.cloud_sorted[lags_geom[n]:lags_geom[n+1]:1])
pics[n+1] = pic
lag_c = np.mean(self.sph_d_sorted[lags_geom[n]:lags_geom[n+1]:1])
lags[n+1] = lag_c
self.lags = lags
self.pics = pics
def semivariogram_model(self, h, a, C0, C1, C2 = None, C3 = None, sv_mode = 'spherical'):
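        # Example (sketch; h is an array of distances in km, range/sill values are illustrative):
        #   sv = self.semivariogram_model(h, a=2000.0, C0=0.0, C1=1.0, sv_mode='spherical')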
import numpy as np
if sv_mode == 'spherical':
'''
Spherical model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 )
sv_model[len(hla):] = C0 + C1
sv_model = sv_model[hir]
elif sv_mode == 'dub_spherical':
'''
Spherical model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
ha2 = h>C3
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*( 1.5*hla/C3 - 0.5*(hla/C3)**3)
sv_model[len(hla):] = C0 + C1 + C2*( 1.5*hs[len(hla):]/C3 - 0.5*(hs[len(hla):]/C3)**3)
sv_model[ha2[hi]] = C0 + C1 + C2
sv_model = sv_model[hir]
elif sv_mode == 'gaussian':
'''
Gaussian model of the semivariogram
'''
sv_model = C0 + C1*(1-np.exp(-(3*np.ravel(h))**2/a**2))
elif sv_mode == 'exponential':
'''
Exponential model of the semivariogram
'''
import numpy as np
sv_model = C0 + C1*(1-np.exp(-3*h/a))
#sv_model = C0 + C1*(1-np.exp(-h/a))
#sv_model = C0 + C1*(np.exp(-h/a))
elif sv_mode == 'power':
'''
Power model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*hla**a
sv_model[len(hla):] = C0 + C1*np.array(hs[len(hla):])**a
sv_model = sv_model[hir]
elif sv_mode == 'hole':
'''
Hole model of the semivariogram
'''
sv_model = C0 + C1*(1-np.cos(h/a*np.pi))
elif sv_mode == 'hole_damp':
'''
Hole model of the semivariogram
'''
sv_model = C0 + C1*(1-np.exp(-3*h/C2)*np.cos(h/a*np.pi))
elif sv_mode == 'nested_hole_gau':
'''
Hole model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*(1-np.cos(hla/a*np.pi)) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1*(1-np.cos(np.array(hs[len(hla):])/a*np.pi)) + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_gau':
'''
Nested spherical and gaussian model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_exp':
'''
Nested spherical and exponential model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)/a))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))/a))
sv_model = sv_model[hir]
elif sv_mode == 'nested_exp_gau':
'''
Nested exponential and gaussian model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*(1-np.exp(-(3*hla)/a)) + C2*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1*(1-np.exp(-(3*np.array(hs[len(hla):]))/a)) + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
elif sv_mode == 'nested_sph_exp_gau':
'''
Nested spherical and exponential model of the semivariogram
'''
hi = np.argsort(h)
hir = np.argsort(hi)
sv_model = np.zeros(len(h),dtype=np.longdouble)
hs = h[hi]
hla = hs[hs<a]
sv_model[0:len(hla)] = C0 + C1*( 1.5*hla/a - 0.5*(hla/a)**3 ) + C2*(1-np.exp(-(3*hla)/a)) + C3*(1-np.exp(-(3*hla)**2/a**2))
sv_model[len(hla):] = C0 + C1 + C2*(1-np.exp(-(3*np.array(hs[len(hla):]))/a)) + C3*(1-np.exp(-(3*np.array(hs[len(hla):]))**2/a**2))
sv_model = sv_model[hir]
else:
print('Unknown model type')
return
return sv_model
def varioLUT(self, a, C0, C1, C2 = None, C3 = None, sv_model = 'spherical'):
import numpy as np
#from SDSSIM_utility import printProgressBar
'''
semi-variogram LUT generation
'''
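        # Example (sketch; requires make_grid(..., calc_sph_d=True) so that self.sph_d exists):
        #   lut = self.varioLUT(a=2000.0, C0=0.0, C1=self.target_var, sv_model='exponential')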
#vario_lut = np.longdouble(np.zeros([self.N_grid, self.N_grid]))
vario_lut = np.double(np.zeros([self.N_grid, self.N_grid]))
for i in range(0,self.N_grid):
vario_lut[:,i] = self.semivariogram_model(self.sph_d[i,:], a, C0, C1, C2=C2, C3=C3, sv_mode=sv_model)
return vario_lut
def semivar(self, model_lags = 'all', model = 'nested_sph_exp_gau', max_dist = 11000, lag_length = 5,
nolut = False, bounds = True, zero_nugget = False, set_model = False, hit_target_var = False):
from math import inf
import numpy as np
from scipy.optimize import curve_fit
#from sklearn.preprocessing import normalize
self.sv_model_lags = model_lags
self.sv_max_dist = max_dist
self.sv_lag_length = lag_length
self.sv_zero_nugget = zero_nugget
self.data_variogram(max_dist=max_dist)
self.max_cloud = len(self.sort_d)
d_max = np.max(self.sph_d_sorted)
self.n_lags = int(d_max/lag_length) # lags from approx typical distance between core grid points
print("____semi-variogram setup___")
print("")
print("Number of data used: %d" %self.max_cloud)
print("Max data distance: %.3f km" %d_max)
print("Lag length chosen: %.1f km" %lag_length)
print("Number of lags: %d" %self.n_lags)
print("Number of modelling lags:",model_lags)
print("")
self.data_semivariogram(self.max_cloud, self.n_lags)
#print('Generating semi-variogram model')
#print("")
if model_lags == 'all':
lags_model = self.lags
pics_model = self.pics
else:
lags_model = self.lags[:model_lags]
pics_model = self.pics[:model_lags]
# Set model name for plotting and logicals for model selection
self.model_names = {'spherical':'spherical', 'dub_spherical':'double spherical', 'gaussian':'gaussian', 'exponential':'exponential', 'power':'power', 'hole':'hole', 'hole_damp':'dampened hole', 'nested_hole_gau':'hole+Gaussian', 'nested_sph_gau':'spherical+Gaussian', 'nested_sph_exp':'spherical+exponential', 'nested_exp_gau':'exponential+Gaussian', 'nested_sph_exp_gau':'spherical+exponential+Gaussian'}
self.model_select_simple = np.logical_or.reduce((model=='nested_sph_gau', model=='nested_sph_exp', model=='nested_exp_gau', model=='nested_hole_gau', model=='hole_damp'))
self.model_select_advanced = np.logical_or.reduce((model == 'nested_sph_exp_gau', model == 'dub_spherical'))
"""SET MODEL OR NOT"""
if set_model == False:
if model == 'spherical':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3)
elif hit_target_var == True:
def semivar_return(lags_model, a):
return (1.5*lags_model/a-0.5*(lags_model/a)**3)
else:
def semivar_return(lags_model, a, C1):
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3)
elif model == 'dub_spherical':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2, C3):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1.5*lags_model/C3-0.5*(lags_model/C3)**3)
else:
def semivar_return(lags_model, a, C1, C2, C3):
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1.5*lags_model/C3-0.5*(lags_model/C3)**3)
elif model == 'gaussian':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1):
return C1*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'exponential':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.exp(-3*lags_model/a))
#return C0 + C1*(1-np.exp(-lags_model/a))
#return C0 + C1*(np.exp(-lags_model/a))
elif hit_target_var == True:
def semivar_return(lags_model, a):
return (1-np.exp(-3*lags_model/a))
else:
def semivar_return(lags_model, a, C1):
return C1*(1-np.exp(-3*lags_model/a))
elif model == 'power':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*lags_model**a
else:
def semivar_return(lags_model, a, C1):
return C1*lags_model**a
elif model == 'hole':
def semivar_return(lags_model, a, C0, C1):
return C0 + C1*(1-np.cos(lags_model/a*np.pi))
elif model == 'hole_damp':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.exp(-3*lags_model/C2)*np.cos(lags_model/a*np.pi))
elif model == 'nested_hole_gau':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.cos(lags_model/a*np.pi)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_gau':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_exp':
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a))
elif model == 'nested_exp_gau':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2):
return C0 + C1*(1-np.exp(-(3*lags_model)/a)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1, C2):
return C1*(1-np.exp(-(3*lags_model)/a)) + C2*(1-np.exp(-(3*lags_model)**2/a**2))
elif model == 'nested_sph_exp_gau':
if zero_nugget == False:
def semivar_return(lags_model, a, C0, C1, C2, C3):
return C0 + C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a)) + C3*(1-np.exp(-(3*lags_model)**2/a**2))
else:
def semivar_return(lags_model, a, C1, C2, C3): # FOR ZERO NUGGET
return C1*(1.5*lags_model/a-0.5*(lags_model/a)**3) + C2*(1-np.exp(-(3*lags_model)/a)) + C3*(1-np.exp(-(3*lags_model)**2/a**2)) # FOR ZERO NUGGET
else:
print('wrong model type chosen')
if bounds == True:
"""Bounds and start values for curve fit"""
if model == 'power':
if zero_nugget == False:
p0 = [2.0,np.min(pics_model),np.max(pics_model)]
bounds = (0, [2.0, inf, inf])
else:
p0 = [2.0,np.max(pics_model)]
bounds = (0, [2.0, inf])
elif np.logical_or(model=='nested_sph_gau',model=='nested_sph_exp'):
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
elif model=='nested_exp_gau':
if zero_nugget == False:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
else:
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], np.max(pics_model), np.max(pics_model)])
elif model=='nested_hole_gau':
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),np.max(pics_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), np.max(pics_model)])
elif model=='hole_damp':
p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]),np.min(pics_model),np.max(pics_model),5*np.max(lags_model)]
bounds = (0, [lags_model[-1], inf, np.max(pics_model), 10*np.max(lags_model)])
elif model == 'nested_sph_exp_gau':
if zero_nugget == False:
                        p0 = [np.mean(lags_model[-int(len(lags_model)/4.0)]), np.min(pics_model), np.max(pics_model), np.max(pics_model), np.max(pics_model)]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from random import shuffle
import time
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import log_loss
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_file',
type=str,
required=1,
help='Absolute path to the raw train file.')
parser.add_argument(
'--num_features',
type=int,
required=1,
help='num_features')
args = parser.parse_args()
samples = np.fromfile(args.train_file, dtype=float)
print('len(samples): ' + str(len(samples)) )
print('len(samples)/(args.num_features+1): ' + str(len(samples)/(args.num_features+1)) )
num_samples = int(len(samples)/(args.num_features+1))
print('num_samples: ' + str(num_samples))
samples = np.reshape(samples, (num_samples, args.num_features+1))
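# Sketch of a typical continuation (assumed, not part of the original script): the last
# column is taken as the label, the rest as features, and one of the imported classifiers
# is fitted and scored with log-loss.
#   X, y = samples[:, :args.num_features], samples[:, args.num_features]
#   clf = LogisticRegression(max_iter=1000).fit(X, y)
#   print('train log-loss: %.4f' % log_loss(y, clf.predict_proba(X)))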
import os
import numpy as np
import shutil
import math
import matplotlib.pyplot as plt
import random
import argparse
parser = argparse.ArgumentParser(
description='Run generation'
)
parser.add_argument('-clear_files' , default=False, type=bool)
args = parser.parse_args()
pose_num_list = ["four", "eight"]
policy_type = ["curf0covf1"] #["disagreef1"]#["curf1covf3", "curf0covf1"] #, "curf1covf0"] wait for agent train and eval # ["knn1", "cf1"] gen 0616 # ["voxel1", "npoint1", "ntouch1", "random1"] #0615 generated
clear_files = args.clear_files
test_obj_list = ["airplane", "cup", "lightbulb"] #, "spherelarge", "body", "fryingpan"]
vis_root = "exp" # exp uniform_gt two_pose long_step
# eval_dir = "agent_eval"
eval_dir = "agent_trajectory"
save_root = "../data/result/{}/".format(eval_dir)
for root, dirs, files in os.walk("../data/result/{}/".format(eval_dir)):
    is_hit_type = any(ptype in root for ptype in policy_type)
is_obj_test = True
for obj_test in test_obj_list:
if obj_test in root: is_obj_test = True
for pose_num_str in pose_num_list:
is_pose_hit = (pose_num_str == "eight")
if pose_num_str == "four" and not "downleft" in root and not "downright" in root and not "upfront" in root and not "upback" in root:
is_pose_hit = True
if is_obj_test and is_hit_type and not "pose" in root and is_pose_hit:
voxel_cls = root[root.index(eval_dir)+(len(eval_dir))+1:root.index('/gene')]
voxel_config = root[root.index('/gene')+1:].replace('/','_').replace("downback", "pose").replace('downfront', 'pose').replace('downleft', 'pose').replace('downright', 'pose').replace('upback','pose').replace('upfront', 'pose').replace('upleft', 'pose').replace('upright', 'pose')
save_path = os.path.join(save_root, voxel_cls, voxel_config)
# print(root)
if clear_files:
print(">>> clear files {}".format(save_path))
if os.path.isdir(save_path) == True: shutil.rmtree(save_path)
else:
if os.path.isdir(save_path) == False: os.makedirs(save_path)
if not eval_dir == "agent_eval" and not eval_dir == "agent_trajectory":
# ======== load max overlap file
max_overlap = 0
min_overlap = 1
# if "random1" in root: # random no best policy random choose
# npz_files = []
# for file in files:
# if ".npz" in file:
# npz_files.append(file)
# recon_pcd_file = os.path.join(root, random.choice(npz_files))
for file in files:
if ".npz" in file:
file_str = str(file)
previous_occup = file_str[(file_str.index("-")+1):file_str.index(".npz")]
if not "random1" in root and float(previous_occup) > max_overlap:
max_overlap = float(previous_occup)
recon_pcd_file = os.path.join(root, file)
elif "random1" in root and float(previous_occup) <= min_overlap: # random
min_overlap = float(previous_occup)
recon_pcd_file = os.path.join(root, file)
file_path = recon_pcd_file
vis_data = np.load(file_path)['pcd']
save_file_path = os.path.join(save_path, "{}pose_alpha_pointcloud.npz".format(pose_num_str))
# save_imgfile_path = os.path.join(save_path, "twopose_alpha_pointcloud.png")
# ========= save file
if os.path.exists(save_file_path):
exist_data = np.load(save_file_path)['pcd']
concat_data = np.concatenate([vis_data, exist_data])
np.savez(save_file_path, pcd=concat_data)
print(">>> exist {} vis {} final{}".format(len(exist_data), len(vis_data), len(concat_data)))
else:
print(">>> save {}".format(save_file_path))
np.savez(save_file_path, pcd=vis_data)
else: # save each iter
for file in files:
if ".npz" in file and not "pose_alpha" in file:
file_str = str(file)
try:
iter_num = int(file_str[(file_str.index("iternum_")+8):file_str.index("_pointcloud")])
except:
print(file_str[(file_str.index("iternum_")+9):file_str.index("_pointcloud")])
# if (iter_num + 1) % 100 == 0:
if (iter_num in [9, 19, 39, 169, 199, 259, 399, 599, 899, 1049, 1099, 1139, 1499, 1999, 2199] and "down" in root) or (iter_num in [49, 99, 139, 499, 999, 1199] and "up" in root):
recon_pcd_file = os.path.join(root, file)
file_path = recon_pcd_file
vis_data = np.load(file_path)['pcd']
if "up" in root:
iter_num += 1000
save_file_path = os.path.join(save_path, "iternum_{}_{}pose_alpha_pointcloud.npz".format(iter_num, pose_num_str))
# save_imgfile_path = os.path.join(save_path, "twopose_alpha_pointcloud.png")
# ========= save file
if os.path.exists(save_file_path):
exist_data = np.load(save_file_path)['pcd']
                                concat_data = np.concatenate([vis_data, exist_data])
"""
This is a CLASS of predictor.
The idea is to decouple the estimation of system future state from the controller design.
While designing the controller you just chose the predictor you want,
initialize it while initializing the controller and while stepping the controller you just give it current state
and it returns the future states
"""
"""
Using predictor:
1. Initialize while initializing controller
This step load the RNN (if applies) - it make take quite a bit of time
During initialization you only need to provide RNN which should be loaded
2. Call iterativelly three functions
a) setup(initial_state, horizon, etc.)
b) predict(Q)
c) update_rnn
ad a) at this stage you can change the parameters for prediction like e.g. horizon, dt
It also prepares 0 state of the prediction, and tensors for saving the results,
to make b) max performance. This function should be called BEFORE starting solving an optim
ad b) predict is optimized to get the prediction of future states of the system as fast as possible.
It accepts control input (vector) as its only input and is intended to be used at every evaluation of the cost functiomn
ad c) this method updates the internal state of RNN (if applies). It accepts control input for current time step (scalar) as its only input
it should be called only after the optimization problem is solved with the control input used in simulation
"""
#TODO: for the moment it is not possible to update RNN more often than mpc dt
# Updating it more often will lead to false results.
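# Usage sketch of the setup/predict pattern described above (horizon, dt and the initial
# state are illustrative assumptions, not defaults taken from the config):
#   predictor = predictor_ODE(horizon=50, dt=0.02, intermediate_steps=10)
#   predictor.setup(initial_state=s0)        # s0: state vector or (batch x state) array
#   trajectories = predictor.predict(Q)      # Q: (batch x horizon) array of control inputs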
import numpy as np
from SI_Toolkit.load_and_normalize import load_normalization_info, normalize_numpy_array
from CartPole.cartpole_model import (
L, Q2u, cartpole_fine_integration
)
from CartPole.state_utilities import (
ANGLE_IDX, ANGLED_IDX, POSITION_IDX, POSITIOND_IDX, ANGLE_COS_IDX, ANGLE_SIN_IDX,
STATE_VARIABLES
)
import yaml, os
config = yaml.load(open(os.path.join('SI_Toolkit_ApplicationSpecificFiles', 'config.yml'), 'r'), Loader=yaml.FullLoader)
PATH_TO_NORMALIZATION_INFO = config['paths']['PATH_TO_EXPERIMENT_FOLDERS'] + config['paths']['path_to_experiment'] + "NormalizationInfo/"
PATH_TO_NORMALIZATION_INFO += os.listdir(PATH_TO_NORMALIZATION_INFO)[0]
class predictor_ODE:
def __init__(self, horizon, dt, intermediate_steps=1):
try:
self.normalization_info = load_normalization_info(PATH_TO_NORMALIZATION_INFO)
except FileNotFoundError:
print('Normalization info not provided.')
self.batch_size = 1
self.target_position = 0.0
self.target_position_normed = 0.0
self.horizon = horizon
self.intermediate_steps = intermediate_steps
self.t_step = dt / float(self.intermediate_steps)
self.prediction_features_names = STATE_VARIABLES.tolist()
self.prediction_denorm = False
self.batch_mode = False
self.output = None
self.angle = None
self.angleD = None
self.angle_sin = None
self.angle_cos = None
self.angleDD = None
self.position = None
self.positionD = None
self.positionDD = None
def setup(self, initial_state: np.ndarray, prediction_denorm=False):
# The initial state is provided with not valid second derivatives
# Batch_size > 1 allows to feed several states at once and obtain predictions parallely
# Shape of state: (batch size x state variables)
self.batch_size = np.size(initial_state, 0) if initial_state.ndim > 1 else 1
self.batch_mode = not (self.batch_size == 1)
if not self.batch_mode: initial_state = np.expand_dims(initial_state, 0)
self.angleDD = self.positionDD = 0
self.angle, self.angleD, self.position, self.positionD, self.angle_cos, self.angle_sin = (
initial_state[:, ANGLE_IDX],
initial_state[:, ANGLED_IDX],
initial_state[:, POSITION_IDX],
initial_state[:, POSITIOND_IDX],
initial_state[:, ANGLE_COS_IDX],
initial_state[:, ANGLE_SIN_IDX],
)
self.prediction_denorm = prediction_denorm
self.u = np.zeros(shape=(self.batch_size, self.horizon), dtype=np.float32)
self.output = np.zeros((self.batch_size, self.horizon+1, len(self.prediction_features_names)+1), dtype=np.float32)
def next_state(self, k, pole_half_length=L):
"""Wrapper for CartPole ODE. Given a current state (without second derivatives), returns a state after time dt
"""
(
self.angle, self.angleD, self.position, self.positionD, self.angle_cos, self.angle_sin
) = cartpole_fine_integration(
angle=self.angle,
angleD=self.angleD,
angle_cos=self.angle_cos,
angle_sin=self.angle_sin,
position=self.position,
positionD=self.positionD,
u=self.u[:, k],
t_step=self.t_step,
intermediate_steps=self.intermediate_steps,
L=pole_half_length,
)
def predict(self, Q: np.ndarray, pole_half_length=L) -> np.ndarray:
# Shape of Q: (batch size x horizon length)
        if np.size(Q, -1) != self.horizon:
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for locomotion.tasks.soccer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Internal dependencies.
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control.locomotion import soccer
import numpy as np
from six.moves import range
from six.moves import zip
RGBA_BLUE = [.1, .1, .8, 1.]
RGBA_RED = [.8, .1, .1, 1.]
def _walker(name, walker_id, marker_rgba):
return soccer.BoxHead(
name=name,
walker_id=walker_id,
marker_rgba=marker_rgba,
)
def _team_players(team_size, team, team_name, team_color):
team_of_players = []
for i in range(team_size):
team_of_players.append(
soccer.Player(team, _walker("%s%d" % (team_name, i), i, team_color)))
return team_of_players
def _home_team(team_size):
return _team_players(team_size, soccer.Team.HOME, "home", RGBA_BLUE)
def _away_team(team_size):
return _team_players(team_size, soccer.Team.AWAY, "away", RGBA_RED)
def _env(players, disable_walker_contacts=True, observables=None):
return composer.Environment(
task=soccer.Task(
players=players,
arena=soccer.Pitch((20, 15)),
observables=observables,
disable_walker_contacts=disable_walker_contacts,
),
time_limit=1)
def _observables_adder(observables_adder):
if observables_adder == "core":
return soccer.CoreObservablesAdder()
if observables_adder == "core_interception":
return soccer.MultiObservablesAdder(
[soccer.CoreObservablesAdder(),
soccer.InterceptionObservablesAdder()])
raise ValueError("Unrecognized observable_adder %s" % observables_adder)
class TaskTest(parameterized.TestCase):
def _assert_all_count_equal(self, list_of_lists):
"""Check all lists in the list are count equal."""
if not list_of_lists:
return
first = sorted(list_of_lists[0])
for other in list_of_lists[1:]:
self.assertCountEqual(first, other)
@parameterized.named_parameters(
("1vs1_core", 1, "core", 31, True),
("2vs2_core", 2, "core", 39, True),
("1vs1_interception", 1, "core_interception", 39, True),
("2vs2_interception", 2, "core_interception", 47, True),
("1vs1_core_contact", 1, "core", 31, False),
("2vs2_core_contact", 2, "core", 39, False),
("1vs1_interception_contact", 1, "core_interception", 39, False),
("2vs2_interception_contact", 2, "core_interception", 47, False),
)
def test_step_environment(self, team_size, observables_adder, num_obs,
disable_walker_contacts):
env = _env(
_home_team(team_size) + _away_team(team_size),
observables=_observables_adder(observables_adder),
disable_walker_contacts=disable_walker_contacts)
self.assertLen(env.action_spec(), 2 * team_size)
self.assertLen(env.observation_spec(), 2 * team_size)
actions = [np.zeros(s.shape, s.dtype) for s in env.action_spec()]
timestep = env.reset()
for observation, spec in zip(timestep.observation, env.observation_spec()):
self.assertLen(spec, num_obs)
self.assertCountEqual(list(observation.keys()), list(spec.keys()))
for key in observation.keys():
self.assertEqual(observation[key].shape, spec[key].shape)
while not timestep.last():
timestep = env.step(actions)
# TODO(b/124848293): consolidate environment stepping loop for task tests.
@parameterized.named_parameters(
("1vs2", 1, 2, 35),
("2vs1", 2, 1, 35),
("3vs0", 3, 0, 35),
("0vs2", 0, 2, 31),
("2vs2", 2, 2, 39),
("0vs0", 0, 0, None),
)
def test_num_players(self, home_size, away_size, num_observations):
env = _env(_home_team(home_size) + _away_team(away_size))
self.assertLen(env.action_spec(), home_size + away_size)
self.assertLen(env.observation_spec(), home_size + away_size)
actions = [np.zeros(s.shape, s.dtype) for s in env.action_spec()]
timestep = env.reset()
# Members of the same team should have identical specs.
self._assert_all_count_equal(
[spec.keys() for spec in env.observation_spec()[:home_size]])
self._assert_all_count_equal(
[spec.keys() for spec in env.observation_spec()[-away_size:]])
for observation, spec in zip(timestep.observation, env.observation_spec()):
self.assertCountEqual(list(observation.keys()), list(spec.keys()))
for key in observation.keys():
self.assertEqual(observation[key].shape, spec[key].shape)
self.assertLen(spec, num_observations)
while not timestep.last():
timestep = env.step(actions)
def test_all_contacts(self):
env = _env(_home_team(1) + _away_team(1))
def _all_contact_configuration(physics, unused_random_state):
walkers = [p.walker for p in env.task.players]
ball = env.task.ball
x, y, rotation = 0., 0., np.pi / 6.
ball.set_pose(physics, [x, y, 5.])
ball.set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
x, y, rotation = 0., 0., np.pi / 3.
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[0].set_pose(physics, [x, y, 3.], quat)
walkers[0].set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
x, y, rotation = 0., 0., np.pi / 3. + np.pi
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[1].set_pose(physics, [x, y, 1.], quat)
walkers[1].set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
env.add_extra_hook("initialize_episode", _all_contact_configuration)
actions = [np.zeros(s.shape, s.dtype) for s in env.action_spec()]
timestep = env.reset()
while not timestep.last():
timestep = env.step(actions)
def test_symmetric_observations(self):
env = _env(_home_team(1) + _away_team(1))
def _symmetric_configuration(physics, unused_random_state):
walkers = [p.walker for p in env.task.players]
ball = env.task.ball
x, y, rotation = 0., 0., np.pi / 6.
ball.set_pose(physics, [x, y, 0.5])
ball.set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
x, y, rotation = 5., 3., np.pi / 3.
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[0].set_pose(physics, [x, y, 0.], quat)
walkers[0].set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
x, y, rotation = -5., -3., np.pi / 3. + np.pi
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[1].set_pose(physics, [x, y, 0.], quat)
walkers[1].set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
env.add_extra_hook("initialize_episode", _symmetric_configuration)
timestep = env.reset()
obs_a, obs_b = timestep.observation
self.assertCountEqual(list(obs_a.keys()), list(obs_b.keys()))
for k in sorted(obs_a.keys()):
o_a, o_b = obs_a[k], obs_b[k]
np.testing.assert_allclose(o_a, o_b, err_msg=k + " not equal.", atol=1e-6)
def test_symmetric_dynamic_observations(self):
env = _env(_home_team(1) + _away_team(1))
def _symmetric_configuration(physics, unused_random_state):
walkers = [p.walker for p in env.task.players]
ball = env.task.ball
x, y, rotation = 0., 0., np.pi / 6.
ball.set_pose(physics, [x, y, 0.5])
# Ball shooting up. Walkers going tangent.
ball.set_velocity(physics, velocity=[0., 0., 1.],
angular_velocity=[0., 0., 0.])
x, y, rotation = 5., 3., np.pi / 3.
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[0].set_pose(physics, [x, y, 0.], quat)
walkers[0].set_velocity(physics, velocity=[y, -x, 0.],
angular_velocity=[0., 0., 0.])
x, y, rotation = -5., -3., np.pi / 3. + np.pi
quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]
walkers[1].set_pose(physics, [x, y, 0.], quat)
walkers[1].set_velocity(physics, velocity=[y, -x, 0.],
angular_velocity=[0., 0., 0.])
env.add_extra_hook("initialize_episode", _symmetric_configuration)
timestep = env.reset()
obs_a, obs_b = timestep.observation
self.assertCountEqual(list(obs_a.keys()), list(obs_b.keys()))
for k in sorted(obs_a.keys()):
o_a, o_b = obs_a[k], obs_b[k]
np.testing.assert_allclose(o_a, o_b, err_msg=k + " not equal.", atol=1e-6)
def test_prev_actions(self):
env = _env(_home_team(1) + _away_team(1))
actions = []
for i, player in enumerate(env.task.players):
spec = player.walker.action_spec
actions.append((i + 1) * np.ones(spec.shape, dtype=spec.dtype))
env.reset()
timestep = env.step(actions)
for walker_idx, obs in enumerate(timestep.observation):
np.testing.assert_allclose(
          np.squeeze(obs["prev_action"], axis=0), actions[walker_idx])  # stored prev_action must equal the action that was sent
import time
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
def fft_window(tnum, nfft, window, overlap):
# IN : full length of time series, nfft, window name, overlap ratio
# OUT : bins, 1 x nfft window function
# use overlapping
bins = int(np.fix((int(tnum/nfft) - overlap)/(1.0 - overlap)))
# window function
if window == 'rectwin': # overlap = 0.5
win = np.ones(nfft)
elif window == 'hann': # overlap = 0.5
win = np.hanning(nfft)
elif window == 'hamm': # overlap = 0.5
win = np.hamming(nfft)
elif window == 'kaiser': # overlap = 0.62
win = np.kaiser(nfft, beta=30)
elif window == 'HFT248D': # overlap = 0.84
z = 2*np.pi/nfft*np.arange(0,nfft)
        # NOTE: only the first two cosine terms of the HFT248D window survive in the
        # source; the remaining higher-order terms are left out rather than guessed.
        win = 1 - 1.985844164102*np.cos(z) + 1.791176438506*np.cos(2*z)
    # 'OUT : bins, 1 x nfft window function' -> return both (return reconstructed from that comment)
    return bins, win
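# Minimal usage sketch: a 1024-point Hann window with 50% overlap over a
# 100000-sample record (relies on the reconstructed `return bins, win` above).
def _demo_fft_window():
    bins, win = fft_window(tnum=100000, nfft=1024, window='hann', overlap=0.5)
    print('overlapping bins:', bins)   # number of FFT segments that fit the record
    print('window length:', len(win))  # 1024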
import os
import numpy as np
import argparse
import json
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import gridspec
font = {"size": 30}
matplotlib.rc("font", **font)
def ms2mc(m1, m2):
eta = m1 * m2 / ((m1 + m2) * (m1 + m2))
mchirp = ((m1 * m2) ** (3.0 / 5.0)) * ((m1 + m2) ** (-1.0 / 5.0))
q = m2 / m1
return (mchirp, eta, q)
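# Quick worked example of ms2mc: for an equal-mass binary m1 = m2 = 1.4 Msun,
# eta = 0.25, q = 1, and mchirp = 1.4 * 2**(-1/5) ≈ 1.219 Msun.
def _demo_ms2mc():
    mchirp, eta, q = ms2mc(1.4, 1.4)
    print(mchirp, eta, q)  # ≈ 1.2188, 0.25, 1.0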
def main():
parser = argparse.ArgumentParser(
description="Skymap recovery of kilonovae light curves."
)
parser.add_argument(
"--injection-file",
type=str,
required=True,
help="The bilby injection json file to be used",
)
parser.add_argument(
"--skymap-dir",
type=str,
required=True,
help="skymap file directory with Bayestar skymaps",
)
parser.add_argument(
"--lightcurve-dir",
type=str,
required=True,
help="lightcurve file directory with lightcurves",
)
parser.add_argument("-i", "--indices-file", type=str)
parser.add_argument("-d", "--detections-file", type=str)
parser.add_argument(
"--binary-type", type=str, required=True, help="Either BNS or NSBH"
)
parser.add_argument(
"-c", "--configDirectory", help="gwemopt config file directory.", required=True
)
parser.add_argument(
"--outdir", type=str, required=True, help="Path to the output directory"
)
parser.add_argument(
"--telescope", type=str, default="ZTF", help="telescope to recover"
)
parser.add_argument(
"--tmin",
type=float,
default=0.0,
help="Days to be started analysing from the trigger time (default: 0)",
)
parser.add_argument(
"--tmax",
type=float,
default=3.0,
help="Days to be stoped analysing from the trigger time (default: 14)",
)
parser.add_argument(
"--filters",
type=str,
help="A comma seperated list of filters to use.",
default="g,r",
)
parser.add_argument(
"--generation-seed",
metavar="seed",
type=int,
default=42,
help="Injection generation seed (default: 42)",
)
parser.add_argument("--exposuretime", type=int, required=True)
parser.add_argument(
"--parallel", action="store_true", default=False, help="parallel the runs"
)
parser.add_argument(
"--number-of-cores", type=int, default=1, help="Number of cores"
)
args = parser.parse_args()
# load the injection json file
if args.injection_file:
if args.injection_file.endswith(".json"):
with open(args.injection_file, "rb") as f:
injection_data = json.load(f)
datadict = injection_data["injections"]["content"]
dataframe_from_inj = pd.DataFrame.from_dict(datadict)
else:
print("Only json supported.")
exit(1)
if len(dataframe_from_inj) > 0:
args.n_injection = len(dataframe_from_inj)
indices = np.loadtxt(args.indices_file)
commands = []
lcs = {}
for index, row in dataframe_from_inj.iterrows():
outdir = os.path.join(args.outdir, str(index))
if not os.path.isdir(outdir):
os.makedirs(outdir)
skymap_file = os.path.join(args.skymap_dir, "%d.fits" % indices[index])
lc_file = os.path.join(args.lightcurve_dir, "%d.dat" % index)
lcs[index] = np.loadtxt(lc_file)
efffile = os.path.join(outdir, f"efficiency_true_{index}.txt")
if os.path.isfile(efffile):
continue
if not os.path.isfile(lc_file):
continue
ra, dec = row["ra"] * 360.0 / (2 * np.pi), row["dec"] * 360.0 / (2 * np.pi)
dist = row["luminosity_distance"]
try:
gpstime = row["geocent_time_x"]
except KeyError:
gpstime = row["geocent_time"]
exposuretime = ",".join(
[str(args.exposuretime) for i in args.filters.split(",")]
)
command = f"gwemopt_run --telescopes {args.telescope} --do3D --doTiles --doSchedule --doSkymap --doTrueLocation --true_ra {ra} --true_dec {dec} --true_distance {dist} --doObservability --doObservabilityExit --timeallocationType powerlaw --scheduleType greedy -o {outdir} --gpstime {gpstime} --skymap {skymap_file} --filters {args.filters} --exposuretimes {exposuretime} --doSingleExposure --doAlternatingFilters --doEfficiency --lightcurveFiles {lc_file} --modelType file --configDirectory {args.configDirectory}"
commands.append(command)
print("Number of jobs remaining... %d." % len(commands))
if args.parallel:
from joblib import Parallel, delayed
Parallel(n_jobs=args.number_of_cores)(
delayed(os.system)(command) for command in commands
)
else:
for command in commands:
os.system(command)
absmag, effs, probs = [], [], []
fid = open(args.detections_file, "w")
for index, row in dataframe_from_inj.iterrows():
outdir = os.path.join(args.outdir, str(index))
efffile = os.path.join(outdir, f"efficiency_true_{index}.txt")
absmag.append(np.min(lcs[index][:, 3]))
if not os.path.isfile(efffile):
fid.write("0\n")
effs.append(0.0)
probs.append(0.0)
continue
data_out = np.loadtxt(efffile)
fid.write("%d\n" % data_out)
efffile = os.path.join(outdir, "efficiency.txt")
if not os.path.isfile(efffile):
effs.append(0.0)
else:
with open(efffile, "r") as file:
data_out = file.read()
effs.append(float(data_out.split("\n")[1].split("\t")[4]))
efffile = os.path.join(outdir, f"efficiency_{index}.txt")
if not os.path.isfile(efffile):
probs.append(0.0)
else:
data_out = np.loadtxt(efffile, skiprows=1)
probs.append(data_out[0, 1])
fid.close()
detections = np.loadtxt(args.detections_file)
absmag = np.array(absmag)
effs = np.array(effs)
probs = np.array(probs)
idx = np.where(detections)[0]
idy = np.setdiff1d(np.arange(len(dataframe_from_inj)), idx)
dataframe_from_detected = dataframe_from_inj.iloc[idx]
dataframe_from_missed = dataframe_from_inj.iloc[idy]
absmag_det = absmag[idx]
absmag_miss = absmag[idy]
effs_det = effs[idx]
effs_miss = effs[idy]
probs_det = probs[idx]
probs_miss = probs[idy]
(mchirp_det, eta_det, q_det) = ms2mc(
dataframe_from_detected["mass_1_source"],
dataframe_from_detected["mass_2_source"],
)
(mchirp_miss, eta_miss, q_miss) = ms2mc(
dataframe_from_missed["mass_1_source"], dataframe_from_missed["mass_2_source"]
)
cmap = plt.cm.rainbow
norm = matplotlib.colors.Normalize(vmin=0, vmax=1)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
plotdir = os.path.join(args.outdir, "summary")
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
plt.figure()
plt.scatter(
absmag_det,
dataframe_from_detected["luminosity_distance"],
cmap=cmap,
marker="*",
c=probs_det,
alpha=effs_det,
label="Detected",
)
plt.scatter(
absmag_miss,
dataframe_from_missed["luminosity_distance"],
cmap=cmap,
marker="o",
c=probs_miss,
alpha=effs_miss,
label="Missed",
)
if args.binary_type == "BNS":
plt.xlim([-17.5, -14.0])
elif args.binary_type == "NSBH":
plt.xlim([-16.5, -13.0])
plt.gca().invert_xaxis()
plt.xlabel("Absolute Magnitude")
plt.ylabel("Distance [Mpc]")
plt.legend()
# cbar = plt.colorbar(cmap=cmap, norm=norm)
# cbar.set_label(r'Detection Efficiency')
plotName = os.path.join(plotdir, "missed_found.pdf")
plt.savefig(plotName)
plt.close()
fig = plt.figure(figsize=(20, 16))
gs = gridspec.GridSpec(4, 4)
ax1 = fig.add_subplot(gs[1:4, 0:3])
ax2 = fig.add_subplot(gs[0, 0:3])
ax3 = fig.add_subplot(gs[1:4, 3], sharey=ax1)
ax4 = fig.add_axes([0.03, 0.17, 0.5, 0.10])
plt.setp(ax3.get_yticklabels(), visible=False)
plt.axes(ax1)
plt.scatter(
absmag_det,
1 - probs_det,
s=150 * np.ones(absmag_det.shape),
cmap=cmap,
norm=norm,
marker="*",
alpha=effs_det,
c=effs_det,
)
plt.scatter(
absmag_miss,
1 - probs_miss,
s=150 * np.ones(absmag_miss.shape),
cmap=cmap,
norm=norm,
marker="o",
alpha=effs_miss,
c=effs_miss,
)
legend_elements = [
Line2D(
[0],
[0],
marker="o",
color="w",
label="Missed",
markersize=20,
markerfacecolor="k",
),
Line2D(
[0],
[0],
marker="*",
color="w",
label="Found",
markersize=20,
markerfacecolor="k",
),
]
if args.binary_type == "BNS":
plt.xlim([-17.5, -14.0])
elif args.binary_type == "NSBH":
plt.xlim([-16.5, -13.0])
plt.ylim([0.001, 1.0])
ax1.set_yscale("log")
plt.gca().invert_xaxis()
plt.xlabel("Absolute Magnitude")
plt.ylabel("1 - 2D Probability")
plt.legend(handles=legend_elements, loc=4)
plt.grid()
plt.axes(ax4)
plt.axis("off")
cbar = plt.colorbar(sm, shrink=0.5, orientation="horizontal")
cbar.set_label(r"Detection Efficiency")
plt.axes(ax3)
yedges = np.logspace(-3, 0, 30)
    hist, bin_edges = np.histogram(1 - probs_miss, bins=yedges, density=False)
from detectron2.utils.logger import setup_logger
setup_logger()
import cv2, os, re
import numpy as np
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from densepose.config import add_densepose_config
from densepose.vis.base import CompoundVisualizer
from densepose.vis.densepose_results import DensePoseResultsFineSegmentationVisualizer, DensePoseResultsVisualizer
from densepose.vis.densepose_data_points import DensePoseDataCoarseSegmentationVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.extractor import CompoundExtractor, DensePoseResultExtractor, create_extractor
from densepose.vis.extractor import extract_boxes_xywh_from_instances
from densepose.converters import ToChartResultConverterWithConfidences
from densepose.vis.base import MatrixVisualizer
import torch
import collections
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy.ndimage.interpolation import rotate
from scipy.spatial import ConvexHull
import pandas as pd
from skimage import morphology
# window setting
window_segm = 'segm'
window_bbox = 'bbox'
window_norm = 'norm'
window_dilation = 'dilation'
window_stitched_data = 'stitched data'
# setting
gray_val_scale = 10.625
cmap = cv2.COLORMAP_PARULA
# files of config
densepose_keypoints_dir = os.path.join('output', 'segments')
openpose_keypoints_dir = os.path.join('output', 'data')
norm_segm_dir = os.path.join('output', 'pix')
fname_vitruve_norm = os.path.join('pix', 'vitruve_norm.png')
# data type
# keypoints = {key: (x, y, score)}
# pixel = (x, y)
# segments_xy = [(x1, y1), (x2, y2), ...]
# segm = [[x1, y1]=(b,g,r), [x2, y2]=(b,g,r), ...] -> 2D np.ndarray
# coarse segmentation:
# 0 = Background
# 1 = Torso,
# 2 = Right Hand, 3 = Left Hand, 4 = Left Foot, 5 = Right Foot,
# 6 = Upper Leg Right, 7 = Upper Leg Left, 8 = Lower Leg Right, 9 = Lower Leg Left,
# 10 = Upper Arm Left, 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
# 14 = Head
COARSE_ID = [
'Background',
'Torso',
'RHand', 'LHand', 'LFoot', 'RFoot',
'RThigh', 'LThigh', 'RCalf', 'LCalf',
'LUpperArm', 'RUpperArm', 'LLowerArm', 'RLowerArm',
'Head'
]
# implicit cmap = cv2.COLORMAP_PARULA <= hard-coded!!! ugh!!!
# BGRA -> alpha channel: 0 = transparent, 255 = non-transparent
COARSE_TO_COLOR = {
'Background': [255, 255, 255, 255],
'Torso': [191, 78, 22, 255],
'RThigh': [167, 181, 44, 255],
'LThigh': [141, 187, 91, 255],
'RCalf': [114, 191, 147, 255],
'LCalf': [96, 188, 192, 255],
'LUpperArm': [87, 207, 112, 255],
'RUpperArm': [55, 218, 162, 255],
'LLowerArm': [25, 226, 216, 255],
'RLowerArm': [37, 231, 253, 255],
'Head': [14, 251, 249, 255]
}
# fine segmentation:
# 0 = Background
# 1, 2 = Torso,
# 3 = Right Hand, 4 = Left Hand, 5 = Left Foot, 6 = Right Foot,
# 7, 9 = Upper Leg Right, 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right, 12, 14 = Lower Leg Left,
# 15, 17 = Upper Arm Left, 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left, 20, 22 = Lower Arm Right,
# 23, 24 = Head
FINE_TO_COARSE_SEGMENTATION = {
1: 1,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7,
9: 6,
10: 7,
11: 8,
12: 9,
13: 8,
14: 9,
15: 10,
16: 11,
17: 10,
18: 11,
19: 12,
20: 13,
21: 12,
22: 13,
23: 14,
24: 14
}
# Body 25 Keypoints
JOINT_ID = [
'Nose', 'Neck',
'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist',
'MidHip',
'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle',
'REye', 'LEye', 'REar', 'LEar',
'LBigToe', 'LSmallToe', 'LHeel', 'RBigToe', 'RSmallToe', 'RHeel',
'Background'
]
def _extract_i_from_iuvarr(iuv_arr):
return iuv_arr[0, :, :]
def _extract_u_from_iuvarr(iuv_arr):
return iuv_arr[1, :, :]
def _extract_v_from_iuvarr(iuv_arr):
return iuv_arr[2, :, :]
def extract_segm(result_densepose, is_coarse=True):
iuv_array = torch.cat(
(result_densepose.labels[None].type(torch.float32), result_densepose.uv * 255.0)
).type(torch.uint8)
iuv_array = iuv_array.cpu().numpy()
segm = _extract_i_from_iuvarr(iuv_array)
if is_coarse:
for fine_idx, coarse_idx in FINE_TO_COARSE_SEGMENTATION.items():
segm[segm == fine_idx] = coarse_idx
mask = np.zeros(segm.shape, dtype=np.uint8)
mask[segm > 0] = 1
# matrix = _extract_v_from_iuvarr(iuv_array)
return mask, segm
def _resize(mask, segm, w, h):
interp_method_mask = cv2.INTER_NEAREST
    interp_method_segm = cv2.INTER_LINEAR  # no trailing comma: a 1-tuple here breaks cv2.resize
    if (w != mask.shape[1]) or (h != mask.shape[0]):
        mask = cv2.resize(mask, (w, h), interpolation=interp_method_mask)
    if (w != segm.shape[1]) or (h != segm.shape[0]):
        segm = cv2.resize(segm, (w, h), interpolation=interp_method_segm)
return mask, segm
def _calc_angle(point1, center, point2):
try:
a = np.array(point1)[0:2] - np.array(center)[0:2]
b = np.array(point2)[0:2] - np.array(center)[0:2]
cos_theta = np.dot(a, b)
sin_theta = np.cross(a, b)
rad = np.arctan2(sin_theta, cos_theta)
deg = np.rad2deg(rad)
if np.isnan(rad):
return 0, 0
return rad, deg
except:
return 0, 0
def _rotate(point, center, rad):
# print(point)
x = ((point[0] - center[0]) * np.cos(rad)) - ((point[1] - center[1]) * np.sin(rad)) + center[0]
y = ((point[0] - center[0]) * np.sin(rad)) + ((point[1] - center[1]) * np.cos(rad)) + center[1]
if len(point) == 3:
return [int(x), int(y), point[2]] # for keypoints with score
elif len(point) == 2:
return (int(x), int(y)) # for segments (x, y) without score
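# Quick sanity check for the two geometry helpers above: the angle from (1, 0) to
# (0, 1) around the origin is +90 degrees, and rotating (1, 0) by that angle lands
# (up to integer rounding) on (0, 1).
def _demo_angle_and_rotate():
    rad, deg = _calc_angle(point1=(1, 0), center=(0, 0), point2=(0, 1))
    print(deg)                           # 90.0
    print(_rotate((1, 0), (0, 0), rad))  # (0, 1)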
def _segm_xy(segm, segm_id_list, is_equal=True):
if len(segm_id_list) == 1:
segm_id = segm_id_list[0]
if is_equal:
y, x = np.where(segm == segm_id)
else:
y, x = np.where(segm != segm_id)
elif len(segm_id_list) > 1:
if is_equal:
cond = []
for segm_id in segm_id_list:
cond.append(segm == segm_id)
y, x = np.where(np.logical_or.reduce(tuple(cond)))
        else:
            cond = []
            for segm_id in segm_id_list:
                cond.append(segm != segm_id)
            # every id must differ, so the conditions are AND-ed (OR would match every pixel)
            y, x = np.where(np.logical_and.reduce(tuple(cond)))
return list(zip(x, y))
def _segments_xy_centroid(segments_xy):
x = [segment_xy[0] for segment_xy in segments_xy if not np.isnan(segment_xy[0])]
y = [segment_xy[1] for segment_xy in segments_xy if not np.isnan(segment_xy[1])]
    centroid = (sum(x) / len(x), sum(y) / len(y))  # average over the non-NaN points only
return centroid
def _keypoints_midpoint(keypoint1, keypoint2):
return ((np.array(keypoint1) + np.array(keypoint2)) / 2).astype(int)
def is_valid(keypoints):
# check the scores for each main keypoint, which MUST exist!
# main_keypoints = BODY BOX
main_keypoints = ['Nose', 'Neck', 'RShoulder', 'LShoulder', 'RHip', 'LHip', 'MidHip']
keypoints = dict(zip(JOINT_ID, keypoints))
# filter the main keypoints by score > 0
filtered_keypoints = [key for key, value in keypoints.items() if key in main_keypoints and value[2] > 0]
print('Number of valid keypoints (must be equal to 7):', len(filtered_keypoints))
if len(filtered_keypoints) != 7:
return False
else:
return True
def _get_segments_xy(segm, keypoints):
segments_xy = []
bg_xy = [] # 0
segments_xy.append(bg_xy)
torso_xy = _segm_xy(segm=segm, segm_id_list=[1])
segments_xy.append(torso_xy)
r_hand_xy = [] # 2
l_hand_xy = [] # 3
l_foot_xy = [] # 4
r_foot_xy = [] # 5
segments_xy.append(r_hand_xy)
segments_xy.append(l_hand_xy)
segments_xy.append(l_foot_xy)
segments_xy.append(r_foot_xy)
r_thigh_xy = _segm_xy(segm=segm, segm_id_list=[6])
l_thigh_xy = _segm_xy(segm=segm, segm_id_list=[7])
r_calf_xy = _segm_xy(segm=segm, segm_id_list=[8])
l_calf_xy = _segm_xy(segm=segm, segm_id_list=[9])
segments_xy.append(r_thigh_xy)
segments_xy.append(l_thigh_xy)
segments_xy.append(r_calf_xy)
segments_xy.append(l_calf_xy)
l_upper_arm_xy = _segm_xy(segm=segm, segm_id_list=[10])
r_upper_arm_xy = _segm_xy(segm=segm, segm_id_list=[11])
l_lower_arm_xy = _segm_xy(segm=segm, segm_id_list=[12])
r_lower_arm_xy = _segm_xy(segm=segm, segm_id_list=[13])
segments_xy.append(l_upper_arm_xy)
segments_xy.append(r_upper_arm_xy)
segments_xy.append(l_lower_arm_xy)
segments_xy.append(r_lower_arm_xy)
head_xy = _segm_xy(segm=segm, segm_id_list=[14])
segments_xy.append(head_xy)
# valid segments with keypoints
dict_segments_xy = dict(zip(COARSE_ID, segments_xy))
segments_xy = {}
# head
if len(dict_segments_xy['Head']) > 0 and keypoints['Nose'][2] > 0:
segments_xy['Head'] = {'segm_xy': dict_segments_xy['Head'],
'keypoints':
{'Nose': keypoints['Nose']}
}
# torso
if len(dict_segments_xy['Torso']) > 0:
segments_xy['Torso'] = {'segm_xy': dict_segments_xy['Torso'],
'keypoints':
{'Neck': keypoints['Neck'],
'RShoulder': keypoints['RShoulder'],
'LShoulder': keypoints['LShoulder'],
'MidHip': keypoints['MidHip'],
'RHip': keypoints['RHip'],
'LHip': keypoints['LHip']}
}
# lower limbs
if len(dict_segments_xy['RThigh']) > 0 and 'RKnee' in keypoints and keypoints['RKnee'][2] > 0:
segments_xy['RThigh'] = {'segm_xy': dict_segments_xy['RThigh'],
'keypoints':
{'RKnee': keypoints['RKnee']}
}
if len(dict_segments_xy['LThigh']) > 0 and 'LKnee' in keypoints and keypoints['LKnee'][2] > 0:
segments_xy['LThigh'] = {'segm_xy': dict_segments_xy['LThigh'],
'keypoints':
{'LKnee': keypoints['LKnee']}
}
if len(dict_segments_xy['RCalf']) > 0 and 'RAnkle' in keypoints and keypoints['RAnkle'][2] > 0:
segments_xy['RCalf'] = {'segm_xy': dict_segments_xy['RCalf'],
'keypoints':
{'RAnkle': keypoints['RAnkle']}
}
if len(dict_segments_xy['LCalf']) > 0 and 'LAnkle' in keypoints and keypoints['LAnkle'][2] > 0:
segments_xy['LCalf'] = {'segm_xy': dict_segments_xy['LCalf'],
'keypoints':
{'LAnkle': keypoints['LAnkle']}
}
# upper limbs
if len(dict_segments_xy['RUpperArm']) > 0 and 'RElbow' in keypoints and keypoints['RElbow'][2] > 0:
segments_xy['RUpperArm'] = {'segm_xy': dict_segments_xy['RUpperArm'],
'keypoints':
{'RElbow': keypoints['RElbow']}
}
if len(dict_segments_xy['LUpperArm']) > 0 and 'LElbow' in keypoints and keypoints['LElbow'][2] > 0:
segments_xy['LUpperArm'] = {'segm_xy': dict_segments_xy['LUpperArm'],
'keypoints':
{'LElbow': keypoints['LElbow']}
}
if len(dict_segments_xy['RLowerArm']) > 0 and 'RWrist' in keypoints and keypoints['RWrist'][2] > 0:
segments_xy['RLowerArm'] = {'segm_xy': dict_segments_xy['RLowerArm'],
'keypoints':
{'RWrist': keypoints['RWrist']}
}
if len(dict_segments_xy['LLowerArm']) > 0 and 'LWrist' in keypoints and keypoints['LWrist'][2] > 0:
segments_xy['LLowerArm'] = {'segm_xy': dict_segments_xy['LLowerArm'],
'keypoints':
{'LWrist': keypoints['LWrist']}
}
return segments_xy
def _rotate_to_vertical_pose(segments_xy):
midhip_keypoint = segments_xy['Torso']['keypoints']['MidHip']
neck_keypoint = segments_xy['Torso']['keypoints']['Neck']
# calculate the angle for rotation to vertical pose
reference_point = np.array(midhip_keypoint) + np.array((0, -100, 0))
rad, deg = _calc_angle(point1=neck_keypoint, center=midhip_keypoint, point2=reference_point)
for segment_id, segment in segments_xy.items():
segments_xy[segment_id]['segm_xy'] = np.array([_rotate((x, y), midhip_keypoint, rad) for (x, y) in segment['segm_xy']])
for keypoints_id, keypoints in segment['keypoints'].items():
segments_xy[segment_id]['keypoints'][keypoints_id] = _rotate(keypoints, midhip_keypoint, rad)
return segments_xy
def _rotate_head_around_centroid(segm_xy, keypoint1_ref, keypoint2_ref):
# midpoint of vertical line and horizontal line
centroid = _segments_xy_centroid(segm_xy)
rad, deg = _calc_angle(centroid, keypoint1_ref, keypoint2_ref)
rad += np.pi
segm_xy = np.array([_rotate([x, y], keypoint1_ref, rad) for (x, y) in segm_xy])
keypoint = _rotate(centroid, keypoint1_ref, rad)
return segm_xy, keypoint
def _rotate_limbs_around_midpoint(segm_xy, keypoint, ref_keypoint, is_right, is_leg):
# mid-keypoint
midpoint = _keypoints_midpoint(keypoint1=keypoint, keypoint2=ref_keypoint)
# rotate to horizontal
ref_midpoint = midpoint + np.array([50, 0, 0])
if is_right:
rad, deg = _calc_angle(ref_keypoint, midpoint, ref_midpoint)
if is_leg:
rad -= np.pi/2
else:
rad, deg = _calc_angle(keypoint, midpoint, ref_midpoint)
if is_leg:
rad += np.pi / 2
segm_xy = np.array([_rotate([x, y], midpoint, rad) for (x, y) in segm_xy])
keypoint = midpoint
return segm_xy, keypoint
def _rotate_to_tpose(segments_xy):
# nose -> head (BUT nose is not at the middle point of face, e.g., face right, face left!!!)
# midhip -> torso (DONE in vertical rotation)
# elbow -> upper arm
# wrist -> lower arm
# knee -> thigh
# ankle -> calf
# valid keypoints confirmed by is_valid()
nose_keypoint = segments_xy['Head']['keypoints']['Nose']
neck_keypoint = segments_xy['Torso']['keypoints']['Neck']
rsho_keypoint = segments_xy['Torso']['keypoints']['RShoulder']
lsho_keypoint = segments_xy['Torso']['keypoints']['LShoulder']
midhip_keypoint = segments_xy['Torso']['keypoints']['MidHip']
rhip_keypoint = segments_xy['Torso']['keypoints']['RHip']
lhip_keypoint = segments_xy['Torso']['keypoints']['LHip']
# update midhip keypoint = [vertical height of torso] + [midpoint = (midhip + neck) / 2]
if 'Torso' in segments_xy and len(segments_xy['Torso']['segm_xy']) > 0:
segments_xy['Torso']['keypoints']['MidHip'] = (_euclidian(neck_keypoint, midhip_keypoint), _keypoints_midpoint(neck_keypoint, midhip_keypoint))
# buggy -> update midhip keypoint = (midhip + neck) / 2
# elongated torso <- (1) shoulders go up at both sides; (2) crotch goes down in the middle;
# segments_xy['Torso']['keypoints']['MidHip'] = _keypoints_midpoint(neck_keypoint, midhip_keypoint)
# head -> NOT use Nose, use Centroid of head_xy!!!
# ONE solution to Issue FOUR: NOSE is not at the middle point of the head!!!
# so nose keypoint = head centroid
if 'Head' in segments_xy and len(segments_xy['Head']['segm_xy']) > 0:
segm_xy, keypoint = _rotate_head_around_centroid(segm_xy=segments_xy['Head']['segm_xy'],
keypoint1_ref=neck_keypoint,
keypoint2_ref=midhip_keypoint)
segments_xy['Head']['segm_xy'] = segm_xy
segments_xy['Head']['keypoints']['Nose'] = keypoint
# Upper Limb
# Right
# wrist keypoint = lower arm midpoint
if 'RLowerArm' in segments_xy and 'RUpperArm' in segments_xy and len(segments_xy['RLowerArm']['segm_xy']) > 0 and segments_xy['RLowerArm']['keypoints']['RWrist'][2] > 0 and segments_xy['RUpperArm']['keypoints']['RElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RLowerArm']['segm_xy'],
keypoint=segments_xy['RLowerArm']['keypoints']['RWrist'],
ref_keypoint=segments_xy['RUpperArm']['keypoints']['RElbow'],
is_right=True,
is_leg=False)
segments_xy['RLowerArm']['segm_xy'] = segm_xy
segments_xy['RLowerArm']['keypoints']['RWrist'] = keypoint
# elbow keypoint = upper arm midpoint
if 'RUpperArm' in segments_xy and len(segments_xy['RUpperArm']['segm_xy']) > 0 and segments_xy['RUpperArm']['keypoints']['RElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RUpperArm']['segm_xy'],
keypoint=segments_xy['RUpperArm']['keypoints']['RElbow'],
ref_keypoint=rsho_keypoint,
is_right=True,
is_leg=False)
segments_xy['RUpperArm']['segm_xy'] = segm_xy
segments_xy['RUpperArm']['keypoints']['RElbow'] = keypoint
# Left
# wrist keypoint = lower arm midpoint
if 'LLowerArm' in segments_xy and 'LUpperArm' in segments_xy and len(segments_xy['LLowerArm']['segm_xy']) > 0 and segments_xy['LLowerArm']['keypoints']['LWrist'][2] > 0 and segments_xy['LUpperArm']['keypoints']['LElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LLowerArm']['segm_xy'],
keypoint=segments_xy['LLowerArm']['keypoints']['LWrist'],
ref_keypoint=segments_xy['LUpperArm']['keypoints']['LElbow'],
is_right=False,
is_leg=False)
segments_xy['LLowerArm']['segm_xy'] = segm_xy
segments_xy['LLowerArm']['keypoints']['LWrist'] = keypoint
# elbow keypoint = upper arm midpoint
if 'LUpperArm' in segments_xy and len(segments_xy['LUpperArm']['segm_xy']) > 0 and segments_xy['LUpperArm']['keypoints']['LElbow'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LUpperArm']['segm_xy'],
keypoint=segments_xy['LUpperArm']['keypoints']['LElbow'],
ref_keypoint=lsho_keypoint,
is_right=False,
is_leg=False)
segments_xy['LUpperArm']['segm_xy'] = segm_xy
segments_xy['LUpperArm']['keypoints']['LElbow'] = keypoint
# Lower Limb
# Right
# ankle keypoint = calf midpoint
if 'RCalf' in segments_xy and 'RThigh' in segments_xy and len(segments_xy['RCalf']['segm_xy']) > 0 and segments_xy['RCalf']['keypoints']['RAnkle'][2] > 0 and segments_xy['RThigh']['keypoints']['RKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RCalf']['segm_xy'],
keypoint=segments_xy['RCalf']['keypoints']['RAnkle'],
ref_keypoint=segments_xy['RThigh']['keypoints']['RKnee'],
is_right=True,
is_leg=True)
segments_xy['RCalf']['segm_xy'] = segm_xy
segments_xy['RCalf']['keypoints']['RAnkle'] = keypoint
# knee keypoint = thigh midpoint
if 'RThigh' in segments_xy and len(segments_xy['RThigh']['segm_xy']) > 0 and segments_xy['RThigh']['keypoints']['RKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['RThigh']['segm_xy'],
keypoint=segments_xy['RThigh']['keypoints']['RKnee'],
ref_keypoint=rhip_keypoint,
is_right=True,
is_leg=True)
segments_xy['RThigh']['segm_xy'] = segm_xy
segments_xy['RThigh']['keypoints']['RKnee'] = keypoint
# Left
# ankle keypoint = calf midpoint
if 'LCalf' in segments_xy and 'LThigh' in segments_xy and len(segments_xy['LCalf']['segm_xy']) > 0 and segments_xy['LCalf']['keypoints']['LAnkle'][2] > 0 and segments_xy['LThigh']['keypoints']['LKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LCalf']['segm_xy'],
keypoint=segments_xy['LCalf']['keypoints']['LAnkle'],
ref_keypoint=segments_xy['LThigh']['keypoints']['LKnee'],
is_right=False,
is_leg=True)
segments_xy['LCalf']['segm_xy'] = segm_xy
segments_xy['LCalf']['keypoints']['LAnkle'] = keypoint
# knee keypoint = thigh midpoint
if 'LThigh' in segments_xy and len(segments_xy['LThigh']['segm_xy']) > 0 and segments_xy['LThigh']['keypoints']['LKnee'][2] > 0:
segm_xy, keypoint = _rotate_limbs_around_midpoint(segm_xy=segments_xy['LThigh']['segm_xy'],
keypoint=segments_xy['LThigh']['keypoints']['LKnee'],
ref_keypoint=lhip_keypoint,
is_right=False,
is_leg=True)
segments_xy['LThigh']['segm_xy'] = segm_xy
segments_xy['LThigh']['keypoints']['LKnee'] = keypoint
return segments_xy
def rotate_segments_xy(segm, keypoints):
# Issue ONE: cannot rotate body to [Face-front + Torso-front] view!!!
# Issue TWO: cannot have the same person -> so it can be a fat person or a thin person!!!
# *Issue THREE*: NO mapped HAND and FOOT keypoints to rotate them - hands are feet are ignored in analysis!!!
# *Issue FOUR*: NOSE is not at the middle point of the head, e.g., face right, face left, so cannot normalize HEAD!!!
# STEP 1: rotated any pose to a vertical pose, i.e., stand up, sit up, etc...
# extract original segment's x, y
segments_xy = _get_segments_xy(segm=segm, keypoints=keypoints)
# rotated segment to vertical pose, i.e., stand up, sit up, etc...
vertical_segments_xy = _rotate_to_vertical_pose(segments_xy=segments_xy)
# STEP 2: rotate specific segment further to t-pose
tpose_segments_xy = _rotate_to_tpose(segments_xy=vertical_segments_xy)
return tpose_segments_xy
def _euclidian(point1, point2):
return np.sqrt((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)
def _remove_outlier(segm_xy):
# outlier factor
factor = 2
# mean of [x, y]
xy_mean = np.mean(segm_xy, axis=0)
# mean distance between [x, y] and mean of [x, y]
distance_mean = np.mean([_euclidian(xy, xy_mean) for xy in segm_xy])
# remove outliers from segm_xy
segm_xy_without_outliers = [xy for xy in segm_xy if _euclidian(xy, xy_mean) <= distance_mean * factor]
return segm_xy_without_outliers
def _translate_and_scale_segm_to_convex(image, segm_id, segm_xy, keypoint, ref_point, is_man, is_rect_symmetrical, segm_symmetry_dict, scaler):
# test each segment
# print('Segment ID:', segm_id)
# remove outliers
print('Before removing outliers:', len(segm_xy))
segm_xy = np.array(_remove_outlier(segm_xy=segm_xy)).astype(int)
print('After removing outliers:', len(segm_xy))
min_x, min_y = np.min(segm_xy, axis=0).astype(int)
max_x, max_y = np.max(segm_xy, axis=0).astype(int)
margin = 5
w = int(max_x - min_x + margin*2)
h = int(max_y - min_y + margin*2)
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :, 3] = 0 # alpha channel = 0 -> transparent
# fill the segment with the segment color
contours = [[int(x - min_x + margin), int(y - min_y + margin)] for x, y in segm_xy]
# option 1 - convex hull of [x, y]
contours = np.array(contours, np.int32)
cv2.fillConvexPoly(img_bg, cv2.convexHull(contours), color=COARSE_TO_COLOR[segm_id])
# option 2 - dots on [x, y]
# for x, y in contours:
# cv2.circle(img_bg, (x, y), color=COARSE_TO_COLOR[segm_id], radius=2, thickness=-2)
# assumption: head_radius = 31 -> head_height = 31*2 = 62 -> men; 58 -> women
if segm_id == 'Head' and h > 0:
if is_man:
scaler = 62 / h
else:
scaler = 58 / h
    img_bg = cv2.resize(img_bg, (int(w * scaler), int(h * scaler)), interpolation=cv2.INTER_LINEAR)
h, w, _ = img_bg.shape
# midpoint [x, y] in the scaled coordinates of img_bg
# distance between the center point and the left/upper boundaries
midpoint_x, midpoint_y = ((np.array(keypoint)[0:2] - np.array([min_x, min_y]) + np.array([margin, margin])) * scaler).astype(int)
x, y = ref_point
min_x = int(x - midpoint_x)
max_x = int(x + w - midpoint_x)
min_y = int(y - midpoint_y)
max_y = int(y + h - midpoint_y)
cond_bg = img_bg[:, :, 3] > 0 # condition for already-drawn segment pixels
try:
image[min_y:max_y, min_x:max_x, :][cond_bg] = img_bg[cond_bg]
except:
        if segm_id == 'Head':
            return scaler, None  # keep the same (scaler, None) arity as the normal return path
# test each segment
# cv2.circle(img_bg, (midpoint_x, midpoint_y), radius=5,color=(255, 255, 0), thickness=-1)
# cv2.imshow('test', img_bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if segm_id == 'Head':
return scaler, None
else:
return None
def _symmetrize_rect_segm(segm_id, w, h, midpoint_x, midpoint_y, segm_symmetry_dict):
if segm_id == 'Head':
segm_symmetry_dict['Head'] = (w, h)
else:
if midpoint_x < w/2:
w = int((w - midpoint_x) * 2)
else:
w = int(midpoint_x * 2)
if midpoint_y < h/2:
h = int((h - midpoint_y) * 2)
else:
h = int(midpoint_y * 2)
if segm_id == 'Torso':
segm_symmetry_dict['Torso'] = (w, h)
elif segm_id == 'RUpperArm':
segm_symmetry_dict['RUpperArm'] = (w, h)
elif segm_id == 'RLowerArm':
segm_symmetry_dict['RLowerArm'] = (w, h)
elif segm_id == 'LUpperArm':
if 'RUpperArm' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RUpperArm']
if w < ref_w:
segm_symmetry_dict['LUpperArm'] = segm_symmetry_dict['RUpperArm']
else:
segm_symmetry_dict['LUpperArm'] = (w, h)
segm_symmetry_dict['RUpperArm'] = (w, h)
else:
segm_symmetry_dict['LUpperArm'] = (w, h)
segm_symmetry_dict['RUpperArm'] = (w, h)
elif segm_id == 'LLowerArm':
if 'RLowerArm' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RLowerArm']
if w < ref_w:
segm_symmetry_dict['LLowerArm'] = segm_symmetry_dict['RLowerArm']
else:
segm_symmetry_dict['LLowerArm'] = (w, h)
segm_symmetry_dict['RLowerArm'] = (w, h)
else:
segm_symmetry_dict['LLowerArm'] = (w, h)
segm_symmetry_dict['RLowerArm'] = (w, h)
elif segm_id == 'RThigh':
segm_symmetry_dict['RThigh'] = (w, h)
elif segm_id == 'RCalf':
segm_symmetry_dict['RCalf'] = (w, h)
elif segm_id == 'LThigh':
if 'RThigh' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RThigh']
if h < ref_h:
segm_symmetry_dict['LThigh'] = segm_symmetry_dict['RThigh']
else:
segm_symmetry_dict['LThigh'] = (w, h)
segm_symmetry_dict['RThigh'] = (w, h)
else:
segm_symmetry_dict['LThigh'] = (w, h)
segm_symmetry_dict['RThigh'] = (w, h)
elif segm_id == 'LCalf':
if 'RCalf' in segm_symmetry_dict:
ref_w, ref_h = segm_symmetry_dict['RCalf']
if h < ref_h:
segm_symmetry_dict['LCalf'] = segm_symmetry_dict['RCalf']
else:
segm_symmetry_dict['LCalf'] = (w, h)
segm_symmetry_dict['RCalf'] = (w, h)
else:
segm_symmetry_dict['LCalf'] = (w, h)
segm_symmetry_dict['RCalf'] = (w, h)
def _draw_symmetrical_rect_segm(image, segm_id, w_and_h, ref_point, update_dict=True):
w, h = w_and_h
# update output_dict
if update_dict:
global output_dict
output_dict[segm_id + '_w'] = w
output_dict[segm_id + '_h'] = h
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :] = COARSE_TO_COLOR[segm_id]
midpoint_x = w / 2
midpoint_y = h / 2
x, y = ref_point
min_x = int(x - midpoint_x)
max_x = int(x + midpoint_x)
min_y = int(y - midpoint_y)
max_y = int(y + midpoint_y)
try:
added_image = cv2.addWeighted(image[min_y:max_y, min_x:max_x, :], 0.1, img_bg, 0.9, 0)
image[min_y:max_y, min_x:max_x, :] = added_image
except:
pass
def _translate_and_scale_segm_to_rect(image, segm_id, segm_xy, keypoint, ref_point, is_man, is_rect_symmetrical, segm_symmetry_dict, scaler):
# test each segment
print('Segment ID:', segm_id)
# remove outliers
print('Before removing outliers:', len(segm_xy))
segm_xy = np.array(_remove_outlier(segm_xy=segm_xy)).astype(int)
print('After removing outliers:', len(segm_xy))
min_x, min_y = np.min(segm_xy, axis=0).astype(int)
max_x, max_y = np.max(segm_xy, axis=0).astype(int)
# debug
# img_bg = np.empty((max_y, max_x, 4), np.uint8)
# img_bg.fill(255)
# for x, y in segm_xy:
# cv2.circle(img_bg, (int(x), int(y)), 1, COARSE_TO_COLOR[segm_id], -1)
# cv2.imshow(segm_id, img_bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
w = int(max_x - min_x)
if segm_id == 'Torso':
h = int(keypoint[0])
else:
h = int(max_y - min_y)
if segm_id == 'Head' and h > 0:
if is_man:
scaler = 62 / h
else:
scaler = 58 / h
w, h = int( w * scaler), int(h * scaler)
# midpoint [x, y] in the scaled coordinates of img_bg
# distance between the center point and the left/upper boundaries
if segm_id == 'Torso':
midpoint_x = int((keypoint[1][0] - min_x) * scaler)
# (1) without above the neck; (2) without crotch;
midpoint_y = int(keypoint[0] / 2 * scaler)
else:
midpoint_x, midpoint_y = ((np.array(keypoint)[0:2] - np.array([min_x, min_y])) * scaler).astype(int)
# discard the segment if the midpoint is not within it
if midpoint_x > w or midpoint_y > h:
if segm_id == 'Head':
return scaler, segm_symmetry_dict
else:
return segm_symmetry_dict
# debug
# cv2.circle(img_bg, (midpoint_x, midpoint_y), 5, (0, 255, 255), -1)
# cv2.imshow(segm_id, img_bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if is_rect_symmetrical:
_symmetrize_rect_segm(segm_id=segm_id, w=w, h=h, midpoint_x=midpoint_x, midpoint_y=midpoint_y, segm_symmetry_dict=segm_symmetry_dict)
else:
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :] = COARSE_TO_COLOR[segm_id]
x, y = ref_point
min_x = int(x - midpoint_x)
max_x = int(x + w - midpoint_x)
min_y = int(y - midpoint_y)
max_y = int(y + h - midpoint_y)
try:
image[min_y:max_y, min_x:max_x, :] = img_bg
except:
        if segm_id == 'Head':
            return scaler, segm_symmetry_dict  # keep the same return arity as the normal path
# test each segment
# cv2.circle(img_bg, (midpoint_x, midpoint_y), radius=5,color=(255, 255, 0), thickness=-1)
# cv2.imshow('test', img_bg)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if segm_id == 'Head':
return scaler, segm_symmetry_dict
else:
return segm_symmetry_dict
def draw_segments_xy(segments_xy, is_vitruve, is_rect, is_man, is_rect_symmetrical):
global output_dict
segm_symmetry_dict = {}
if is_vitruve:
# normalized image = (624, 624, 4)
image = cv2.imread(fname_vitruve_norm, 0)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGRA)
# assumption -> default height of head = 63 pixels!
# scaler = 63 / actual head height
else:
# normalized image = (624, 624, 4)
image = np.empty((624, 624, 4), np.uint8)
image.fill(255) # => white (255, 255, 255, 255) = background with non-transparency
# assumption -> default height of head = 63 pixels!
# scaler = 63 / actual head height
# common settings
# coordinates [x, y] coming from distribution_segm.extract_contour_on_vitruve()
# nose_y 146
# torso_y 281
# rupper_arm_x 218
# rlower_arm_x 149
# lupper_arm_x 405
# llower_arm_x 474
# thigh_y 427
# calf_y 544
# [x, y]
mid_x = 312
arm_line_y = 217
right_leg_x = 288
left_leg_x = 336
norm_nose_xy = [mid_x, 146]
norm_mid_torso_xy = [mid_x, 281]
norm_mid_rupper_arm_xy = [218, arm_line_y]
norm_mid_rlower_arm_xy = [149, arm_line_y]
norm_mid_lupper_arm_xy = [405, arm_line_y]
norm_mid_llower_arm_xy = [474, arm_line_y]
norm_mid_rthigh_xy = [right_leg_x, 427]
norm_mid_lthigh_xy = [left_leg_x, 427]
norm_mid_rcalf_xy = [right_leg_x, 544]
norm_mid_lcalf_xy = [left_leg_x, 544]
# mid-point radius for keypoints
radius = 2
# assumption -> size of head for all people is the same!!!
scaler = None
dispatcher = {}
if is_rect:
dispatcher['segm_function'] = _translate_and_scale_segm_to_rect
else:
dispatcher['segm_function'] = _translate_and_scale_segm_to_convex
# translate first, scale second!
# head
if 'Head' in segments_xy:
scaler, segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='Head', segm_xy=segments_xy['Head']['segm_xy'],
keypoint=segments_xy['Head']['keypoints']['Nose'],
ref_point=norm_nose_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=None)
# update output_dict
output_dict['scaler'] = scaler
print('scaler:', scaler)
# torso
if 'Torso' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='Torso',
segm_xy=segments_xy['Torso']['segm_xy'],
keypoint=segments_xy['Torso']['keypoints']['MidHip'],
ref_point=norm_mid_torso_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
# upper limbs
if 'RUpperArm' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='RUpperArm',
segm_xy=segments_xy['RUpperArm']['segm_xy'],
keypoint=segments_xy['RUpperArm']['keypoints']['RElbow'],
ref_point=norm_mid_rupper_arm_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'RLowerArm' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='RLowerArm',
segm_xy=segments_xy['RLowerArm']['segm_xy'],
keypoint=segments_xy['RLowerArm']['keypoints']['RWrist'],
ref_point=norm_mid_rlower_arm_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'LUpperArm' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='LUpperArm',
segm_xy=segments_xy['LUpperArm']['segm_xy'],
keypoint=segments_xy['LUpperArm']['keypoints']['LElbow'],
ref_point=norm_mid_lupper_arm_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'LLowerArm' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='LLowerArm',
segm_xy=segments_xy['LLowerArm']['segm_xy'],
keypoint=segments_xy['LLowerArm']['keypoints']['LWrist'],
ref_point=norm_mid_llower_arm_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
# lower limbs
if 'RThigh' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='RThigh',
segm_xy=segments_xy['RThigh']['segm_xy'],
keypoint=segments_xy['RThigh']['keypoints']['RKnee'],
ref_point=norm_mid_rthigh_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'RCalf' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='RCalf',
segm_xy=segments_xy['RCalf']['segm_xy'],
keypoint=segments_xy['RCalf']['keypoints']['RAnkle'],
ref_point=norm_mid_rcalf_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'LThigh' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='LThigh',
segm_xy=segments_xy['LThigh']['segm_xy'],
keypoint=segments_xy['LThigh']['keypoints']['LKnee'],
ref_point=norm_mid_lthigh_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
if 'LCalf' in segments_xy:
segm_symmetry_dict = dispatcher['segm_function'](image=image,
segm_id='LCalf',
segm_xy=segments_xy['LCalf']['segm_xy'],
keypoint=segments_xy['LCalf']['keypoints']['LAnkle'],
ref_point=norm_mid_lcalf_xy,
is_man=is_man,
is_rect_symmetrical=is_rect_symmetrical,
segm_symmetry_dict=segm_symmetry_dict,
scaler=scaler)
# draw the segments at last, after the symmetry of all segments has been checked
if is_rect_symmetrical:
# head
if 'Head' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='Head', w_and_h=segm_symmetry_dict['Head'],
ref_point=norm_nose_xy)
# torso
if 'Torso' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='Torso', w_and_h=segm_symmetry_dict['Torso'],
ref_point=norm_mid_torso_xy)
# arms
if 'RUpperArm' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='RUpperArm', w_and_h=segm_symmetry_dict['RUpperArm'],
ref_point=norm_mid_rupper_arm_xy)
if 'RLowerArm' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='RLowerArm', w_and_h=segm_symmetry_dict['RLowerArm'],
ref_point=norm_mid_rlower_arm_xy)
if 'LUpperArm' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='LUpperArm', w_and_h=segm_symmetry_dict['LUpperArm'],
ref_point=norm_mid_lupper_arm_xy)
if 'LLowerArm' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='LLowerArm', w_and_h=segm_symmetry_dict['LLowerArm'],
ref_point=norm_mid_llower_arm_xy)
# legs
if 'RThigh' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='RThigh', w_and_h=segm_symmetry_dict['RThigh'],
ref_point=norm_mid_rthigh_xy)
if 'RCalf' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='RCalf', w_and_h=segm_symmetry_dict['RCalf'],
ref_point=norm_mid_rcalf_xy)
if 'LThigh' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='LThigh', w_and_h=segm_symmetry_dict['LThigh'],
ref_point=norm_mid_lthigh_xy)
if 'LCalf' in segm_symmetry_dict:
_draw_symmetrical_rect_segm(image=image, segm_id='LCalf', w_and_h=segm_symmetry_dict['LCalf'],
ref_point=norm_mid_lcalf_xy)
# draw centers
# head center
cv2.circle(image, tuple(norm_nose_xy), radius=radius, color=(255, 0, 255), thickness=-1)
# torso center
cv2.circle(image, tuple(norm_mid_torso_xy), radius=radius, color=(255, 0, 255), thickness=-1)
# upper limbs
cv2.circle(image, tuple(norm_mid_rupper_arm_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_rlower_arm_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_lupper_arm_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_llower_arm_xy), radius=radius, color=(255, 0, 255), thickness=-1)
# lower limbs
cv2.circle(image, tuple(norm_mid_rthigh_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_rcalf_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_lthigh_xy), radius=radius, color=(255, 0, 255), thickness=-1)
cv2.circle(image, tuple(norm_mid_lcalf_xy), radius=radius, color=(255, 0, 255), thickness=-1)
return image
def visualize_norm_segm(image_bg, mask, segm, bbox_xywh, keypoints, infile, person_index, is_vitruve, is_rect, is_man, is_rect_symmetrical, show=False):
x, y, w, h = [int(v) for v in bbox_xywh]
mask, segm = _resize(mask, segm, w, h)
# mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
# translate keypoints to bbox
keypoints = np.array(keypoints) - np.array((x, y, 0.0))
# dict keypoints
keypoints = dict(zip(JOINT_ID, keypoints))
# visualize original pose by bbox
if show:
segm_scaled = segm.astype(np.float32) * gray_val_scale
segm_scaled_8u = segm_scaled.clip(0, 255).astype(np.uint8)
# apply cmap
segm_vis = cv2.applyColorMap(segm_scaled_8u, cmap)
cv2.imshow(window_bbox, segm_vis)
cv2.setWindowProperty(window_bbox, cv2.WND_PROP_TOPMOST, 1)
cv2.waitKey(0)
cv2.destroyAllWindows()
# visualize normalized pose
# rotate to t-pose
segments_xy = rotate_segments_xy(segm=segm, keypoints=keypoints)
# draw segments in normalized image
image = draw_segments_xy(segments_xy=segments_xy, is_vitruve=is_vitruve,
is_rect=is_rect, is_man=is_man, is_rect_symmetrical=is_rect_symmetrical)
if show:
outfile = generate_norm_segm_outfile(infile, person_index, is_rect)
cv2.imwrite(outfile, image)
print('output', outfile)
cv2.imshow(window_norm, image)
cv2.setWindowProperty(window_norm, cv2.WND_PROP_TOPMOST, 1)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
        outfile = generate_norm_segm_outfile(infile, person_index, is_rect)
cv2.imwrite(outfile, image)
print('output', outfile)
def _dilate_segm_to_convex(image, segm_id, segm_xy, bbox_xywh):
# test each segment
# print('Segment ID:', segm_id)
# remove outliers
print('Before removing outliers:', len(segm_xy))
segm_xy = np.array(_remove_outlier(segm_xy=segm_xy)).astype(int)
print('After removing outliers:', len(segm_xy))
min_x, min_y = np.min(segm_xy, axis=0).astype(int)
max_x, max_y = np.max(segm_xy, axis=0).astype(int)
margin = 5
w = int(max_x - min_x + margin * 2)
h = int(max_y - min_y + margin * 2)
img_bg = np.empty((h, w, 4), np.uint8)
img_bg.fill(255)
img_bg[:, :, 3] = 0 # alpha channel = 0 -> transparent
# fill the segment with the segment color
contours = [[int(x - min_x + margin), int(y - min_y + margin)] for x, y in segm_xy]
contours = np.array(contours, np.int32)
cv2.fillConvexPoly(img_bg, cv2.convexHull(contours), color=COARSE_TO_COLOR[segm_id])
# translate from the bbox's coordinate to the image's coordinate
bbox_x, bbox_y, bbox_w, bbox_h = bbox_xywh
# stack two images
cond_bg = img_bg[:, :, 3] > 0 # condition for already-drawn segment pixels
image[int(min_y - margin + bbox_y):int(max_y + margin + bbox_y), int(min_x - margin + bbox_x):int(max_x + margin + bbox_x), :][cond_bg] = img_bg[cond_bg]
def _get_min_bounding_rect(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
"""
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
    rval[1] = np.dot([x2, y2], r)
    # the last two corners and the return were truncated in the source; the four-corner
    # pattern (x1,y2), (x2,y2), (x2,y1), (x1,y1) makes this reconstruction unambiguous
    rval[2] = np.dot([x2, y1], r)
    rval[3] = np.dot([x1, y1], r)
    return rval
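# Minimal usage sketch: the minimum-area bounding rectangle of an axis-aligned unit
# square (plus one interior point) is the square itself, up to corner ordering.
def _demo_min_bounding_rect():
    pts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0.5, 0.5]])
    corners = _get_min_bounding_rect(pts)
    print(corners)  # four corners of the unit square, order depends on the chosen rotation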
import numpy as np
from colorama import Style, Fore
from ..utils.statistics import success_overlap, success_error
class OPEBenchmark:
"""
Args:
result_path: result path of your tracker
should the same format like VOT
"""
def __init__(self, dataset):
self.dataset = dataset
def convert_bb_to_center(self, bboxes):
return np.array([(bboxes[:, 0] + (bboxes[:, 2] - 1) / 2),
(bboxes[:, 1] + (bboxes[:, 3] - 1) / 2)]).T
def convert_bb_to_norm_center(self, bboxes, gt_wh):
return self.convert_bb_to_center(bboxes) / (gt_wh+1e-16)
def eval_success(self, eval_trackers=None):
"""
Args:
eval_trackers: list of tracker name or single tracker name
Return:
res: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
success_ret = {}
for tracker_name in eval_trackers:
success_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
tracker_traj = np.array(tracker_traj)
else:
tracker_traj = np.array(video.pred_trajs[tracker_name])
n_frame = len(gt_traj)
if hasattr(video, 'absent'):
gt_traj = gt_traj[video.absent == 1]
tracker_traj = tracker_traj[video.absent == 1]
success_ret_[video.name] = success_overlap(gt_traj, tracker_traj, n_frame)
success_ret[tracker_name] = success_ret_
return success_ret
def eval_precision(self, eval_trackers=None):
"""
Args:
eval_trackers: list of tracker name or single tracker name
Return:
res: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
precision_ret = {}
for tracker_name in eval_trackers:
precision_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
tracker_traj = np.array(tracker_traj)
else:
tracker_traj = np.array(video.pred_trajs[tracker_name])
n_frame = len(gt_traj)
if hasattr(video, 'absent'):
gt_traj = gt_traj[video.absent == 1]
tracker_traj = tracker_traj[video.absent == 1]
gt_center = self.convert_bb_to_center(gt_traj)
tracker_center = self.convert_bb_to_center(tracker_traj)
thresholds = np.arange(0, 51, 1)
precision_ret_[video.name] = success_error(gt_center, tracker_center,
thresholds, n_frame)
precision_ret[tracker_name] = precision_ret_
return precision_ret
def eval_norm_precision(self, eval_trackers=None):
"""
Args:
eval_trackers: list of tracker name or single tracker name
Return:
res: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
norm_precision_ret = {}
for tracker_name in eval_trackers:
norm_precision_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
                    tracker_traj = np.array(tracker_traj)
                else:
                    tracker_traj = np.array(video.pred_trajs[tracker_name])
                # NOTE: the rest of this method was truncated in the source; the lines below
                # mirror eval_precision() above but use normalized centers, which is the
                # natural (assumed) completion given convert_bb_to_norm_center().
                n_frame = len(gt_traj)
                if hasattr(video, 'absent'):
                    gt_traj = gt_traj[video.absent == 1]
                    tracker_traj = tracker_traj[video.absent == 1]
                gt_center_norm = self.convert_bb_to_norm_center(gt_traj, gt_traj[:, 2:4])
                tracker_center_norm = self.convert_bb_to_norm_center(tracker_traj, gt_traj[:, 2:4])
                thresholds = np.arange(0, 51, 1) / 100
                norm_precision_ret_[video.name] = success_error(gt_center_norm,
                        tracker_center_norm, thresholds, n_frame)
            norm_precision_ret[tracker_name] = norm_precision_ret_
        return norm_precision_ret
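# Minimal usage sketch (hypothetical objects): `dataset` stands for a pysot-style
# dataset object exposing `tracker_names`, `tracker_path`, and iterable videos with
# `gt_traj` / `pred_trajs`; it is not constructed here.
def _demo_ope_benchmark(dataset):
    benchmark = OPEBenchmark(dataset)
    success = benchmark.eval_success()        # dict: tracker name -> video name -> overlap curve
    precision = benchmark.eval_precision()    # dict: tracker name -> video name -> precision curve
    return success, precision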
from torch._C import dtype
import torch.utils.data as data
from PIL import Image
import os
import json
from torchvision import transforms
import random
import numpy as np
import math
import torch
def default_loader(path):
return Image.open(path).convert('RGB')
def load_taxonomy(ann_data, tax_levels, classes):
# loads the taxonomy data and converts to ints
taxonomy = {}
if 'categories' in ann_data.keys():
num_classes = len(ann_data['categories'])
for tt in tax_levels:
tax_data = [aa[tt] for aa in ann_data['categories']]
_, tax_id = np.unique(tax_data, return_inverse=True)
taxonomy[tt] = dict(zip(range(num_classes), list(tax_id)))
else:
# set up dummy data
for tt in tax_levels:
taxonomy[tt] = dict(zip([0], [0]))
# create a dictionary of lists containing taxonomic labels
classes_taxonomic = {}
for cc in np.unique(classes):
tax_ids = [0]*len(tax_levels)
for ii, tt in enumerate(tax_levels):
tax_ids[ii] = taxonomy[tt][cc]
classes_taxonomic[cc] = tax_ids
return taxonomy, classes_taxonomic
class INAT(data.Dataset):
def __init__(self, root, ann_file, is_train=True, split=0):
train_filename="train2018.json"
test_filename="val2018.json"
# load train to get permutation
train_path=f"{root}/{train_filename}"
with open(train_path) as data_file:
train_data=json.load(data_file)
imgs = [aa['file_name'] for aa in train_data['images']]
if 'annotations' in train_data.keys():
self.classes = [aa['category_id'] for aa in train_data['annotations']]
else:
self.classes = [0]*len(imgs)
self.num_classes=len(set(self.classes))
num_train_samples = [0 for _ in range(self.num_classes)]
for label in self.classes:
num_train_samples[label]+=1
permutation=np.argsort(num_train_samples)[::-1]
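# 'permutation' lists the class ids sorted by descending number of training samples;
# it is used further below to relabel each class by its frequency rank.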
#print(train_size)
# load annotations
print('Loading annotations from: ' + os.path.basename(ann_file))
with open(ann_file) as data_file:
ann_data = json.load(data_file)
# set up the filenames and annotations
self.imgs = [aa['file_name'] for aa in ann_data['images']]
self.ids = [aa['id'] for aa in ann_data['images']]
# if we don't have class labels, set them to '0'
if 'annotations' in ann_data.keys():
self.classes = [aa['category_id'] for aa in ann_data['annotations']]
else:
self.classes = [0]*len(self.imgs)
# load taxonomy
self.tax_levels = ['id', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom']
#8142, 4412, 1120, 273, 57, 25, 6
self.taxonomy, self.classes_taxonomic = load_taxonomy(ann_data, self.tax_levels, self.classes)
self.num_classes=len(set(self.classes))
selected_index=[]
if split!=0:
selected_index=[]
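# split==1 keeps roughly the first 80% of each class (in index order) and
# split==2 keeps the remaining ~20%, giving a per-class train/validation split.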
if split==1:
labels=np.array(self.classes)
for i in range(self.num_classes):
#print((np.where(labels==i)[0]),len((np.where(labels==i)[0])),(np.where(labels==i)[0])*0.8)
selected_len=len((np.where(labels==i)[0]))-math.ceil(len((np.where(labels==i)[0]))*0.2)
selected_index.extend(np.where(labels==i)[0][:selected_len])
elif split==2:
labels=np.array(self.classes)
for i in range(self.num_classes):
#print((np.where(labels==i)[0]),len((np.where(labels==i)[0])),(np.where(labels==i)[0])*0.8)
selected_len=math.ceil(len((np.where(labels==i)[0]))*0.2)
selected_index.extend(np.where(labels==i)[0][-selected_len:])
np.random.shuffle(selected_index)
self.imgs=np.array(self.imgs)[selected_index]
self.classes=np.array(self.classes)[selected_index]
# print out some stats
print ('\t' + str(len(self.imgs)) + ' images')
print ('\t' + str(len(set(self.classes))) + ' classes')
#for i in self.classes:
# print(num_train_samples[i],np.where(permutation==i)[0][0])
self.classes=[np.where(permutation==i)[0][0] for i in self.classes]
# -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
import logging
from scipy.sparse import lil_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
from microgridRLsimulator.agent.agent import Agent
import pickle
import numpy as np
from copy import deepcopy
from microgridRLsimulator.simulate.forecaster import Forecaster
from microgridRLsimulator.simulate.gridaction import GridAction
from microgridRLsimulator.utils import time_string_for_storing_results, decode_GridState
from microgridRLsimulator.agent.OptimizationAgent import OptimizationAgent
logger = logging.getLogger(__name__)
def plot_training_progress(y_train, y_pred_train, y_test, y_pred_test):
fig, axes = plt.subplots(len(y_train[0]), 1, sharex=True)
fig.suptitle('Train')
for i in range(len(y_train[0])):
axes[i].plot(y_train[:, i], label="Original")
axes[i].plot(y_pred_train[:, i], label="Prediction")
axes[i].legend()
fig, axes = plt.subplots(len(y_test[0]), 1, sharex=True)
fig.suptitle('Test')
for i in range(len(y_test[0])):
axes[i].plot(y_test[:, i], label="Original")
axes[i].plot(y_pred_test[:, i], label="Prediction")
axes[i].legend()
plt.show()
class SLAgent(Agent):
def __init__(self, env, control_horizon_data, simulation_horizon, path_to_stored_experience, path_to_store_models,
features, test_size, shuffle, use_forecasts, models_dict, expert_iterations, scale_outputs=False, n_test_episodes=1):
super().__init__(env)
self.control_horizon = control_horizon_data
self.simulation_horizon = simulation_horizon
self.path_to_stored_experience = path_to_stored_experience
self.path_to_store_models = path_to_store_models + time_string_for_storing_results("experiment",
env.simulator.case)
if not os.path.isdir(self.path_to_store_models):
os.makedirs(self.path_to_store_models)
self.features = features
self.use_forecasts = use_forecasts
self.test_size = test_size
self.shuffle = shuffle
self.grid = self.env.simulator.grid
self.forecaster = None
self.sl_models = None
self.create_models_from_dict(models_dict)
self.inputs = None
self.outputs = None
self.states = None
self.forecasts = None
self.actions = None
self.expert = OptimizationAgent(self.env, self.control_horizon, self.simulation_horizon)
self.scale_outputs = scale_outputs
self.expert_iterations = expert_iterations
self.n_test_episodes = n_test_episodes
@staticmethod
def name():
return "SL"
def reset_agent(self):
self.forecaster = Forecaster(simulator=self.env.simulator, control_horizon=self.control_horizon)
self.grid = self.env.simulator.grid
def train_agent(self):
# Load the datasets
self.load_data()
# Prepare the data
self.process_data()
## Supervised learning model to tune.
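# DAgger-style imitation loop: fit each model on the current dataset, roll it out in the
# environment while querying the expert (OptimizationAgent) for the action it would have
# taken, then aggregate those (state, expert action) pairs back into the training set.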
for _ in range(self.expert_iterations):
new_states = []
expert_actions = []
for name, model in self.sl_models.items():
x_train, x_test, y_train, y_test = train_test_split(self.inputs, self.outputs,
test_size=self.test_size,
shuffle=self.shuffle)
model.fit(x_train, y_train)
y_pred_train = model.predict(x_train)
y_pred_test = model.predict(x_test)
logger.info("Model: %s Train set error: %d, Test set error: %d" % (
name, mean_squared_error(y_train, y_pred_train),
mean_squared_error(y_test, y_pred_test)))
plot_training_progress(y_train, y_pred_train, y_test, y_pred_test)
model.fit(self.inputs, self.outputs)
new_states_model, expert_actions_model = self.augment_training_agent(model)
new_states += new_states_model
expert_actions += expert_actions_model
self.add_new_data_in_experience(new_states, expert_actions)
for name, model in self.sl_models.items():
model.fit(self.inputs, self.outputs)
self.store_model(name, model)
def simulate_agent(self, agent_options=None):
for name, model in self.sl_models.items():
actions = []
for i in range(1, self.n_test_episodes + 1):
state = self.env.reset()
self.reset_agent()
cumulative_reward = 0.0
done = False
while not done:
state_decoded = decode_GridState(self.env.simulator.grid_states[-1], self.features)
if self.forecasts:
self.forecaster.exact_forecast(self.env.simulator.env_step)
consumption_forecast, pv_forecast = self.forecaster.get_forecast()
final_state = np.concatenate((np.array(state_decoded),
np.array(consumption_forecast), np.array(pv_forecast)))
else:
final_state = np.array(state_decoded)
state_shape = np.shape(final_state)[0]
model_output = model.predict(final_state.reshape(-1, state_shape))[0]
action = self.list_to_GridAction(list(model_output))
actions.append(model_output)
next_state, reward, done, info = self.env.step(state=state, action=action)
cumulative_reward += reward
state = deepcopy(next_state)
print('Finished %s simulation for model %s and the reward is: %d.' % (self.env.purpose, name,
cumulative_reward))
self.env.simulator.store_and_plot(
folder="results/" + time_string_for_storing_results(self.name() + "_" + self.env.purpose + "_from_" + self.env.simulator.start_date.strftime("%m-%d-%Y") + "_to_" + self.env.simulator.end_date.strftime("%m-%d-%Y"),
self.env.simulator.case) + "_" + str(i), agent_options=agent_options)
# plt.figure()
# actions_array = np.array(actions).T
# for a in actions_array:
# plt.plot(a)
# plt.show()
def augment_training_agent(self, model):
state = self.env.reset()
self.reset_agent()
self.expert.reset_agent()
cumulative_reward = 0.0
done = False
new_states = []
expert_actions = []
while not done:
self.expert._create_model(self.env.simulator.env_step)
state_decoded = decode_GridState(self.env.simulator.grid_states[-1], self.features)
expert_action = self.expert.get_optimal_action()[0].to_list()
if self.forecasts:
self.forecaster.exact_forecast(self.env.simulator.env_step)
consumption_forecast, pv_forecast = self.forecaster.get_forecast()
final_state = np.concatenate((np.array(state_decoded),
np.array(consumption_forecast), np.array(pv_forecast)))
else:
final_state = np.array(state_decoded)
new_states.append(final_state)
expert_actions.append(expert_action)
state_shape = np.shape(final_state)[0]
model_output = model.predict(final_state.reshape(-1, state_shape))[0]
action = self.list_to_GridAction(list(model_output))
next_state, reward, done, info = self.env.step(state=state, action=action)
cumulative_reward += reward
state = deepcopy(next_state)
logger.info(' Collected reward is: %d.' % (cumulative_reward))
return new_states, expert_actions
def add_new_data_in_experience(self, new_states, expert_actions):
self.inputs = np.concatenate((self.inputs, np.array(new_states)))
# Copyright (C) 2013-2016 <NAME>, UC Denver
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
# A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from fmda.fuel_moisture_da import execute_da_step, retrieve_mesowest_observations
from fmda.fuel_moisture_model import FuelMoistureModel
from ingest.grib_file import GribFile, GribMessage
from ingest.rtma_source import RTMA
from utils import Dict, ensure_dir, utc_to_esmf
from vis.postprocessor import scalar_field_to_raster
from ssh_shuttle import send_product_to_server
import netCDF4
import numpy as np
import json
import sys
import logging
import os
import os.path as osp
from datetime import datetime, timedelta
import pytz
def postprocess_cycle(cycle, region_cfg, wksp_path):
"""
Build rasters from the computed fuel moisture.
:param cycle: the UTC cycle time
:param region_cfg: the region configuration
:param wksp_path: the workspace path
:return: the postprocessing path
"""
data_path = compute_model_path(cycle, region_cfg.code, wksp_path)
year_month = '%04d%02d' % (cycle.year, cycle.month)
cycle_dir = 'fmda-%s-%04d%02d%02d-%02d' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
postproc_path = osp.join(wksp_path, year_month, cycle_dir)
# open and read in the fuel moisture values
d = netCDF4.Dataset(data_path)
fmc_gc = d.variables['FMC_GC'][:,:,:]
d.close()
# read in the longitudes and latitudes
geo_path = osp.join(wksp_path, '%s-geo.nc' % region_cfg.code)
logging.info('CYCLER reading longitudes and latitudes from NetCDF file %s' % geo_path )
d = netCDF4.Dataset(geo_path)
lats = d.variables['XLAT'][:,:]
lons = d.variables['XLONG'][:,:]
d.close()
fm_wisdom = {
'native_unit' : '-',
'colorbar' : '-',
'colormap' : 'jet_r',
'scale' : [0.0, 0.4]
}
esmf_cycle = utc_to_esmf(cycle)
mf = { "1" : {esmf_cycle : {}}}
manifest_name = 'fmda-%s-%04d%02d%02d-%02d.json' % (region_cfg.code, cycle.year, cycle.month, cycle.day, cycle.hour)
ensure_dir(osp.join(postproc_path, manifest_name))
for i,name in [(0, '1-hr'), (1, '10-hr'), (2, '100-hr')]:
fm_wisdom['name'] = '%s fuel moisture' % name
raster_png, coords, cb_png = scalar_field_to_raster(fmc_gc[:,:,i], lats, lons, fm_wisdom)
raster_name = 'fmda-%s-raster.png' % name
cb_name = 'fmda-%s-raster-cb.png' % name
with open(osp.join(postproc_path, raster_name), 'w') as f:
f.write(raster_png)
with open(osp.join(postproc_path, cb_name), 'w') as f:
f.write(cb_png)
mf["1"][esmf_cycle][name] = { 'raster' : raster_name, 'coords' : coords, 'colorbar' : cb_name }
logging.info('writing manifest file %s' % osp.join(postproc_path, manifest_name) )
json.dump(mf, open(osp.join(postproc_path, manifest_name), 'w'))
return postproc_path
def compute_model_path(cycle, region_code, wksp_path):
"""
Construct a relative path to the fuel moisture model file
for the region code and cycle.
:param cycle: the UTC cycle time
:param region_code: the code of the region
:param wksp_path: the workspace path
:return: a relative path (w.r.t. workspace and region) of the fuel model file
"""
year_month = '%04d%02d' % (cycle.year, cycle.month)
filename = 'fmda-%s-%04d%02d%02d-%02d.nc' % (region_code, cycle.year, cycle.month, cycle.day, cycle.hour)
return osp.join(wksp_path,region_code,year_month,filename)
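# Illustrative example (region code and workspace path are made up):
#   compute_model_path(datetime(2016, 5, 1, 12), 'colorado', 'wksp')
#   -> 'wksp/colorado/201605/fmda-colorado-20160501-12.nc'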
def find_region_indices(glat,glon,minlat,maxlat,minlon,maxlon):
"""
Find the indices i1:i2 (lat dimension) and j1:j2 (lon dimension)
that contain the desired region (minlat-maxlat,minlon-maxlon).
:param glat: the grid latitudes
:param glon: the grid longitudes
:param minlat: the minimum latitude
:param maxlat: the maximum latitude
:param minlon: the minimum longitude
:param maxlon: the maximum longitude
:return: dim 0 min/max indices and dim1 min/max indices
"""
i1, i2, j1, j2 = 0, glat.shape[0], 0, glat.shape[1]
done = False
while not done:
done = True
tmp = np.where(np.amax(glat[:, j1:j2],axis=1) < minlat)[0][-1]
if i1 != tmp:
i1 = tmp
done = False
tmp = np.where(np.amin(glat[:, j1:j2],axis=1) > maxlat)[0][0]
if i2 != tmp:
i2 = tmp
done = False
tmp = np.where(np.amax(glon[i1:i2,:],axis=0) < minlon)[0][-1]
if j1 != tmp:
j1 = tmp
done = False
tmp = np.where(np.amin(glon[i1:i2,:],axis=0) > maxlon)[0][0]
if j2 != tmp:
j2 = tmp
done = False
return i1,i2,j1,j2
def load_rtma_data(rtma_data, bbox):
"""
Load relevant RTMA fields and return them
:param rtma_data: a dictionary mapping variable names to local paths
:param bbox: the bounding box of the data
:return: a tuple containing t2, rh, rain, hgt, lats, lons
"""
gf = GribFile(rtma_data['temp'])[1]
lats, lons = gf.latlons()
# bbox format: minlat, minlon, maxlat, maxlon
i1, i2, j1, j2 = find_region_indices(lats, lons, bbox[0], bbox[2], bbox[1], bbox[3])
t2 = gf.values()[i1:i2,j1:j2] # temperature at 2m in K
td = GribFile(rtma_data['td'])[1].values()[i1:i2,j1:j2] # dew point in K
rain = GribFile(rtma_data['precipa'])[1].values()[i1:i2,j1:j2] # precipitation
hgt = GribFile('static/ds.terrainh.bin')[1].values()[i1:i2,j1:j2]
# compute relative humidity
rh = 100*np.exp(17.625*243.04*(td - t2) / (243.04 + t2 - 273.15) / (243.0 + td - 273.15))
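# (This appears to be the August-Roche-Magnus approximation, algebraically rearranged so
#  that the dew point td and the temperature t2 can be used in Kelvin directly.)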
return t2, rh, rain, hgt, lats[i1:i2,j1:j2], lons[i1:i2,j1:j2]
def compute_equilibria(T, H):
"""
Compute the drying and wetting equilibrium given temperature and relative humidity.
:param T: the temperature at 2 meters in K
:param H: the relative humidity in percent
:return: a tuple containing the drying and wetting equilibrium
"""
d = 0.924*H**0.679 + 0.000499*np.exp(0.1*H) + 0.18*(21.1 + 273.15 - T)*(1 - np.exp(-0.115*H))
w = 0.618*H**0.753 + 0.000454*np.exp(0.1*H) + 0.18*(21.1 + 273.15 - T)*(1 - np.exp(-0.115*H))
d *= 0.01
w *= 0.01
return d, w
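# Rough illustrative check (inputs made up, values approximate): at T = 300 K and
# H = 40 % relative humidity the two equilibria come out near 0.10 (drying) and
# 0.09 (wetting), i.e. roughly 10% equilibrium moisture content.
#   Ed, Ew = compute_equilibria(np.array([300.0]), np.array([40.0]))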
def fmda_advance_region(cycle, cfg, rtma, wksp_path, lookback_length, meso_token):
"""
Advance the fuel moisture estimates in the region specified by the configuration.
The function assumes that the fuel moisture model has not been advanced to this
cycle yet and will overwrite any previous computations.
Control flow:
1) read in RTMA variables
2) check if there is a stored FM model for previous cycle
2a) yes -> load it, advance one time-step, perform DA
2b) no -> compute equilibrium, use background covariance to do DA
3) store model
:param cycle: the datetime indicating the processed cycle in UTC
:param cfg: the configuration dictionary specifying the region
:param rtma: the RTMA object that can be used to retrieve variables for this cycle
:param wksp_path: the workspace path for the cycler
:param lookback_length: number of cycles to search before we find a computed cycle
:param meso_token: the mesowest API access token
:return: the model advanced and assimilated at the current cycle
"""
model = None
prev_cycle = cycle - timedelta(hours=1)
prev_model_path = compute_model_path(prev_cycle, cfg.code, wksp_path)
if not osp.exists(prev_model_path):
logging.info('CYCLER cannot find model from previous cycle %s' % str(prev_cycle))
if lookback_length > 0:
model = fmda_advance_region(cycle - timedelta(hours=1), cfg, rtma, wksp_path, lookback_length - 1, meso_token)
else:
logging.info('CYCLER found previous model for cycle %s.' % str(prev_cycle))
model = FuelMoistureModel.from_netcdf(prev_model_path)
# retrieve the variables and make sure they are available (we should not be here if they are not)
try:
dont_have_vars, have_vars = rtma.retrieve_rtma(cycle)
except ValueError as e:
logging.error(e)
sys.exit(1)
assert not dont_have_vars
logging.info('CYCLER loading RTMA data for cycle %s.' % str(cycle))
T2, RH, rain, hgt, lats, lons = load_rtma_data(have_vars, cfg.bbox)
Ed, Ew = compute_equilibria(T2, RH)
dom_shape = T2.shape
# store the lons/lats for this domain
geo_path = osp.join(wksp_path, '%s-geo.nc' % cfg.code)
if not osp.isfile(geo_path):
logging.info('CYCLER initializing new file %s.' % (geo_path))
d = netCDF4.Dataset(geo_path, 'w', format='NETCDF4')
d.createDimension('south_north', dom_shape[0])
d.createDimension('west_east', dom_shape[1])
xlat = d.createVariable('XLAT', 'f4', ('south_north', 'west_east'))
xlat[:,:] = lats
xlong = d.createVariable('XLONG', 'f4', ('south_north', 'west_east'))
xlong[:,:] = lons
d.close()
else:
logging.info('CYCLER file already exists: %s.' % (geo_path))
# the process noise matrix
Q = np.diag([1e-4,5e-5,1e-5,1e-6,1e-6])
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
this is the object that holds the feedback table for SN1a
.masses gives a list of masses
.metallicities gives a list of possible yield metallicities
.elements gives the elements considered in the yield table
.table gives a dictionary where the yield table for a specific metallicity can be queried
.table[0.02] gives a yield table.
Keys of this object are ['Mass','mass_in_remnants','elements']
Mass is in units of Msun
'mass_in_remnants' is in units of Msun but carries a negative sign
element yields are in Msun normalised to Mass, i.e. they sum to unity over all elements
"""
def TNG(self):
""" IllustrisTNG yield tables from Pillepich et al. 2017.
These are the 1997 Nomoto W7 models, and sum all isotopes (not just stable)"""
import h5py as h5
filename = localpath+'input/yields/TNG/SNIa.hdf5'
# Read H5 file
f = h5.File(filename, "r")
indexing = {}
indexing['H'] = 'Hydrogen'
indexing['He'] = 'Helium'
indexing['Li'] = 'Lithium'
indexing['Be'] = 'Beryllium'
indexing['B'] = 'Boron'
indexing['C'] = 'Carbon'
indexing['N'] = 'Nitrogen'
indexing['O'] = 'Oxygen'
indexing['F'] = 'Fluorine'
indexing['Ne'] = 'Neon'
indexing['Na'] = 'Sodium'
indexing['Mg'] = 'Magnesium'
indexing['Al'] = 'Aluminum'
indexing['Si'] = 'Silicon'
indexing['P'] = 'Phosphorus'
indexing['S'] = 'Sulphur'
indexing['Cl'] = 'Chlorine'
indexing['Ar'] = 'Argon'
indexing['K'] = 'Potassium'
indexing['Ca'] = 'Calcium'
indexing['Sc'] = 'Scandium'
indexing['Ti'] = 'Titanium'
indexing['V'] = 'Vanadium'
indexing['Cr'] = 'Chromium'
indexing['Mn'] = 'Manganese'
indexing['Fe'] = 'Iron'
indexing['Co'] = 'Cobalt'
indexing['Ni'] = 'Nickel'
indexing['Cu'] = 'Copper'
indexing['Zn'] = 'Zinc'
indexing['Ga'] = 'Gallium'
indexing['Ge'] = 'Germanium'
indexing['As'] = 'Arsenic'
indexing['Se'] = 'Selenium'
indexing['Br'] = 'Bromine'
indexing['Kr'] = 'Krypton'
indexing['Rb'] = 'Rubidium'
indexing['Sr'] = 'Strontium'
indexing['Y'] = 'Yttrium'
indexing['Zr'] = 'Zirconium'
indexing['Nb'] = 'Niobium'
indexing['Mo'] = 'Molybdenum'
self.elements = list(indexing.keys())
self.table = {}
self.metallicities = list([0.02]) # arbitrary since only one value
self.masses = list([np.sum(f['Yield'].value)]) # sum of all yields
names = ['Mass','mass_in_remnants']+self.elements
yield_subtable = {}
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_subtable['Mass'] = self.masses
yield_subtable['mass_in_remnants'] = np.asarray([-1*m for m in self.masses])
for el_index,el in enumerate(self.elements):
yield_subtable[el] = np.divide(f['Yield'][el_index],self.masses)
self.table[self.metallicities[0]] = yield_subtable
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = np.genfromtxt(localpath + 'input/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.metallicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Thielemann(self):
"""
Thielemann 2003 yields as compiled in Travaglio 2004
"""
y = np.genfromtxt(localpath + 'input/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
metallicity_list = [0.02]
self.metallicities = metallicity_list
self.masses = [1.37409]
names = y.dtype.names
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields, building on Nomoto84
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
metallicity_list = [0.02,0.0]
self.metallicities = metallicity_list
self.masses = [1.38]
y = np.genfromtxt(localpath + 'input/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes:  ### without radioactive isotopes: this branch should be used, because the radioactive nuclides are already accounted for here
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear whether they are double-counted; probably not, but the remnant mass comes out too large)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
if metallicity == 0.02:
model = 'W7'
elif metallicity == 0.0:
model = 'W70'
else:
print('this metallicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0) metallicity')
additional_keys = ['Mass', 'mass_in_remnants']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = np.where(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.append(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -sum(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari_net(self):
'''
Loading the yield table from Portinari1998.
These are presented as net yields in fractions of initial stellar mass.
'''
# Define metallicities in table
self.metallicities = [0.0004,0.004,0.008,0.02,0.05]
# Load one table
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/0.02.txt',names=True)
# Define masses and elements in yield tables
self.masses = list(x['Mass']) # In solar masses
self.elements = list(x.dtype.names[3:])
self.table = {} # Output dictionary for yield tables
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements # These are fields in dictionary
# Create empty record array of correct size
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
# Add mass field to subtable (in solar masses)
yield_subtable['Mass'] = np.array(self.masses)
# Read in yield table
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/%s.txt' %(metallicity),names=True)
# Read in element yields
for item in self.elements:
yield_subtable[item] = np.divide(x[item],x['Mass']) # Yields must be in mass fraction
# Add fractional mass in remnants
yield_subtable['mass_in_remnants'] = np.divide(x['Mass'] - x['ejected_mass'], x['Mass'])
# Add unprocessed mass as 1-remnants (with correction if summed net yields are not exactly zero)
for i,item in enumerate(self.masses):
yield_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_subtable['mass_in_remnants'][i] + sum(list(yield_subtable[self.elements][i])))
# Add subtable to output table
self.table[metallicity] = yield_subtable
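# Sanity-check sketch (not part of the original code): for these net yields every mass row
# should close to unity by construction,
#   tab = self.table[0.02]
#   total = tab['mass_in_remnants'] + tab['unprocessed_mass_in_winds'] + sum(tab[el] for el in self.elements)
#   # 'total' should be ~1 for each initial mass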
def francois(self):
'''
Loading the yield table of Francois et al. 2004. Taken from tables 1 and 2 of the paper, with O, H and He added from WW95 tables 5A and 5B,
where all elements are for Z=Zsun and values for Msun > 40 are kept the same as for Msun=40.
Values from 11-25 Msun use case A from WW95 and 30-40 Msun use case B.
'''
y = np.genfromtxt(localpath + 'input/yields/Francois04/francois_yields.txt',names=True)
self.elements = list(y.dtype.names[1:])
self.masses = y[y.dtype.names[0]]
self.metallicities = [0.02]
######### going from absolute ejected masses to relative ejected masses normed with the weight of the initial star
for i,item in enumerate(y.dtype.names[1:]):
y[item] = np.divide(y[item],y['Mass'])
yield_tables = {}
for i,item in enumerate(self.metallicities):
yield_tables[item] = y
self.table = yield_tables
def chieffi04(self):
'''
Loading the yield table of chieffi04.
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = tables[metallicity_index]
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
for j,jtem in enumerate(self.masses):
yield_tables_final_structure_subtable['mass_in_remnants'][j] = yields_for_one_metallicity[str(jtem)][1] / float(jtem) # ,yield_tables_final_structure_subtable['Mass'][i])
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
for t,ttem in enumerate(element_position):
if ttem == item:
yield_tables_final_structure_subtable[item][j] += yields_for_one_metallicity[str(jtem)][t+3] / float(jtem)
# remnant + yields of all elements is less than the total mass. In the next loop the wind mass is calculated.
name_list = list(yield_tables_final_structure_subtable.dtype.names[3:]) + ['mass_in_remnants']
for i in range(len(yield_tables_final_structure_subtable)):
tmp = []
for j,jtem in enumerate(name_list):
tmp.append(yield_tables_final_structure_subtable[jtem][i])
tmp = sum(tmp)
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1 - tmp
yield_tables_final_structure[self.metallicities[metallicity_index]] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def chieffi04_net(self):
'''
Loading the yield table of chieffi04 corrected for Anders & Grevesse 1989 solar scaled initial yields
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
import urllib
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
urllib.urlretrieve(url,MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[self.metallicities[metallicity_index]] = np.load(DATADIR + '/chieffi_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
#############################################
def OldNugrid(self):
'''
Loading the NuGrid SN2 stellar yields from the NuGrid stellar data set I: stellar yields from H to Bi for stars with metallicities Z = 0.02 and Z = 0.01.
The wind yields need to be added to the *exp* explosion yields.
No r-process contribution, but s- and p-process from AGB and massive stars.
Both delayed and rapid SN explosion post-processing are included. The rapid set is not consistent for very massive stars, so the 'delayed' yield set is used.
Mass in remnants is not fully consistent with the paper table: [6.47634087, 2.67590435, 1.98070676] vs. [6.05, 2.73, 1.61], see table 4.
Same for Z = 0.02, but the other elements are implemented correctly: [3.27070753, 8.99349996, 6.12286813, 3.1179861, 1.96401573] vs. [3, 8.75, 5.71, 2.7, 1.6].
A switch is provided to change between the two explosion treatments (rapid/delayed).
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float)]
tdtype2 = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float),('3200',float),('6000',float)]
expdtype = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('25_rapid',float)]
expdtype2 = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('32_delay',float),('32_rapid',float),('60_delay',float)]
yield_tables = {}
self.metallicities = [0.02,0.01]
which_sn_model_to_use = 'delay' # 'rapid'
for i,metallicity_index in enumerate([2,1]):
if i == 0:
z = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_delay'] += z['2500']
y['32_%s' %(which_sn_model_to_use)] += z['3200']
y['60_delay'] += z['6000']
else:
z = np.genfromtxt(localpath +'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_%s' %(which_sn_model_to_use)] += z['2500']
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(y['element1']):
element_list2.append(item.decode('utf8'))
y = rcfuncs.append_fields(y,'element',element_list2,usemask = False)
yield_tables[self.metallicities[i]] = y
self.elements = list(yield_tables[0.02]['element'])
# For python 3 the bytes need to be changed into strings
self.masses = np.array((15,20,25,32,60))
######
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
if metallicity == 0.02:
base = np.zeros(len(self.masses))
else:
base = np.zeros(len(self.masses)-2)
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
if metallicity == 0.02:
yield_tables_final_structure_subtable['Mass'] = self.masses
else:
yield_tables_final_structure_subtable['Mass'] = self.masses[:-2]
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
if metallicity == 0.02:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(5)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_delay']
temp1[3] = line_of_one_element['32_%s' %(which_sn_model_to_use)]
temp1[4] = line_of_one_element['60_delay']
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses)
else:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(3)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_%s' %(which_sn_model_to_use)]
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses[:-2])
if metallicity == 0.02:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-sum(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-sum(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-sum(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure_subtable[final_mass_name_tag][3] = (1-sum(yield_tables_final_structure_subtable[self.elements][3]))
yield_tables_final_structure_subtable[final_mass_name_tag][4] = (1-sum(yield_tables_final_structure_subtable[self.elements][4]))
else:
yield_tables_final_structure_subtable[final_mass_name_tag][0] = (1-sum(yield_tables_final_structure_subtable[self.elements][0]))
yield_tables_final_structure_subtable[final_mass_name_tag][1] = (1-sum(yield_tables_final_structure_subtable[self.elements][1]))
yield_tables_final_structure_subtable[final_mass_name_tag][2] = (1-sum(yield_tables_final_structure_subtable[self.elements][2]))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def one_parameter(self, elements, element_fractions):
"""
This function was introduced in order to find best-fit yield sets where each element has just a single yield (no metallicity or mass dependence).
One potential problem is that SN2 feedback has a large Neon fraction (~0.01); the next missing element is Argon, which only contributes about 0.05%. This might spoil the metallicity derivation a bit.
Another problem: He and the remnant mass fraction are not constrained by the APOGEE data. They could perhaps be constrained externally by yield sets, the cosmic abundance standard, or solar abundances.
"""
self.metallicities = [0.01]
self.masses = np.array([10])
self.elements = elements
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_table = np.core.records.fromarrays(list_of_arrays,names=names)
yield_table['Mass'] = self.masses
yield_table['mass_in_remnants'] = 0.1
yield_table['unprocessed_mass_in_winds'] = 1 - yield_table['mass_in_remnants']
for i,item in enumerate(self.elements[1:]):
yield_table[item] = element_fractions[i+1]
yield_table['H'] = -sum(element_fractions[1:])
yield_tables_final_structure[self.metallicities[0]] = yield_table
self.table = yield_tables_final_structure
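# Illustrative call (element list and fractions are made-up numbers):
#   sn2 = SN2_feedback()
#   sn2.one_parameter(['H', 'O', 'Mg', 'Fe'], [0.0, 0.01, 0.001, 0.001])
#   # -> a single 10 Msun channel at Z = 0.01 whose H yield (-0.012) balances the other elements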
def Nomoto2013(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables_dict[metallicity]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(yields_for_one_metallicity['M']):
element_list2.append(item.decode('utf8'))
yields_for_one_metallicity = rcfuncs.append_fields(yields_for_one_metallicity,'element',element_list2,usemask = False)
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
#yield_tables_final_structure_subtable['mass_in_remnants'] = yields_for_one_metallicity['M']
temp1 = np.zeros(len(self.masses))
temp1[0] = yields_for_one_metallicity[0][21]
temp1[1] = yields_for_one_metallicity[0][22]
temp1[2] = yields_for_one_metallicity[0][23]
temp1[3] = yields_for_one_metallicity[0][24]
temp1[4] = yields_for_one_metallicity[0][25]
temp1[5] = yields_for_one_metallicity[0][26]
temp1[6] = yields_for_one_metallicity[0][27]
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(temp1,self.masses)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==jtem)][0]
temp1 = np.zeros(len(self.masses))
temp1[0] = line_of_one_element[21]
temp1[1] = line_of_one_element[22]
temp1[2] = line_of_one_element[23]
temp1[3] = line_of_one_element[24]
temp1[4] = line_of_one_element[25]
temp1[5] = line_of_one_element[26]
temp1[6] = line_of_one_element[27]
yield_tables_final_structure_subtable[item] += np.divide(temp1,self.masses)
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][0] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][0]-sum(yield_tables_final_structure_subtable[self.elements][0]))#yields_for_one_metallicity[0][21]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][1] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][1]-sum(yield_tables_final_structure_subtable[self.elements][1]))#yields_for_one_metallicity[0][22]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][2] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][2]-sum(yield_tables_final_structure_subtable[self.elements][2]))#yields_for_one_metallicity[0][23]#divided by mass because 'mass in remnant' is also normalised
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][3] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][3]-sum(yield_tables_final_structure_subtable[self.elements][3]))#yields_for_one_metallicity[0][24]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][4] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][4]-sum(yield_tables_final_structure_subtable[self.elements][4]))#yields_for_one_metallicity[0][25]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][5] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][5]-sum(yield_tables_final_structure_subtable[self.elements][5]))#yields_for_one_metallicity[0][26]#
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][6] = (1-yield_tables_final_structure_subtable['mass_in_remnants'][6]-sum(yield_tables_final_structure_subtable[self.elements][6]))#yields_for_one_metallicity[0][27]#
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def Nomoto2013_net(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[metallicity] = np.load(localpath + 'input/yields/Nomoto2013/nomoto_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
def West17_net(self):
""" CC-SN data from the ertl.txt file from <NAME> & <NAME> (2017, in prep)
Only elements up to Ge are implemented here - but original table has all up to Pb"""
# Index elements
indexing = {}
indexing['H'] = ['H1', 'H2']
indexing['He'] = ['He3', 'He4']
indexing['Li'] = ['Li6', 'Li7']
indexing['Be'] = ['Be9']
indexing['B'] = ['B10', 'B11']
indexing['C'] = ['C12', 'C13']
indexing['N'] = ['N14', 'N15']
indexing['O'] = ['O16', 'O17', 'O18']
indexing['F'] = ['F19']
indexing['Ne'] = ['Ne20', 'Ne21', 'Ne22']
indexing['Na'] = ['Na23']
indexing['Mg'] = ['Mg24', 'Mg25', 'Mg26']
indexing['Al'] = ['Al27']
indexing['Si'] = ['Si28', 'Si29', 'Si30']
indexing['P'] = ['P31']
indexing['S'] = ['S32','S33','S34','S36']
indexing['Cl'] = ['Cl35', 'Cl37']
indexing['Ar'] = ['Ar36', 'Ar38', 'Ar40']
indexing['K'] = ['K39', 'K41']
indexing['Ca'] = ['K40','Ca40', 'Ca42', 'Ca43', 'Ca44', 'Ca46', 'Ca48']
indexing['Sc'] = ['Sc45']
indexing['Ti'] = ['Ti46', 'Ti47', 'Ti48', 'Ti49', 'Ti50']
indexing['V'] = ['V50', 'V51']
indexing['Cr'] = ['Cr50', 'Cr52', 'Cr53', 'Cr54']
indexing['Mn'] = ['Mn55']
indexing['Fe'] = ['Fe54', 'Fe56', 'Fe57', 'Fe58']
indexing['Co'] = ['Co59']
indexing['Ni'] = ['Ni58', 'Ni60', 'Ni61', 'Ni62', 'Ni64']
indexing['Cu'] = ['Cu63', 'Cu65']
indexing['Zn'] = ['Zn64', 'Zn66', 'Zn67', 'Zn68', 'Zn70']
indexing['Ga'] = ['Ga69', 'Ga71']
indexing['Ge'] = ['Ge70', 'Ge72', 'Ge73', 'Ge74', 'Ge76']
# Load data
data = np.genfromtxt('Chempy/input/yields/West17/ertl.txt',skip_header=102,names=True)
# Load model parameters
z_solar = 0.0153032
self.masses = np.unique(data['mass'])
scaled_z = np.unique(data['metallicity']) # scaled to solar
self.metallicities = scaled_z*z_solar # actual metallicities
self.elements = [key for key in indexing.keys()] # list of elements
# Output table
self.table = {}
# Create initial abundances
init_abun = {}
import os
if os.path.exists('Chempy/input/yields/West17/init_abun.npz'):
init_file = np.load('Chempy/input/yields/West17/init_abun.npz')
for z_in,sc_z in enumerate(scaled_z):
init_abun[sc_z] = {}
for k,key in enumerate(init_file['keys']):
init_abun[sc_z][key] = init_file['datfile'][z_in][k]
else: # If not already saved
# Import initial abundance package
os.chdir('Chempy/input/yields/West17')
import gch_wh13
os.chdir('../../../../')
init_dat = []
from matplotlib.cbook import flatten
all_isotopes=list(flatten(list(indexing.values())))
for sc_z in scaled_z:
init_abun[sc_z] = gch_wh13.GCHWH13(sc_z)
init_dat.append(init_abun[sc_z].abu)
np.savez('Chempy/input/yields/West17/init_abun.npz',datfile=init_dat,keys=all_isotopes)
for z_index,z in enumerate(self.metallicities): # Define table for each metallicity
# Initialise subtables
yield_subtable = {}
yield_subtable['mass_in_remnants'] = []
yield_subtable['Mass'] = self.masses
for el in self.elements:
yield_subtable[el]=[]
# Find correct row in table
for mass in self.masses:
for r,row in enumerate(data):
if row['mass'] == mass and row['metallicity']==scaled_z[z_index]:
row_index = r
break
# Add remnant mass fraction
remnant = data['remnant'][row_index]
yield_subtable['mass_in_remnants'].append(remnant/mass)
# Add each isotope into table
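# Net yield per isotope, as a fraction of the initial stellar mass M:
# (ejected isotope mass)/M - X_init,isotope * (M - M_remnant)/M,
# i.e. only the newly produced material, with the initially present contribution removed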
for element in self.elements:
el_net_yield = 0
for isotope in indexing[element]: # Sum contributions from each isotope of the element
isotope_net_yield = data[isotope][r]/mass-init_abun[scaled_z[z_index]][isotope]*(mass-remnant)/mass
el_net_yield +=isotope_net_yield # combine for the total element net yield
yield_subtable[element].append(el_net_yield)
summed_yields = np.zeros(len(self.masses)) # Summed net yield - should be approximately 0
for element in self.elements:
yield_subtable[element] = np.asarray(yield_subtable[element])
summed_yields+=yield_subtable[element]
# Write into yield table
yield_subtable['mass_in_remnants'] = np.asarray(yield_subtable['mass_in_remnants'])
yield_subtable['unprocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-summed_yields
# Restructure table
all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements
list_of_arrays = [yield_subtable[key] for key in all_keys]
restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)
self.table[z] = restructure_subtable
def Frischknecht16_net(self):
""" DO NOT USE!!
pre-SN2 yields from Frischknecht et al. 2016. These are implemented for masses of 15-40Msun, for rotating stars.
Yields from stars with 'normal' rotations are used here.
These are already net yields, so no conversion needs to be made
"""
import numpy.lib.recfunctions as rcfuncs
import os
# Define metallicites
self.metallicities = [0.0134,1e-3,1e-5] # First is solar value
# Define masses
self.masses= np.array((15,20,25,40))
# Define isotope indexing. For radioactive isotopes with half-lives << Chempy time_step they are assigned to their daughter element
# NB: we only use elements up to Ge here, as in the paper
indexing={}
indexing['H']=['p','d']
indexing['He'] = ['he3','he4']
indexing['Li'] = ['li6','li7']
indexing['Be'] = ['be9']
indexing['B'] = ['b10','b11']
indexing['C'] = ['c12','c13']
indexing['N'] = ['n14','n15']
indexing['O'] = ['o16','o17','o18']
indexing['F'] = ['f19']
indexing['Ne'] = ['ne20','ne21','ne22']
indexing['Na'] = ['na23']
indexing['Mg'] = ['mg24','mg25','mg26','al26']
indexing['Al'] = ['al27']
indexing['Si'] = ['si28','si29','si30']
indexing['P'] = ['p31']
indexing['S'] = ['s32','s33','s34','s36']
indexing['Cl'] = ['cl35','cl37']
indexing['Ar'] = ['ar36','ar38','ar40']
indexing['K'] = ['k39','k41']
indexing['Ca'] = ['ca40','ca42','ca43','ca44','ca46','ca48']
indexing['Sc'] = ['sc45']
indexing['Ti'] = ['ti46','ti47','ti48','ti49','ti50']
indexing['V'] = ['v50','v51']
indexing['Cr'] = ['cr50','cr52','cr53','cr54']
indexing['Mn'] = ['mn55']
indexing['Fe'] = ['fe54', 'fe56','fe57','fe58']
indexing['Co'] = ['fe60', 'co59']
indexing['Ni'] = ['ni58','ni60','ni61','ni62','ni64']
indexing['Cu'] = ['cu63','cu65']
indexing['Zn'] = ['zn64','zn66','zn67','zn68','zn70']
indexing['Ga'] = ['ga69','ga71']
indexing['Ge'] = ['ge70','ge72','ge73','ge74','ge76']
# Define indexed elements
self.elements = list(indexing.keys())
# Define data types
dt = np.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
# Initialise yield table
yield_table = {}
# Import full table with correct rows and data-types
z = np.genfromtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skip_header=62,dtype=dt)
# Create model dictionary indexed by metallicity, giving relevant model number for each choice of mass
# See Frischknecht info_yields.txt file for model information
model_dict = {}
model_dict[0.0134] = [2,8,14,27]
model_dict[1e-3]=[4,10,16,28]
model_dict[1e-5]=[6,12,18,29]
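# Each list gives, in order, the model numbers for the masses 15, 20, 25 and 40 Msun at that metallicity;
# they index both the yield columns and the remnant-mass rows below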
# Import list of remnant masses for each model (from row 32-60, column 6 of .txt file)
# NB: these are in solar masses
rem_mass_table = np.loadtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skiprows=31,usecols=6)[:29]
# Create one subtable for each metallicity
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds'] # List of keys for table
names = additional_keys + self.elements
# Initialise table and arrays
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
mass_in_remnants = np.zeros(len(self.masses))
total_mass_fraction = np.zeros(len(self.masses))
element_mass = np.zeros(len(self.masses))
# Add masses to table
yield_subtable['Mass'] = self.masses
# Extract remnant masses (in solar masses) for each model:
for mass_index,model_index in enumerate(model_dict[metallicity]):
mass_in_remnants[mass_index] = rem_mass_table[model_index-1]
# Iterate over all elements
for element in self.elements:
element_mass = np.zeros(len(self.masses))
for isotope in indexing[element]: # Iterate over isotopes of each element
for mass_index,model_index in enumerate(model_dict[metallicity]): # Iterate over masses
for row in z: # Find required row in table
if row[0] == isotope:
element_mass[mass_index]+=row[model_index] # Compute cumulative mass for all isotopes
yield_subtable[element]=np.divide(element_mass,self.masses) # Add entry to subtable
all_fractions = [row[model_index] for row in z] # This lists all elements (not just up to Ge)
total_mass_fraction[mass_index] = np.sum(all_fractions) # Compute total net mass fraction (sums to approximately 0)
# Add fields for remnant mass (now as a mass fraction) and unprocessed mass fraction
yield_subtable['mass_in_remnants']=np.divide(mass_in_remnants,self.masses)
yield_subtable['unprocessed_mass_in_winds'] = 1.-(yield_subtable['mass_in_remnants']+total_mass_fraction) # This is all mass not from yields/remnants
# Add subtable to full table
yield_table[metallicity]=yield_subtable
# Define final yield table for output
self.table = yield_table
def NuGrid_net(self,model_type='delay'):
""" This gives the net SNII yields from the NuGrid collaboration (Ritter et al. 2017 (in prep))
Either rapid or delay SN2 yields (Fryer et al. 2012) can be used - changeable via the model_type parameter.
Delay models are chosen for good match with the Fe yields of Nomoto et al. (2006) and Chieffi & Limongi (2004)
"""
# Create list of masses and metallicites:
self.masses = [12.0,15.0,20.0,25.0]
self.metallicities = [0.02,0.01,0.006,0.001,0.0001]
# First define names of yield tables and the remnant masses for each metallicity (in solar masses)
if model_type == 'delay':
filename=localpath+'input/yields/NuGrid/H NuGrid yields delay_total.txt'
remnants = {}
remnants[0.02] = [1.61,1.61,2.73,5.71] # This gives remnant masses for each mass
remnants[0.01] = [1.61,1.61,2.77,6.05]
remnants[0.006] = [1.62,1.62,2.79,6.18]
remnants[0.001] = [1.62,1.62,2.81,6.35]
remnants[0.0001] = [1.62,1.62,2.82,6.38]
elif model_type == 'rapid':
filename = localpath+'input/yields/NuGrid/H NuGrid yields rapid total.txt'
remnants = {}
remnants[0.02] = [1.44,1.44,2.70,12.81] # Define remnants from metallicities
remnants[0.01] = [1.44,1.44,1.83,9.84]
remnants[0.006] = [1.44, 1.44, 1.77, 7.84]
remnants[0.001] = [1.44,1.44,1.76,5.88]
remnants[0.0001] = [1.44,1.44,1.76,5.61]
else:
raise ValueError('Wrong type: must be delay or rapid')
# Define which lines in the .txt files to use.
# This defines cuts starting at each relevant table
cuts={}
for z in self.metallicities:
cuts[z] = []
for mass in self.masses:
txtfile=open(filename,"r")
for line_no,line in enumerate(txtfile):
if str(mass) in line and str(z) in line:
cuts[z].append(line_no)
line_end = line_no # Final line
# Create list of elements taken from data-file (from first relevant table)
data = np.genfromtxt(filename,skip_header=int(cuts[0.02][0])+4,
skip_footer=line_end-int(cuts[0.02][0])-83,
dtype=['<U8','<U15','<U15','<U15'])
self.elements = [str(line[0][1:]) for line in data]
self.table={} # Initialize final output
for z in self.metallicities: # Produce subtable for each metallicity
yield_subtable={}
yield_subtable['Mass'] = self.masses
yield_subtable['mass_in_remnants'] = np.divide(np.asarray(remnants[z]),self.masses) # Initialize lists
for el in self.elements:
yield_subtable[el] = []
for m_index,mass in enumerate(self.masses): # Create data array for each mass
unprocessed_mass = mass-remnants[z][m_index] # Mass not in remnants in Msun
data = np.genfromtxt(filename,skip_header=int(cuts[z][m_index])+4,
skip_footer=line_end-int(cuts[z][m_index])-83,dtype=['<U8','<U15','<U15','<U15']) # Read from data file
# Now iterate over data-file and read in element names
# NB: [1:]s are necessary as each element in txt file starts with &
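# Net mass fraction per element = (total ejected yield minus the initially present mass in the ejecta, X_init*(M - M_remnant)), normalised by the stellar mass M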
for line in data:
el_name = str(line[0][1:]) # Name of element
el_yield = float(line[1][1:]) # Yield in Msun
el_init = float(line[2][1:]) # Initial mass fraction
el_net = el_yield-el_init*unprocessed_mass
yield_subtable[el_name].append(el_net/mass) # Net mass fraction
# Calculate summed net yield - should be approximately 0
summed_yields = np.zeros(len(self.masses))
for el in self.elements:
yield_subtable[el] = np.asarray(yield_subtable[el])
summed_yields+=yield_subtable[el]
# Compute unprocessed mass in winds: ejected mass not in remnants, corrected by the (small) summed net yield
yield_subtable['unprocessed_mass_in_winds'] = 1.0-yield_subtable['mass_in_remnants']-summed_yields
# Restructure dictionary into record array for output
all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements
list_of_arrays = [yield_subtable[key] for key in all_keys]
restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)
self.table[z] = restructure_subtable # This is output table for specific z
# Yield table output is self.table
def TNG_net(self):
""" This loads the CC-SN yields used in the Illustris TNG simulation.
This includes Kobayashi (2006) and Portinari (1998) tables - see Pillepich et al. 2017
THIS ONLY WORKS FOR AN IMF SLOPE OF -2.3 - DO NOT OPTIMIZE OVER THIS
"""
import h5py as h5
filename = localpath+'input/yields/TNG/SNII.hdf5'
# Read H5 file
f = h5.File(filename, "r")
# Define element indexing
indexing = {}
indexing['H'] = 'Hydrogen'
indexing['He'] = 'Helium'
indexing['C'] = 'Carbon'
indexing['N']= 'Nitrogen'
indexing['O'] = 'Oxygen'
indexing['Ne'] = 'Neon'
indexing['Mg'] = 'Magnesium'
indexing['Si'] = 'Silicon'
indexing['S'] = 'Sulphur' # Not used by TNG simulation
indexing['Ca'] = 'Calcium' # Not used by TNG simulation
indexing['Fe'] = 'Iron'
self.elements = list(indexing.keys())
self.table = {}
# Define masses / metallicities
self.metallicities = list(f['Metallicities'].value)
self.masses = f['Masses'].value
for z_index,z in enumerate(self.metallicities):
yield_subtable = {}
z_name = f['Yield_names'].value[z_index].decode('utf-8')
z_data = f['Yields/'+z_name+'/Yield']
ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value
yield_subtable['Mass'] = self.masses
remnants = self.masses-ejecta_mass
yield_subtable['mass_in_remnants'] = np.divide(remnants,self.masses)
for el in list(indexing.keys()):
yield_subtable[el] = np.zeros(len(self.masses))
summed_yields = np.zeros(len(self.masses))
for m_index,mass in enumerate(self.masses):
for el_index,el in enumerate(self.elements):
el_yield_fraction = z_data[el_index][m_index]/mass #(mass-remnants[m_index]) # Find fraction of ejecta per element
yield_subtable[el][m_index] = el_yield_fraction
summed_yields[m_index]+=el_yield_fraction # Compute total yield
yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']
# Restructure table
all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements
list_of_arrays = [yield_subtable[key] for key in all_keys]
restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)
self.table[z] = restructure_subtable
def CL18_net(self):
"""These are net yields from Chieffi + Limongi 2018 (unpublished), downloaded from http://orfeo.iaps.inaf.it/"""
datpath=localpath+'/input/yields/CL18/'
self.metallicities=[0.0134,0.00134,0.000134,0.0000134] # metallicities of [Fe/H]=[0,-1,-2,-3]
rotations=[0,150,300] # initial rotational velocity in km/s
self.masses=np.array([13,15,20,25,30,40,60,80,120])
weight_matrix=np.array([[0.7,0.3,0.],[0.6,0.4,0.],[0.48,0.48,0.04],[0.05,0.7,0.25]]) # np.array([[1.,0.,0.],[1.,0.,0.],[1.,0.,0.],[1.,0.,0.]])#
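# Each row of weight_matrix corresponds to one metallicity and each column to one rotational velocity in `rotations`;
# the weights in a row sum to 1, so the yields below are a weighted average over rotation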
self.elements=['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Xe','Cs','Ba','La','Ce','Pr','Nd','Hg','Tl','Pb','Bi']
LEN=len(self.elements)
yield_table={}
# Import full table with correct rows and data-types
dt = np.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
# Load once in full to find length
z = np.genfromtxt(datpath+'tab_yieldsnet_ele_exp.dec',skip_header=1,dtype=dt)
full_len=len(z)+1
# Import full table with correct rows and data-types
dt = np.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
for m,met in enumerate(self.metallicities):
z,zTot=[],[]
for rotation_index in range(3):
header=(3*m+rotation_index)*(LEN+1)+1
z.append(np.genfromtxt(datpath+'tab_yieldsnet_ele_exp.dec',skip_header=header,skip_footer=full_len-header-LEN,dtype=dt))
zTot.append(np.genfromtxt(datpath+'tab_yieldstot_ele_exp.dec',skip_header=header,skip_footer=full_len-header-LEN,dtype=dt))
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds'] # List of keys for table
names = additional_keys + self.elements
# Initialise table and arrays
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
mass_in_remnants = np.zeros(len(self.masses))
total_mass_fraction = np.zeros(len(self.masses))
element_mass = np.zeros(len(self.masses))
yield_subtable['Mass']=self.masses
tot_yield=np.zeros(len(self.masses))
for e,el in enumerate(self.elements):
for m_index in range(len(self.masses)):
for rotation_index in range(3):
yield_subtable[el][m_index]+=z[rotation_index][e][m_index+4]*weight_matrix[m,rotation_index]/self.masses[m_index]
tot_yield[m_index]+=yield_subtable[el][m_index]
# Compute total remnant mass
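# Remnant mass fraction per rotation = 1 - (total ejected mass from the 'tot' table)/M, weighted over rotation with the same weight_matrix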
for m_index,mass in enumerate(self.masses):
for rotation_index in range(3):
yield_subtable['mass_in_remnants'][m_index]+=(1.-np.sum([zTot[rotation_index][i][m_index+4] for i in range(len(self.elements))])/mass)*weight_matrix[m,rotation_index]
# Compute unprocessed mass
yield_subtable['unprocessed_mass_in_winds']=1.-yield_subtable['mass_in_remnants']-tot_yield
yield_table[met]=yield_subtable
self.table=yield_table
#######################
class AGB_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for agb stars.
The different methods load different tables from the literature. They are in the input/yields/ folder.
"""
def TNG_net(self):
""" This gives the yields used in the IllustrisTNG simulation (see Pillepich et al. 2017)
These are net yields, and a combination of Karakas (2006), Doherty et al. (2014) & Fishlock et al. (2014)
These were provided by Annalisa herself.
Masses are indexed backwards (high to low) to match the Karakas tables
"""
import h5py as h5
filename = localpath+'input/yields/TNG/AGB.hdf5'
# Read H5 file
f = h5.File(filename, "r")
indexing = {}
indexing['H'] = 'Hydrogen'
indexing['He'] = 'Helium'
indexing['C'] = 'Carbon'
indexing['N']= 'Nitrogen'
indexing['O'] = 'Oxygen'
indexing['Ne'] = 'Neon'
indexing['Mg'] = 'Magnesium'
indexing['Si'] = 'Silicon'
indexing['S'] = 'Sulphur' # Not used by TNG simulation
indexing['Ca'] = 'Calcium' # Not used by TNG simulation
indexing['Fe'] = 'Iron'
self.elements = list(indexing.keys())
self.table = {}
self.metallicities = list(f['Metallicities'].value)
self.masses = f['Masses'].value
for z_index,z in enumerate(self.metallicities):
yield_subtable = {}
z_name = f['Yield_names'].value[z_index].decode('utf-8')
z_data = f['Yields/'+z_name+'/Yield']
ejecta_mass = f['Yields/'+z_name+'/Ejected_mass'].value
yield_subtable['Mass'] = list(reversed(self.masses))
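# Masses (and remnants) are stored high-to-low here, while the HDF5 yields are ordered low-to-high,
# hence the reversed index len(self.masses)-m_index-1 below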
remnants = self.masses-ejecta_mass
yield_subtable['mass_in_remnants'] = np.divide(list(reversed(remnants)),yield_subtable['Mass'])
for el in list(indexing.keys()):
yield_subtable[el] = np.zeros(len(self.masses))
summed_yields = np.zeros(len(self.masses))
for m_index,mass in enumerate(yield_subtable['Mass']):
for el_index,el in enumerate(self.elements):
el_yield = z_data[el_index][len(self.masses)-m_index-1]
el_yield_fraction = el_yield/mass
yield_subtable[el][m_index] = el_yield_fraction
summed_yields[m_index]+=el_yield_fraction
yield_subtable['unprocessed_mass_in_winds'] = 1.-summed_yields-yield_subtable['mass_in_remnants']
self.table[z.astype(float)] = yield_subtable
# Restructure table
all_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']+self.elements
list_of_arrays = [yield_subtable[key] for key in all_keys]
restructure_subtable = np.core.records.fromarrays(list_of_arrays,names=all_keys)
self.table[z] = restructure_subtable
def Ventura_net(self):
"""
Ventura 2013 net yields from Paolo himself
"""
self.metallicities = [0.04,0.018,0.008,0.004,0.001,0.0003]
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/0.018.txt',names=True)
self.masses = x['Mass']
self.elements = ['H', 'He', 'Li','C','N','O','F','Ne','Na','Mg','Al','Si']
###
yield_tables_final_structure = {}
for metallicity in self.metallicities:
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/%s.txt' %(str(metallicity)),names=True)
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(x['Mass']))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = x['Mass']
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(x['mass_in_remnants'],x['Mass'])
for item in self.elements:
if item == 'C':
yield_tables_final_structure_subtable[item] = x['C12']
yield_tables_final_structure_subtable[item] += x['C13']
elif item == 'N':
yield_tables_final_structure_subtable[item] = x['N14']
elif item == 'O':
yield_tables_final_structure_subtable[item] = x['O16']
yield_tables_final_structure_subtable[item] += x['O17']
yield_tables_final_structure_subtable[item] += x['O18']
elif item == 'F':
yield_tables_final_structure_subtable[item] = x['F19']
elif item == 'Ne':
yield_tables_final_structure_subtable[item] = x['NE20']
yield_tables_final_structure_subtable[item] += x['NE22']
elif item == 'Na':
yield_tables_final_structure_subtable[item] = x['NA23']
elif item == 'Mg':
yield_tables_final_structure_subtable[item] = x['MG24']
yield_tables_final_structure_subtable[item] += x['MG25']
yield_tables_final_structure_subtable[item] += x['MG26']
elif item == 'Al':
yield_tables_final_structure_subtable[item] = x['AL26']
yield_tables_final_structure_subtable[item] += x['AL27']
elif item == 'Si':
yield_tables_final_structure_subtable[item] = x['SI28']
else:
yield_tables_final_structure_subtable[item] = x[item]
for item in self.elements:
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],x['Mass'])
for i,item in enumerate(x['Mass']):
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_tables_final_structure_subtable['mass_in_remnants'][i] + sum(list(yield_tables_final_structure_subtable[self.elements][i])))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
###
def Nomoto2013(self):
'''
Nomoto 2013 AGB yields up to 6.5 Msun; these are a copy of the Karakas 2010 yields, but given here as net yields, which is less convenient.
'''
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((1.,1.2,1.5,1.8,1.9,2.0,2.2,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0))#,6.5,7.0,8.0,10.))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
#########################
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables_dict[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
################### Here we can change which yield is used for processing; 'ejected_mass' is normalised by the initial mass to obtain relative masses
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['M']==jtem)][0]
temp1 = np.zeros(len(self.masses))
for s in range(len(self.masses)):
temp1[s] = line_of_one_element[s+2]
yield_tables_final_structure_subtable[item] += np.divide(temp1,self.masses)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype: the mapped values are int,
# but unmapped entries become NaN,
# so the actual result dtype is float
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
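# pd.Series applied elementwise yields a DataFrame; output_type and dtypes (three float columns)
# describe the expected output schema explicitly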
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
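# helper: give each lambda a distinct __name__ ('f1') so the list-of-functions results get
# distinguishable labels (pandas labels such results with the function's __name__)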
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
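# _call_agg=True exercises the aggregation path of transform; the expected results below are therefore computed with pandas .agg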
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# single-chunk series isin a multi-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multi-chunk series isin a single-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
# multi-chunk series isin a multi-chunk series
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# with labels=False, result and expected are integer code arrays
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected are arrays whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# results are compared element-wise below; the returned bins are compared as an index
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# multiple chunks
df = from_pandas_df(raw, chunk_size=6)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_frame_equal(expected, result)
# dict type
df = from_pandas_df(raw, chunk_size=5)
r = df.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'int32', 'c2': 'float', 'c8': 'str'})
pd.testing.assert_frame_equal(expected, result)
# test arrow_string dtype
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c1': 'arrow_string'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'arrow_string'})
pd.testing.assert_frame_equal(expected, result)
# test series
s = pd.Series(rs.randint(5, size=20))
series = from_pandas_series(s)
r = series.astype('int32')
result = r.execute().fetch()
expected = s.astype('int32')
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s, chunk_size=6)
r = series.astype('arrow_string')
result = r.execute().fetch()
expected = s.astype('arrow_string')
pd.testing.assert_series_equal(result, expected)
# test index
raw = pd.Index(rs.randint(5, size=20))
mix = from_pandas_index(raw)
r = mix.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
pd.testing.assert_index_equal(result, expected)
# multiple chunks
series = from_pandas_series(s, chunk_size=6)
r = series.astype('str')
result = r.execute().fetch()
expected = s.astype('str')
pd.testing.assert_series_equal(result, expected)
# test category
raw = pd.DataFrame(rs.randint(3, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=5)
r = df.astype('category')
result = r.execute().fetch()
expected = raw.astype('category')
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=3)
r = df.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c8': 'int32', 'c4': 'str'})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=6)
r = df.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
result = r.execute().fetch()
expected = raw.astype({'c1': 'category', 'c5': 'float', 'c2': 'int32',
'c7': pd.CategoricalDtype([1, 3, 4, 2]),
'c4': pd.CategoricalDtype([1, 3, 2])})
pd.testing.assert_frame_equal(expected, result)
df = from_pandas_df(raw, chunk_size=8)
r = df.astype({'c2': 'category'})
result = r.execute().fetch()
expected = raw.astype({'c2': 'category'})
pd.testing.assert_frame_equal(expected, result)
# test series category
raw = pd.Series(np.random.choice(['a', 'b', 'c'], size=(10,)))
series = from_pandas_series(raw, chunk_size=4)
result = series.astype('category').execute().fetch()
expected = raw.astype('category')
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=3)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b']), copy=False).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b']), copy=False)
pd.testing.assert_series_equal(expected, result)
series = from_pandas_series(raw, chunk_size=6)
result = series.astype(
pd.CategoricalDtype(['a', 'c', 'b', 'd'])).execute().fetch()
expected = raw.astype(pd.CategoricalDtype(['a', 'c', 'b', 'd']))
pd.testing.assert_series_equal(expected, result)
def test_drop(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
columns = ['c2', 'c4', 'c5', 'c6']
index = [3, 6, 7]
r = df.drop(columns=columns, index=index)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(columns=columns, index=index))
idx_series = from_pandas_series(pd.Series(index))
r = df.drop(idx_series)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.drop(pd.Series(index)))
df.drop(columns, axis=1, inplace=True)
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns, axis=1))
del df['c3']
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3'], axis=1))
ps = df.pop('c8')
pd.testing.assert_frame_equal(df.execute().fetch(),
raw.drop(columns + ['c3', 'c8'], axis=1))
pd.testing.assert_series_equal(ps.execute().fetch(),
raw['c8'])
# test series drop
raw = pd.Series(rs.randint(1000, size=(20,)))
series = from_pandas_series(raw, chunk_size=3)
r = series.drop(index=index)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.drop(index=index))
# test index drop
ser = pd.Series(range(20))
rs.shuffle(ser)
raw = pd.Index(ser)
idx = from_pandas_index(raw)
r = idx.drop(index)
pd.testing.assert_index_equal(r.execute().fetch(),
raw.drop(index))
def test_melt(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=3)
r = df.melt(id_vars=['c1'], value_vars=['c2', 'c4'])
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'variable']).reset_index(drop=True),
raw.melt(id_vars=['c1'], value_vars=['c2', 'c4']).sort_values(['c1', 'variable']).reset_index(drop=True)
)
def test_drop_duplicates(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
for ignore_index in [True, False]:
try:
r = df.drop_duplicates(method=method, subset=subset,
keep=keep, ignore_index=ignore_index)
result = r.execute().fetch()
try:
expected = raw.drop_duplicates(subset=subset,
keep=keep, ignore_index=ignore_index)
except TypeError:
# ignore_index is supported in pandas 1.0
expected = raw.drop_duplicates(subset=subset,
keep=keep)
if ignore_index:
expected.reset_index(drop=True, inplace=True)
pd.testing.assert_frame_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}, ignore_index={ignore_index}') from e
# test series and index
s = raw['c3']
ind = pd.Index(s)
for tp, obj in [('series', s), ('index', ind)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.drop_duplicates(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.drop_duplicates(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
# test inplace
series = from_pandas_series(s, chunk_size=11)
series.drop_duplicates(inplace=True)
result = series.execute().fetch()
expected = s.drop_duplicates()
pd.testing.assert_series_equal(result, expected)
def test_duplicated(setup):
# test dataframe drop
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 5)),
columns=['c' + str(i + 1) for i in range(5)],
index=['i' + str(j) for j in range(20)])
duplicate_lines = rs.randint(1000, size=5)
for i in [1, 3, 10, 11, 15]:
raw.iloc[i] = duplicate_lines
with option_context({'combine_size': 2}):
# test dataframe
for chunk_size in [(8, 3), (20, 5)]:
df = from_pandas_df(raw, chunk_size=chunk_size)
if chunk_size[0] < len(raw):
methods = ['tree', 'subset_tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for subset in [None, 'c1', ['c1', 'c2']]:
for keep in ['first', 'last', False]:
try:
r = df.duplicated(method=method, subset=subset, keep=keep)
result = r.execute().fetch()
expected = raw.duplicated(subset=subset, keep=keep)
pd.testing.assert_series_equal(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(
f'failed when method={method}, subset={subset}, '
f'keep={keep}') from e
# test series
s = raw['c3']
for tp, obj in [('series', s)]:
for chunk_size in [8, 20]:
to_m = from_pandas_series if tp == 'series' else from_pandas_index
mobj = to_m(obj, chunk_size=chunk_size)
if chunk_size < len(obj):
methods = ['tree', 'shuffle']
else:
# 1 chunk
methods = [None]
for method in methods:
for keep in ['first', 'last', False]:
try:
r = mobj.duplicated(method=method, keep=keep)
result = r.execute().fetch()
expected = obj.duplicated(keep=keep)
cmp = pd.testing.assert_series_equal \
if tp == 'series' else pd.testing.assert_index_equal
cmp(result, expected)
except Exception as e: # pragma: no cover
raise AssertionError(f'failed when method={method}, keep={keep}') from e
def test_memory_usage_execution(setup):
dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
data = dict([(t, np.ones(shape=500).astype(t))
for t in dtypes])
raw = pd.DataFrame(data)
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
df = from_pandas_df(raw, chunk_size=(500, 2))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=False)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=False))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.DataFrame(data, index=np.arange(500).astype('object'))
df = from_pandas_df(raw, chunk_size=(100, 3))
r = df.memory_usage(index=True)
pd.testing.assert_series_equal(r.execute().fetch(),
raw.memory_usage(index=True))
raw = pd.Series(np.ones(shape=500).astype('object'), name='s')
series = from_pandas_series(raw)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=False)
assert r.execute().fetch() == raw.memory_usage(index=False)
series = from_pandas_series(raw, chunk_size=100)
r = series.memory_usage(index=True)
assert r.execute().fetch() == raw.memory_usage(index=True)
raw = pd.Series(np.ones(shape=500))
from __future__ import annotations
from typing import Union, Tuple, List
import warnings
import numpy as np
class Question:
"""Question is a thershold/matching concept for splitting the node of the Decision Tree
Args:
column_index (int): Column index to be chosen from the array passed at the matching time.
value (Union[int, str, float, np.int64, np.float64]): Threshold value/ matching value.
header (str): column/header name.
"""
def __init__(self, column_index: int, value: Union[int, str, float, np.int64, np.float64], header: str):
"""Constructor
"""
self.column_index = column_index
self.value = value
self.header = header
def match(self, example: Union[list, np.ndarray]) -> bool:
"""Matching function to decide based on example whether result is true or false.
Args:
example (Union[list, np.ndarray]): Example to compare with question parameters.
Returns:
bool: True if the example satisfies the threshold or matches the value, otherwise False.
"""
if isinstance(example, list):
example = np.array(example, dtype="O")
val = example[self.column_index]
# adding numpy int and float data types as well
if isinstance(val, (int, float, np.int64, np.float64)):
# a condition for question to return True or False for numeric value
return float(val) >= float(self.value)
else:
return str(val) == str(self.value) # categorical data comparison
def __repr__(self):
condition = "=="
if isinstance(self.value, (int, float, np.int64, np.float64)):
condition = ">="
return f"Is {self.header} {condition} {self.value} ?"
class Node:
"""A Tree node either Decision Node or Leaf Node
Args:
question (Question, optional): question object. Defaults to None.
true_branch (Node, optional): connection to node at true side of the branch. Defaults to None.
false_branch (Node, optional): connection to node at false side of the branch. Defaults to None.
uncertainty (float, optional): Uncertainty value like gini,entropy,variance etc. Defaults to None.
leaf_value (Union[dict,int,float], optional): Leaf node/final node's value. Defaults to None.
"""
def __init__(self, question: Question = None, true_branch: Node = None, false_branch: Node = None, uncertainty: float = None, *, leaf_value: Union[dict, int, float] = None):
"""Constructor
"""
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
self.uncertainty = uncertainty
self.leaf_value = leaf_value
@property
def _is_leaf_node(self) -> bool:
"""Check if this node is leaf node or not.
Returns:
bool: True if leaf node else false.
"""
return self.leaf_value is not None
class DecisionTreeClassifier:
"""Decision Tree Based Classification Model
Args:
max_depth (int, optional): max depth of the tree. Defaults to 100.
min_samples_split (int, optional): min size of the sample at the time of split. Defaults to 2.
criteria (str, optional): what criteria to use for information. Defaults to 'gini'. available 'gini','entropy'.
"""
def __init__(self, max_depth: int = 100, min_samples_split: int = 2, criteria: str = 'gini'):
"""Constructor
"""
self._X = None
self._y = None
self._feature_names = None
self._target_name = None
self._tree = None
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criteria = criteria
def _count_dict(self, a: np.ndarray) -> dict:
"""Count class frequecies and get a dictionary from it
Args:
a (np.ndarray): input array. shape should be (m,1) for m samples.
Returns:
dict: categories/classes freq dictionary.
"""
unique_values = np.unique(a, return_counts=True)
zipped = zip(*unique_values)
dict_obj = dict(zipped)
return dict_obj
def _gini_impurity(self, arr: np.ndarray) -> float:
"""Calculate Gini Impurity
Args:
arr (np.ndarray): input array.
Returns:
float: gini impurity value.
"""
classes, counts = np.unique(arr, return_counts=True)
gini_score = 1 - np.square(counts / arr.shape[0]).sum(axis=0)
return gini_score
def _entropy(self, arr: np.ndarray) -> float:
"""Calculate Entropy
Args:
arr (np.ndarray): input array.
Returns:
float: entropy result.
"""
classes, counts = np.unique(arr, return_counts=True)
p = counts / arr.shape[0]
entropy_score = (-p * np.log2(p)).sum(axis=0)
return entropy_score
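# Worked example (illustrative): for labels ['A', 'A', 'B', 'B'],
#   gini    = 1 - (0.5**2 + 0.5**2)            = 0.5
#   entropy = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0
# while a pure node such as ['A', 'A', 'A'] scores 0 under both criteria.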
def _uncertainty(self, a: np.ndarray) -> float:
"""calcualte uncertainty
Args:
a (np.ndarray): input array
Returns:
float: uncertainty value
"""
if self.criteria == "entropy":
value = self._entropy(a)
elif self.criteria == "gini":
value = self._gini_impurity(a)
else:
warnings.warn(f"{self.criteria} is not coded yet. returning to gini.")
value = self._gini_impurity(a)
return value
def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
"""partition the rows based on the question
Args:
rows (np.ndarray): input array to split.
question (Question): question object containing the splitting criterion.
Returns:
Tuple[list,list]: true idxs and false idxs.
"""
true_idx, false_idx = [], []
for idx, row in enumerate(rows):
if question.match(row):
true_idx.append(idx)
else:
false_idx.append(idx)
return true_idx, false_idx
def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
"""Calculate information gain after splitting
Args:
left (np.ndarray): left side array.
right (np.ndarray): right side array.
parent_uncertainty (float): parent node uncertainty.
Returns:
float: information gain value.
"""
# calculate the weight (proportion) of the left partition
pr = left.shape[0] / (left.shape[0] + right.shape[0])
# calculate the child uncertainty as the weighted average of both branches
child_uncertainty = pr * \
self._uncertainty(left) + (1 - pr) * self._uncertainty(right)
# calculate information gain
info_gain_value = parent_uncertainty - child_uncertainty
return info_gain_value
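# Worked example (illustrative): a parent with gini 0.5 split into two equally
# sized, pure halves has weighted child uncertainty 0.5*0 + 0.5*0 = 0, so the
# information gain is 0.5 - 0 = 0.5, the largest possible for that node.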
def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
"""method to find best split possible for the sample
Args:
X (np.ndarray): Feature matrix.
y (np.ndarray): target matrix.
Returns:
Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty.
"""
max_gain = -1
best_split_question = None
parent_uncertainty = self._uncertainty(y)
m_samples, n_labels = X.shape
for col_index in range(n_labels): # iterate over feature columns
# get unique values from the feature
unique_values = np.unique(X[:, col_index])
for val in unique_values: # check for every value and find maximum info gain
ques = Question(
column_index=col_index,
value=val,
header=self._feature_names[col_index]
)
t_idx, f_idx = self._partition(X, ques)
# if it does not split the data
# skip it
if len(t_idx) == 0 or len(f_idx) == 0:
continue
true_y = y[t_idx, :]
false_y = y[f_idx, :]
# get information gain
gain = self._info_gain(true_y, false_y, parent_uncertainty)
if gain > max_gain:
max_gain, best_split_question = gain, ques
return max_gain, best_split_question, parent_uncertainty
def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
"""Recursive funtion to build tree.
Args:
X (np.ndarray): input features matrix.
y (np.ndarray): target matrix.
depth (int, optional): depth count of the recursion. Defaults to 0.
Returns:
Node: either leaf node or decision node
"""
m_samples, n_labels = X.shape
# stop the recursion and return a leaf node if the depth exceeds max_depth,
# only one feature column is left, or the number of samples is below
# min_samples_split
if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
return Node(leaf_value=self._count_dict(y))
gain, ques, uncertainty = self._find_best_split(X, y)
# if no split produced a positive gain (max_gain stayed negative),
# there is no point in splitting further; return a leaf
if gain < 0:
return Node(leaf_value=self._count_dict(y))
t_idx, f_idx = self._partition(X, ques) # get partition idxs
true_branch = self._build_tree(
X[t_idx, :], y[t_idx, :], depth + 1) # recurse on true-branch samples
false_branch = self._build_tree(
X[f_idx, :], y[f_idx, :], depth + 1) # recurse on false-branch samples
return Node(
question=ques,
true_branch=true_branch,
false_branch=false_branch,
uncertainty=uncertainty
)
def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
"""Train the model
Args:
X (Union[np.ndarray,list]): feature matrix.
y (Union[np.ndarray,list]): target matrix.
feature_name (list, optional): feature names list. Defaults to None.
target_name (list, optional): target name list. Defaults to None.
"""
X = np.array(X, dtype='O') if not isinstance(
X, (np.ndarray)) else X # converting to numpy array
y = np.array(y, dtype='O') if not isinstance(
y, (np.ndarray)) else y # converting to numpy array
# reshaping to vectors
self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
# creating feature names if not mentioned
self._feature_names = feature_name or [
f"C_{i}" for i in range(self._X.shape[1])]
# creating target name if not mentioned
self._target_name = target_name or ['target']
# BOOOM
# building the tree
self._tree = self._build_tree(
X=self._X,
y=self._y
)
def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-") -> None:
"""print the tree
Args:
node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
spacing (str, optional): printing separator. Defaults to "|-".
"""
node = node or self._tree
if node._is_leaf_node:
print(spacing, " Predict :", node.leaf_value)
return
# Print the question at this node
print(spacing + str(node.question) +
" | " + self.criteria + " :" + str(node.uncertainty))
# Call this function recursively on the true branch
print(spacing + '--> True:')
self.print_tree(node.true_branch, " " + spacing + "-")
# Call this function recursively on the false branch
print(spacing + '--> False:')
self.print_tree(node.false_branch, " " + spacing + "-")
def _classification(self, row: np.ndarray, node: Union[Node, None]) -> Union[dict]:
"""Classification recursive function
Args:
row (np.ndarray): input matrix.
node (Union[Node,None]): node to start with. mostly root node. rest will be handled by recursion.
Returns:
Union[dict]: leaf value. classification result.
"""
if node._is_leaf_node:
return node.leaf_value
if node.question.match(row):
return self._classification(row, node.true_branch)
else:
return self._classification(row, node.false_branch)
def _leaf_probabilities(self, results: dict) -> dict:
"""get probabilties
Args:
results (dict): results from _classification.
Returns:
dict: dictionary with categorical probabilities.
"""
total = sum(results.values())
probs = {}
for key in results:
probs[key] = (results[key] / total) * 100
return probs
def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
"""predict classification results
Args:
X (Union[np.ndarray,list]): testing matrix.
Raises:
ValueError: input X can only be a list or numpy array.
Returns:
np.ndarray: results of classification.
"""
if isinstance(X, (np.ndarray, list)):
X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
if len(X.shape) == 1:
max_result = 0
result_dict = self._classification(row=X, node=self._tree)
result = None
for key in result_dict:
if result_dict[key] > max_result:
max_result = result_dict[key]
result = key
return np.array([[result]], dtype='O')
else:
leaf_value = []
# pick the category with the maximum count for each row
for row in X:
max_result = 0
result_dict = self._classification(row=row, node=self._tree)
result = None
for key in result_dict:
if result_dict[key] > max_result:
max_result = result_dict[key]
result = key
leaf_value.append([result])
return np.array(leaf_value, dtype='O')
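# --- Hedged usage sketch (illustrative, not part of the original source) ---
# A minimal end-to-end run of the classifier above on a tiny hand-made
# dataset; the feature values, names and expected predictions are hypothetical.
if __name__ == "__main__":
    X_demo = [
        [2.5, "round"],
        [3.0, "round"],
        [1.0, "oval"],
        [1.2, "oval"],
    ]
    y_demo = ["apple", "apple", "grape", "grape"]
    clf_demo = DecisionTreeClassifier(max_depth=5, criteria="gini")
    clf_demo.train(X_demo, y_demo, feature_name=["weight", "shape"])
    clf_demo.print_tree()
    # expected to predict [['apple'], ['grape']] for the two query rows below
    print(clf_demo.predict([[2.8, "round"], [1.1, "oval"]]))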
import numpy as np
from scipy.special import factorial
'''
The range of indexes of the columns in the TiteSeq input CSV to use as
normalized counts.
'''
NORMALIZED_COUNT_COLUMN_RANGE = (33, 65)
'''
The range of indexes of the columns in the TiteSeq input CSV to use as raw counts.
'''
READ_COUNT_COLUMN_RANGE = (1, 33)
'''
Indicates whether the data groups values by concentrations first or by bins first.
'''
BINS_VARY_FIRST = True
'''
Indicates whether or not to flip the CSV values to match the order of bin_fluorescences
and concentrations below.
'''
BINS_ORDER_ASCENDING = False
CONCENTRATIONS_ORDER_ASCENDING = False
'''
Defines the values for the mean bin fluorescences (c) and the concentrations (fl).
The lengths of these arrays defines the dimensions of the matrix that will be used
for the input to TiteSeq. These values should be provided in *ascending* order,
regardless of whether or not the CSV counts are flipped.
'''
bin_fluorescences = np.array([3.0, 250.0, 900.0, 3000.0])
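# Hedged sketch (illustrative, not part of the original source): one plausible
# way to turn a single CSV row into a (concentrations x bins) count matrix
# using the constants above. The row vector, the helper name, and the default
# of 8 concentrations (32 normalized-count columns / 4 bins) are assumptions.
def _counts_matrix_sketch(row, n_concentrations=8, n_bins=len(bin_fluorescences)):
    lo, hi = NORMALIZED_COUNT_COLUMN_RANGE
    counts = np.asarray(row[lo:hi], dtype=float)
    if BINS_VARY_FIRST:
        mat = counts.reshape(n_concentrations, n_bins)
    else:
        mat = counts.reshape(n_bins, n_concentrations).T
    if not BINS_ORDER_ASCENDING:
        mat = mat[:, ::-1]   # flip bins to match ascending bin_fluorescences
    if not CONCENTRATIONS_ORDER_ASCENDING:
        mat = mat[::-1, :]   # flip concentrations into ascending order
    return mat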
#!/usr/bin/env python
import gym
from gym_line_follower.envs import LineFollowerEnv
import numpy as np
import time
# PID gain
P=0.13
I=0.002
D=0.00001
# Sample time
Ts = 1/200
# Trapezoidal form, as in https://www.scilab.org/discrete-time-pid-controller-implementation
class PidControl:
"""docstring for PidControl"""
def __init__(self, P,I,D, Ts, sensorLen, vB=0.4, thresh=0.5):
self.P = P
self.I = I
self.D = D
# Aux constants
self.b0 = (2*Ts*P + I*Ts*Ts + 4*D)/(2*Ts)
self.b1 = (2*I*Ts - 8*D)/(2*Ts)
self.b2 = (I*Ts*Ts - 2*Ts*P + 4*D)/(2*Ts)
self.u1 = 0
self.u2 = 0
self.e1 = 0
self.e2 = 0
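# With these states the controller presumably realizes the difference equation
#   u[k] = u[k-2] + b0*e[k] + b1*e[k-1] + b2*e[k-2]
# i.e. the transfer function (b0 + b1*z^-1 + b2*z^-2) / (1 - z^-2), the
# trapezoidal-form discrete PID described in the reference above.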
# motor
self.vB = vB
# sensor
self.thresh = thresh
self.sMax = sensorLen/2
self.sensorPos = np.arange(sensorLen)
from .mcmcposteriorsamplernorm import fit
from scipy.stats import norm
import pandas as pd
import numpy as np
import pickle as pk
from sklearn.cluster import KMeans
from ..shared_functions import *
class mcmcsamplernorm:
"""
Class for the mcmc sampler of the deconvolution gaussian model
"""
def __init__(self, K=1, Kc=1):
"""
Constructor of the class
Parameters
-------------
K: int, Number of components of the noise distribution
Kc: int, Number of components of the convolved distribution
**kwargs:
alpha: float, parameter to determine the hyperprior of the noise weight components
alphac: float, parameter to determine the hyperprior of the target weight components
"""
self.K = K
self.Kc = Kc
self.fitted = False
return
def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1, priors = None, method_initialisation = "kmeans", initial_conditions = [], show_progress = True, seed = 0):
"""
Fit the model to the posterior distribution
Parameters
-------------
dataNoise: list/npArray, 1D array with the data of the noise
dataConvolution: list/npArray, 1D array with the data of the convolution
iterations: int, number of samples to be drawn and stored for each chain during the sampling
ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
chains: int, number of independently initialised realisations of the markov chain
priors: array, parameters of the prior gamma distribution according to the Wikipedia definition
kconst: float, parameter k of the prior gamma distribution
initialConditions: list, 1D array with all the parameters required to initialise manually all the components of all the chains
show_progress: bool, indicate if the method should show the progress in the generation of the new data
seed: int, value to initialise the random generator and obtain reproducible results
Returns
---------------
Nothing
"""
self.data = dataNoise
self.datac = dataConvolution
self.iterations = iterations
self.ignored_iterations = ignored_iterations
self.chains = chains
if priors is None:
self.priors = np.zeros(10)
self.priors[0] = 1/self.K
self.priors[1] = (np.max(dataNoise)+np.min(dataNoise))/2
self.priors[2] = 3*(np.max(dataNoise)-np.min(dataNoise))
self.priors[3] = 10*(np.max(dataNoise)-np.min(dataNoise))
self.priors[4] = 1.1
self.priors[5] = 1/self.Kc
self.priors[6] = (np.max(dataConvolution)+np.min(dataConvolution))/2
self.priors[7] = 3*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[8] = 10*(np.max(dataConvolution)-np.min(dataConvolution))
self.priors[9] = 1.1
else:
self.priors = priors
if initial_conditions != []:
self.initial_conditions = initial_conditions
elif method_initialisation == "kmeans":
K =self.K
Kc = self.Kc
y = np.zeros([chains,(K+Kc)*3])
model = KMeans(n_clusters=K)
model.fit(dataNoise.reshape(-1,1))
ids = model.predict(dataNoise.reshape(-1,1))
#Add weights autofluorescence
for i in range(K):
for j in range(chains):
y[j,i] = np.sum(ids==i)/len(ids)
#Add means autofluorescence
for i in range(K):
for j in range(chains):
y[j,K+i] = np.mean(dataNoise[ids==i])
#Add std autofluorescence
for i in range(K):
for j in range(chains):
y[j,2*K+i] = np.std(dataNoise[ids==i])
model = KMeans(n_clusters=Kc)
model.fit(dataConvolution.reshape(-1,1))
ids = model.predict(dataConvolution.reshape(-1,1))
#Add weights convolution
for i in range(Kc):
for j in range(chains):
y[j,3*K+i] = np.sum(ids==i)/len(ids)
#Add means convolution
for i in range(Kc):
for j in range(chains):
y[j,3*K+Kc+i] = np.mean(dataConvolution[ids==i])
#Add std convolution
for i in range(Kc):
for j in range(chains):
y[j,3*K+2*Kc+i] = np.std(dataConvolution[ids==i])
self.initial_conditions = y
elif method_initialisation == "random":
self.initial_conditions = []
else:
self.initial_conditions = []
self.samples = np.array(fit(dataNoise, dataConvolution, ignored_iterations, iterations, chains, self.K, self.Kc, self.priors, self.initial_conditions, show_progress, seed))
self.fitted = True
return
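# Hedged usage sketch (illustrative; the data arrays are hypothetical):
#   sampler = mcmcsamplernorm(K=1, Kc=2)
#   sampler.fit(noise_data, convolved_data, iterations=2000, chains=3)
#   deconvolved_draws = sampler.sample_deconvolution(size=1000)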
def save(self, name):
"""
Pickle save the model.
Parameters
----------------
name: string, name in which to store the model
Return:
nothing
"""
if self.fitted:
pickling_on = open(name+".pickle","wb")
pk.dump({"K":self.K, "Kc":self.Kc, "priors": self.priors, "iterations": self.iterations,
"ignored_iterations": self.ignored_iterations,
"chains":self.chains, "samples":self.samples}, pickling_on)
pickling_on.close()
else:
print("The model has not been fitted so there is nothing to save.")
return
def load(self, name):
"""
Pickle load the model.
Parameters
----------------
name: string, name from which to recover the model
Return:
nothing
"""
pickle_off = open(name+".pickle","rb")
aux = pk.load(pickle_off)
pickle_off.close()
self.K = aux["K"]
self.Kc = aux ["Kc"]
self.priors = aux["priors"]
self.iterations = aux["iterations"]
self.ignored_iterations = aux["ignored_iterations"]
self.chains = aux["chains"]
self.samples = aux["samples"]
self.fitted = True
return
def sample_autofluorescence(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the noise distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_autofluorescence(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def sample_deconvolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the deconvolved distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_deconvolution(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def sample_convolution(self, size = 1, style = "full", pos = None):
"""
Generate samples from the fitted posterior distribution according to the convolved distribution
Parameters
--------------
size: int, number of samples to be drawn
style: string ("full" or "single"), sample from the posterior and then sample, or all the samples from the same posterior draw
pos: if style = "single", draw from the posterior from which to choose
Returns:
list: list, 1D array with *size* samples from the model
"""
if style=="full":
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size))
elif style=="single":
if pos == None:
pos = np.random.choice(range(len(self.samples)))
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size,pos=pos))
else:
return np.array(sample_convolution(self.samples,self.K,self.Kc,size=size,pos=pos))
return
def score_autofluorescence(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at given positions according to the noise (autofluorescence) distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for k in range(self.K):
mu = self.samples[i,self.K+k]
sigma = self.samples[i,2*self.K+k]
y += self.samples[i,k]*norm.pdf(x,loc=mu,scale=sigma)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_deconvolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at given positions according to the deconvolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
mu = self.samples[i,3*self.K+self.Kc+j]
sigma = self.samples[i,3*self.K+2*self.Kc+j]
y += self.samples[i,3*self.K+j]*norm.pdf(x,loc=mu,scale=sigma)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def score_convolution(self, x, percentiles = [5, 95], size = 100):
"""
Evaluate the mean and percentiles of the pdf at given positions according to the convolved distribution
Parameters
-------------
x: list/array, positions where to evaluate the distribution
percentiles: list/array, percentiles to be evaluated
size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
Returns
-------------
list: list, 2D array with the mean and all the percentile evaluations at all points in x
"""
yT = []
for l in range(size):
i = np.random.choice(self.iterations)
y = np.zeros(len(x))
for j in range(self.Kc):
for k in range(self.K):
mu1 = self.samples[i,self.K+k]
mu2 = self.samples[i,3*self.K+self.Kc+j]
sigma1 = self.samples[i,2*self.K+k]
sigma2 = self.samples[i,3*self.K+2*self.Kc+j]
mu = mu1
s = np.sqrt(sigma1**2+sigma2**2)
y += self.samples[i,k]*self.samples[i,3*self.K+j]*norm.pdf(x,loc=mu,scale=s)
yT.append(y)
return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
def sampler_statistics(self, sort="weight"):
"""
Show statistics describing how well the MCMC sampler mixed
Args:
sort: ["weight", "none", "means"], method for sorting the samples from the different chains
Returns
-------------
DataFrame: DataFrame with the mean, std, percentiles, mixing ratio (Rhat) and effective number of samples for each parameter of the model
"""
self.sampler_statistics = pd.DataFrame(columns=["Mean","Std","5%","50%","95%","Rhat","Neff"])
samples = self.samples.copy()
if sort == "weight":
argsort = np.argsort(samples[:,0:self.K],axis=1)
samples[:,0:self.K] = np.take_along_axis(samples[:,0:self.K],argsort,axis=1)
samples[:,self.K:2*self.K] = np.take_along_axis(samples[:,self.K:2*self.K],argsort,axis=1)
samples[:,2*self.K:3*self.K] = np.take_along_axis(samples[:,2*self.K:3*self.K],argsort,axis=1)
argsort = np.argsort(samples[:,3*self.K:3*self.K+self.Kc],axis=1)
import sympy as sym
import numpy as np
from riemannTensor import RiemannTensor
class RicciTensor(object):
"""
Represents the Ricci tensor R_mu,nu.
See https://www.visus.uni-stuttgart.de/publikationen/catalogue-of-spacetimes
Parameters
----------
R : RiemannTensor
The Riemann tensor from which the Ricci tensor is calculated.
Returns
-------
RicciTensor
the Ricci tensor
"""
def __init__(self, R: RiemannTensor):
self.R = R
self.dim = R.g.dim()
self.Ric = np.zeros((self.dim, self.dim), dtype='O')
self.evaluated = np.full((self.dim, self.dim), False)
import numpy as np
import random
import math
from FuzzyClustering import ClusteringIteration
from FuzzyClustering import ClusterAidedComputing
'''Fuzzy C-means clustering algorithm (FCM)'''
def fcm(data,cluster_n,m = 2,max_iter = 1000,e = 0.00001,printOn = 1):
data = np.array(data)
obj_fcn = np.zeros(max_iter)
# Randomly initialize the cluster centers (and build the initial membership matrix from them)
U,center = ClusterAidedComputing.initcenter(data,cluster_n)
# Main loop
for i in range(max_iter):
U,center,obj_fcn[i] = ClusteringIteration.stepfcm(data,U,cluster_n,m)
if printOn == 1:
print("FCM第",i,"次迭代的目标函数值为:",obj_fcn[i])
if i > 0:
if abs(obj_fcn[i] - obj_fcn[i-1]) < e :
break
return U,center,obj_fcn
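# Hedged usage sketch (illustrative; the data and the orientation of U are
# assumptions, since initcenter/stepfcm are defined in the imported modules):
#   X = np.random.rand(200, 2)
#   U, centers, history = fcm(X, cluster_n=3)
#   labels = np.argmax(U, axis=0)   # hard assignment if rows of U index clusters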
'''Maximum entropy fuzzy clustering algorithm (MEC)'''
def mec(data,cluster_n,gamma=0.01,max_iter = 1000,e = 0.00001,printOn = 1):
data = np.array(data)
obj_fcn = np.zeros(max_iter)
# Randomly initialize the cluster centers (and build the initial membership matrix from them)
U,center = ClusterAidedComputing.initcenter(data,cluster_n)
# Main loop
for i in range(max_iter):
U,center,obj_fcn[i] = ClusteringIteration.stepmec(data,U,cluster_n,gamma)
if printOn == 1:
print("MEC第",i,"次迭代的目标函数值为:",obj_fcn[i])
if i > 0:
if abs(obj_fcn[i] - obj_fcn[i-1]) < e :
break
return U,center,obj_fcn
'''Gaussian-kernel-based fuzzy C-means clustering algorithm (KFCM)'''
def kfcm(data,cluster_n,sigma=2,m=2,lamda=0.1,max_iter = 1000,e = 0.00001,printOn = 1):
data = np.array(data)
#!/usr/bin/env python
import InstrumentDriver
import numpy as np
import configparser
from pathlib import Path
from numpy import genfromtxt
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements Z-Crosstalk Compensation"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# init variables
def updateMatrix(self):
"""Set matrix elements from the csv file
"""
# return directly if not in use
path = self.getValue('Crosstalk Matrix')
full_matrix = genfromtxt(path, delimiter=',')
n = int(self.getValue('Number of Z-Control Lines'))
matrix = full_matrix[1:, 1:]
if (matrix.shape[0] != n):
raise ValueError("Matrix File qubit number does not equal Number of Z-Control Lines")
for i in range(n):
for j in range(n):
self.setValue('M'+str(i+1)+ '-' +str(j+1), matrix[i, j])
def updateTuningCurves(self):
path = self.getValue('Tuning Curves')
tuning_parameters = genfromtxt(path, delimiter=',')
n = int(self.getValue('Number of Z-Control Lines'))
fmax = np.zeros(n)
fmin = np.zeros(n)
V_0 = np.zeros(n)
""" Routines related to flexure, air2vac, etc. """
import inspect
import numpy as np
import copy
from matplotlib import pyplot as plt
from matplotlib import gridspec
from scipy import interpolate
from astropy import units
from astropy.coordinates import solar_system, ICRS
from astropy.coordinates import UnitSphericalRepresentation, CartesianRepresentation
from astropy.time import Time
from linetools.spectra import xspectrum1d
from pypeit import msgs
from pypeit.core import arc
from pypeit.core import qa
from pypeit import utils
from pypeit import debugger
def load_sky_spectrum(sky_file):
"""
Load a sky spectrum into an XSpectrum1D object
Args:
sky_file: str
Returns:
sky_spec: XSpectrum1D
spectrum
"""
sky_spec = xspectrum1d.XSpectrum1D.from_file(sky_file)
return sky_spec
def flex_shift(obj_skyspec, arx_skyspec, mxshft=20):
""" Calculate shift between object sky spectrum and archive sky spectrum
Parameters
----------
obj_skyspec
arx_skyspec
Returns
-------
flex_dict: dict
Contains flexure info
"""
flex_dict = {}
# Determine the brightest emission lines
msgs.warn("If we use Paranal, cut down on wavelength early on")
arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(arx_skyspec.flux.value)
obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj= arc.detect_lines(obj_skyspec.flux.value)
# Keep only 5 brightest amplitude lines (xxx_keep is array of
# indices within arx_w of the 5 brightest)
arx_keep = np.argsort(arx_amp[arx_w])[-5:]
obj_keep = np.argsort(obj_amp[obj_w])[-5:]
# Calculate wavelength (Angstrom per pixel)
arx_disp = np.append(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0],
arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1])
#arx_disp = (np.amax(arx_sky.wavelength.value)-np.amin(arx_sky.wavelength.value))/arx_sky.wavelength.size
obj_disp = np.append(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0],
obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1])
#obj_disp = (np.amax(obj_sky.wavelength.value)-np.amin(obj_sky.wavelength.value))/obj_sky.wavelength.size
# Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need
# this? can just use sigmas
arx_idx = (arx_cent+0.5).astype(np.int)[arx_w][arx_keep] # The +0.5 is for rounding
arx_res = arx_skyspec.wavelength.value[arx_idx]/\
(arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep])
obj_idx = (obj_cent+0.5).astype(np.int)[obj_w][obj_keep] # The +0.5 is for rounding
obj_res = obj_skyspec.wavelength.value[obj_idx]/ \
(obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])
#obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/(
# obj_disp*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep])
if not np.all(np.isfinite(obj_res)):
msgs.warn('Failed to measure the resolution of the object spectrum, likely due to error '
'in the wavelength image.')
return None
msgs.info("Resolution of Archive={0} and Observation={1}".format(np.median(arx_res),
np.median(obj_res)))
# Determine sigma of gaussian for smoothing
arx_sig2 = np.power(arx_disp[arx_idx]*arx_wid[arx_w][arx_keep], 2)
obj_sig2 = np.power(obj_disp[obj_idx]*obj_wid[obj_w][obj_keep], 2)
arx_med_sig2 = np.median(arx_sig2)
obj_med_sig2 = np.median(obj_sig2)
if obj_med_sig2 >= arx_med_sig2:
smooth_sig = np.sqrt(obj_med_sig2-arx_med_sig2) # Ang
smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
else:
msgs.warn("Prefer archival sky spectrum to have higher resolution")
smooth_sig_pix = 0.
msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2)
#Determine region of wavelength overlap
min_wave = max(np.amin(arx_skyspec.wavelength.value), np.amin(obj_skyspec.wavelength.value))
max_wave = min(np.amax(arx_skyspec.wavelength.value), np.amax(obj_skyspec.wavelength.value))
#Smooth higher resolution spectrum by smooth_sig (flux is conserved!)
# if np.median(obj_res) >= np.median(arx_res):
# msgs.warn("New Sky has higher resolution than Archive. Not smoothing")
#obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig)
# else:
#tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
# arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2)))
#arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig)
# Define wavelengths of overlapping spectra
keep_idx = np.where((obj_skyspec.wavelength.value>=min_wave) &
(obj_skyspec.wavelength.value<=max_wave))[0]
#keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave]
#Rebin both spectra onto overlapped wavelength range
if len(keep_idx) <= 50:
msgs.warn("Not enough overlap between sky spectra")
return None
else: #rebin onto object ALWAYS
keep_wave = obj_skyspec.wavelength[keep_idx]
arx_skyspec = arx_skyspec.rebin(keep_wave)
obj_skyspec = obj_skyspec.rebin(keep_wave)
# Trim edges (rebinning is junk there)
arx_skyspec.data['flux'][0,:2] = 0.
arx_skyspec.data['flux'][0,-2:] = 0.
obj_skyspec.data['flux'][0,:2] = 0.
obj_skyspec.data['flux'][0,-2:] = 0.
# Normalize spectra to unit average sky count
norm = np.sum(obj_skyspec.flux.value)/obj_skyspec.npix
obj_skyspec.flux = obj_skyspec.flux / norm
norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix
arx_skyspec.flux = arx_skyspec.flux / norm2
if (norm < 0.):
msgs.warn("Bad normalization of object in flexure algorithm")
msgs.warn("Will try the median")
norm = np.median(obj_skyspec.flux.value)
if (norm < 0.):
msgs.warn("Improper sky spectrum for flexure. Is it too faint??")
return None
if (norm2 < 0.):
msgs.warn('Bad normalization of archive in flexure. You are probably using wavelengths '
'well beyond the archive.')
return None
# Deal with bad pixels
msgs.work("Need to mask bad pixels")
# Deal with underlying continuum
msgs.work("Consider taking median first [5 pixel]")
everyn = obj_skyspec.npix // 20
bspline_par = dict(everyn=everyn)
mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value, 3,
function='bspline', sigma=3., bspline_par=bspline_par)
obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value, 3,
function='bspline', sigma=3., bspline_par=bspline_par)
arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value, 'bspline')
arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont
# Consider sharpness filtering (e.g. LowRedux)
msgs.work("Consider taking median first [5 pixel]")
#Cross correlation of spectra
#corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same")
corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")
#Create array around the max of the correlation function for fitting for subpixel max
# Restrict to pixels within maxshift of zero lag
lag0 = corr.size//2
#mxshft = settings.argflag['reduce']['flexure']['maxshift']
max_corr = np.argmax(corr[lag0-mxshft:lag0+mxshft]) + lag0-mxshft
subpix_grid = np.linspace(max_corr-3., max_corr+3., 7)
#Fit a 2-degree polynomial to peak of correlation function
fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(np.int)], 'polynomial', 2)
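# The vertex of the fitted parabola a0 + a1*x + a2*x**2 lies at x = -a1/(2*a2),
# which gives the sub-pixel location of the correlation peak used below.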
max_fit = -0.5*fit[1]/fit[2]
#Calculate and apply shift in wavelength
shift = float(max_fit)-lag0
msgs.info("Flexure correction of {:g} pixels".format(shift))
#model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0]
flex_dict = dict(polyfit=fit, shift=shift, subpix=subpix_grid,
corr=corr[subpix_grid.astype(np.int)],
sky_spec=obj_skyspec,
arx_spec=arx_skyspec,
corr_cen=corr.size/2, smooth=smooth_sig_pix)
# Return
return flex_dict
'''
def flexure_slit():
"""Correct wavelength down slit center for flexure
Parameters:
----------
slf :
det : int
"""
debugger.set_trace() # THIS METHOD IS NOT BEING USED THESE DAYS
# Load Archive
skyspec_fil, arx_sky = flexure_archive()
# Extract
censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1])
censpec_fx = arextract.boxcar_cen(slf, det, slf._bgframe[det-1])
cen_sky = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx))
# Find shift
fdict = flex_shift(slf, det, cen_sky, arx_sky)
msgs.work("Flexure shift = {:g} down slit center".format(fdict['shift']))
# Refit
# What if xfit shifts outside of 0-1?
xshift = fdict['shift']/(slf._msarc[det-1].shape[0]-1)
mask, fit = utils.robust_polyfit(np.array(slf._wvcalib[det-1]['xfit'])+xshift,
np.array(slf._wvcalib[det-1]['yfit']),
len(slf._wvcalib[det-1]['fitc']),
function=slf._wvcalib[det-1]['function'], sigma=slf._wvcalib[det-1]['nrej'], minv=slf._wvcalib[det-1]['fmin'], maxv=slf._wvcalib[det-1]['fmax'])
# Update wvcalib
slf._wvcalib[det-1]['shift'] = fdict['shift'] # pixels
slf._wvcalib[det-1]['fitc'] = fit
msgs.work("Add another QA for wavelengths?")
# Update mswave
wv_calib = slf._wvcalib[det-1]
slf._mswave[det-1] = utils.func_val(wv_calib['fitc'], slf._tilts[det-1], wv_calib['function'], minv=wv_calib['fmin'], maxv=wv_calib['fmax'])
# Write to Masters? Not for now
# For QA (kludgy..)
censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1])
fdict['sky_spec'] = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx))
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=skyspec_fil, smooth=[],
arx_spec=[], sky_spec=[])
#debugger.set_trace()
#debugger.xplot(censpec_wv, censpec_fx, xtwo=fdict['arx_spec'].wavelength, ytwo=fdict['arx_spec'].flux*50)
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'sky_spec', 'arx_spec']:
flex_dict[key].append(fdict[key])
return flex_dict
'''
# TODO I don't see why maskslits is needed in these routines, since if the slits are masked in arms, they won't be extracted
def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None):
"""Correct wavelengths for flexure, object by object
Parameters:
----------
method : str
'boxcar' -- Recommended
'slitpix' --
sky_file: str
Returns:
----------
flex_list: list
list of dicts containing flexure results
Aligned with specobjs
Filled with a basically empty dict if the slit is skipped or there is no object
"""
sv_fdict = None
msgs.work("Consider doing 2 passes in flexure as in LowRedux")
# Load Archive
sky_spectrum = load_sky_spectrum(sky_file)
nslits = len(maskslits)
gdslits = np.where(~maskslits)[0]
# Loop on objects
flex_list = []
# Slit/objects to come back to
return_later_sobjs = []
# Loop over slits, and then over objects here
for slit in range(nslits):
msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(slit))
indx = specobjs.slitid == slit
this_specobjs = specobjs[indx]
# Reset
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=sky_file, smooth=[],
arx_spec=[], sky_spec=[])
# If no objects on this slit append an empty dictionary
if slit not in gdslits:
flex_list.append(flex_dict.copy())
continue
for ss, specobj in enumerate(this_specobjs):
if specobj is None:
continue
msgs.info("Working on flexure for object # {:d}".format(specobj.objid) + "in slit # {:d}".format(specobj.slitid))
# Using boxcar
if method in ['boxcar', 'slitcen']:
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
sky_flux = specobj.boxcar['COUNTS_SKY']
else:
msgs.error("Not ready for this flexure method: {}".format(method))
# Generate 1D spectrum for object
obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))
# Calculate the shift
fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
punt = False
if fdict is None:
msgs.warn("Flexure shift calculation failed for this spectrum.")
if sv_fdict is not None:
msgs.warn("Will used saved estimate from a previous slit/object")
fdict = copy.deepcopy(sv_fdict)
else:
# One does not exist yet
# Save it for later
return_later_sobjs.append([slit, ss])
punt = True
else:
sv_fdict = copy.deepcopy(fdict)
# Punt?
if punt:
break
# Interpolate
new_sky = specobj.flexure_interp(sky_wave, fdict)
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].append(fdict[key])
flex_dict['sky_spec'].append(new_sky)
flex_list.append(flex_dict.copy())
# Do we need to go back?
for items in return_later_sobjs:
if sv_fdict is None:
msgs.info("No flexure corrections could be made")
break
# Setup
slit, ss = items
flex_dict = flex_list[slit]
specobj = specobjs[ss]
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
# Copy me
fdict = copy.deepcopy(sv_fdict)
# Interpolate
new_sky = specobj.flexure_interp(sky_wave, fdict)
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].append(fdict[key])
flex_dict['sky_spec'].append(new_sky)
return flex_list
# TODO I don't see why maskslits is needed in these routines, since if the slits are masked in arms, they won't be extracted
def flexure_obj_oldbuggyversion(specobjs, maskslits, method, sky_spectrum, sky_file=None, mxshft=None):
"""Correct wavelengths for flexure, object by object
Parameters:
----------
method : str
'boxcar' -- Recommended
'slitpix' --
Returns:
----------
flex_list: list
list of dicts containing flexure results
Aligned with specobjs
Filled with a basically empty dict if the slit is skipped or there is no object
"""
msgs.work("Consider doing 2 passes in flexure as in LowRedux")
# Load Archive
# skyspec_fil, arx_sky = flexure_archive(spectrograph=spectrograph, skyspec_fil=skyspec_fil)
# Loop on objects
flex_list = []
gdslits = np.where(~maskslits)[0]
for sl in range(len(specobjs)):
# Reset
flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[],
corr_cen=[], spec_file=sky_file, smooth=[],
arx_spec=[], sky_spec=[])
if sl not in gdslits:
flex_list.append(flex_dict.copy())
continue
msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(sl))
for specobj in specobjs[sl]: # for convenience
if specobj is None:
continue
# Using boxcar
if method in ['boxcar', 'slitcen']:
sky_wave = specobj.boxcar['WAVE'] #.to('AA').value
sky_flux = specobj.boxcar['COUNTS_SKY']
else:
msgs.error("Not ready for this flexure method: {}".format(method))
# Generate 1D spectrum for object
obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux))
# Calculate the shift
fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft)
# Simple interpolation to apply
npix = len(sky_wave)
x = np.linspace(0., 1., npix)
# Apply
for attr in ['boxcar', 'optimal']:
if not hasattr(specobj, attr):
continue
if 'WAVE' in getattr(specobj, attr).keys():
msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) +
msgs.newline() + "{0:s}".format(str(specobj)))
f = interpolate.interp1d(x, sky_wave, bounds_error=False, fill_value="extrapolate")
getattr(specobj, attr)['WAVE'] = f(x+fdict['shift']/(npix-1))*units.AA
# Shift sky spec too
cut_sky = fdict['sky_spec']
x = np.linspace(0., 1., cut_sky.npix)
f = interpolate.interp1d(x, cut_sky.wavelength.value, bounds_error=False, fill_value="extrapolate")
twave = f(x + fdict['shift']/(cut_sky.npix-1))*units.AA
new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux))
# Update dict
for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']:
flex_dict[key].append(fdict[key])
flex_dict['sky_spec'].append(new_sky)
flex_list.append(flex_dict.copy())
return flex_list
def geomotion_calculate(radec, time, longitude, latitude, elevation, refframe):
"""
Correct the wavelength calibration solution to the desired reference frame
"""
# Time
loc = (longitude * units.deg, latitude * units.deg, elevation * units.m,)
obstime = Time(time.value, format=time.format, scale='utc', location=loc)
return geomotion_velocity(obstime, radec, frame=refframe)
def geomotion_correct(specObjs, radec, time, maskslits, longitude, latitude,
elevation, refframe):
"""
Correct the wavelength of every pixel to a barycentric/heliocentric frame.
Args:
specObjs (SpecObjs object):
        radec (astropy.coordinates.SkyCoord):
time (:obj:`astropy.time.Time`):
maskslits
fitstbl : Table/PypeItMetaData
Containing the properties of every fits file
longitude (float): deg
latitude (float): deg
elevation (float): m
refframe (str):
Returns:
Two objects are returned::
- float: - The velocity correction that should be applied to the wavelength array.
- float: The relativistic velocity correction that should be multiplied by the
wavelength array to convert each wavelength into the user-specified
reference frame.
"""
# Calculate
vel = geomotion_calculate(radec, time, longitude, latitude, elevation, refframe)
vel_corr = np.sqrt((1. + vel/299792.458) / (1. - vel/299792.458))
gdslits = np.where(~maskslits)[0]
# Loop on slits to apply
for slit in gdslits:
indx = (specObjs.slitid-1) == slit
this_specobjs = specObjs[indx]
# Loop on objects
for specobj in this_specobjs:
if specobj is None:
continue
# Loop on extraction methods
for attr in ['boxcar', 'optimal']:
if not hasattr(specobj, attr):
continue
if 'WAVE' in getattr(specobj, attr).keys():
msgs.info('Applying {0} correction to '.format(refframe)
+ '{0} extraction for object:'.format(attr)
+ msgs.newline() + "{0}".format(str(specobj)))
getattr(specobj, attr)['WAVE'] = getattr(specobj, attr)['WAVE'] * vel_corr
# Return
return vel, vel_corr # Mainly for debugging
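# Minimal sketch (not part of the reduction code above): the arithmetic that
# geomotion_correct applies to each extracted wavelength array, using a made-up
# velocity. 299792.458 is the speed of light in km/s, matching the expression above.
def _example_geomotion_scaling(vel_kms=12.5):
    import numpy as np
    wave = np.linspace(4000., 9000., 5)  # hypothetical wavelengths in Angstroms
    vel_corr = np.sqrt((1. + vel_kms / 299792.458) / (1. - vel_kms / 299792.458))
    return wave * vel_corr  # wavelengths shifted into the requested reference frame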
def geomotion_velocity(time, skycoord, frame="heliocentric"):
""" Perform a barycentric/heliocentric velocity correction.
    For the correction, this routine uses the ephemeris: astropy.coordinates.solar_system_ephemeris.set
For more information see `~astropy.coordinates.solar_system_ephemeris`.
Parameters
----------
time : astropy.time.Time
The time of observation, including the location.
skycoord: astropy.coordinates.SkyCoord
The RA and DEC of the pointing, as a SkyCoord quantity.
frame : str
The reference frame that should be used for the calculation.
Returns
-------
vcorr : float
The velocity correction that should be added to the original velocity.
"""
# Check that the RA/DEC of the object is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
msgs.error("Cannot transform RA/DEC of object to the ICRS")
# Calculate ICRS position and velocity of Earth's geocenter
ep, ev = solar_system.get_body_barycentric_posvel('earth', time)
# Calculate GCRS position and velocity of observatory
op, ov = time.location.get_gcrs_posvel(time)
# ICRS and GCRS are axes-aligned. Can add the velocities
velocity = ev + ov
if frame == "heliocentric":
# ICRS position and velocity of the Sun
sp, sv = solar_system.get_body_barycentric_posvel('sun', time)
velocity += sv
# Get unit ICRS vector in direction of SkyCoord
sc_cartesian = skycoord.icrs.represent_as(UnitSphericalRepresentation).represent_as(CartesianRepresentation)
return sc_cartesian.dot(velocity).to(units.km / units.s).value
def airtovac(wave):
""" Convert air-based wavelengths to vacuum
Parameters:
----------
wave: Quantity array
Wavelengths
Returns:
----------
wave: Quantity array
Wavelength array corrected to vacuum wavelengths
"""
# Convert to AA
wave = wave.to(units.AA)
wavelength = wave.value
# Standard conversion format
sigma_sq = (1.e4/wavelength)**2. #wavenumber squared
factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq))
factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) #only modify above 2000A
# Convert
wavelength = wavelength*factor
# Units
new_wave = wavelength*units.AA
new_wave.to(wave.unit)
return new_wave
def vactoair(wave):
"""Convert to air-based wavelengths from vacuum
Parameters:
----------
wave: Quantity array
Wavelengths
Returns:
----------
wave: Quantity array
Wavelength array corrected to air
"""
# Convert to AA
wave = wave.to(units.AA)
wavelength = wave.value
# Standard conversion format
sigma_sq = (1.e4/wavelength)**2. #wavenumber squared
factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq))
factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) #only modify above 2000A
# Convert
wavelength = wavelength/factor
new_wave = wavelength*units.AA
new_wave.to(wave.unit)
return new_wave
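# Hedged usage sketch for the two converters above: applying vactoair after airtovac
# should recover the input to well below the precision of the conversion. It assumes
# astropy supplies the `units` object used throughout this module; the wavelengths
# below are made up.
def _example_air_vac_roundtrip():
    import numpy as np
    from astropy import units
    wave_air = np.array([4000., 5500., 7000.]) * units.AA
    back = vactoair(airtovac(wave_air))
    return np.max(np.abs(back.value - wave_air.value))  # expected to be well below 1e-3 AA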
# TODO I don't see why maskslits is needed in these routines, since if the slits are masked in arms, they won't be extracted
# AND THIS IS WHY THE CODE IS CRASHING
def flexure_qa(specobjs, maskslits, basename, det, flex_list,
slit_cen=False, out_dir=None):
"""
Args:
specobjs:
maskslits (np.ndarray):
basename (str):
det (int):
flex_list (list):
slit_cen:
out_dir:
"""
plt.rcdefaults()
plt.rcParams['font.family']= 'times new roman'
    # Grab the name of the method
method = inspect.stack()[0][3]
#
gdslits = np.where(np.invert(maskslits))[0]
# Loop over slits, and then over objects here
for slit in gdslits:
indx = specobjs.slitid == slit
this_specobjs = specobjs[indx]
this_flex_dict = flex_list[slit]
# Setup
if slit_cen:
nobj = 1
ncol = 1
else:
nobj = np.sum(indx)
ncol = min(3, nobj)
#
if nobj == 0:
continue
nrow = nobj // ncol + ((nobj % ncol) > 0)
# Outfile, one QA file per slit
outfile = qa.set_qa_filename(basename, method + '_corr', det=det,slit=(slit + 1), out_dir=out_dir)
plt.figure(figsize=(8, 5.0))
plt.clf()
gs = gridspec.GridSpec(nrow, ncol)
for iobj, specobj in enumerate(this_specobjs):
if specobj is None:
continue
# Correlation QA
ax = plt.subplot(gs[iobj//ncol, iobj % ncol])
# Fit
fit = this_flex_dict['polyfit'][iobj]
xval = np.linspace(-10., 10, 100) + this_flex_dict['corr_cen'][iobj] #+ flex_dict['shift'][o]
#model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0]
model = utils.func_val(fit, xval, 'polynomial')
mxmod = np.max(model)
ylim = [np.min(model/mxmod), 1.3]
ax.plot(xval-this_flex_dict['corr_cen'][iobj], model/mxmod, 'k-')
# Measurements
ax.scatter(this_flex_dict['subpix'][iobj]-this_flex_dict['corr_cen'][iobj],
this_flex_dict['corr'][iobj]/mxmod, marker='o')
# Final shift
ax.plot([this_flex_dict['shift'][iobj]]*2, ylim, 'g:')
# Label
if slit_cen:
ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center')
else:
ax.text(0.5, 0.25, '{:s}'.format(specobj.idx), transform=ax.transAxes, size='large', ha='center')
ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(this_flex_dict['shift'][iobj]),
transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'})
# Axes
ax.set_ylim(ylim)
ax.set_xlabel('Lag')
# Finish
plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)
plt.savefig(outfile, dpi=400)
plt.close()
# Sky line QA (just one object)
if slit_cen:
iobj = 0
else:
iobj = 0
specobj = this_specobjs[iobj]
sky_spec = this_flex_dict['sky_spec'][iobj]
arx_spec = this_flex_dict['arx_spec'][iobj]
# Sky lines
sky_lines = np.array([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304,
7340.885, 7993.332, 8430.174, 8919.610, 9439.660,
10013.99, 10372.88])*units.AA
dwv = 20.*units.AA
gdsky = np.where((sky_lines > sky_spec.wvmin) & (sky_lines < sky_spec.wvmax))[0]
if len(gdsky) == 0:
msgs.warn("No sky lines for Flexure QA")
return
if len(gdsky) > 6:
idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1])
gdsky = gdsky[idx]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_sky', det=det,slit=(slit + 1), out_dir=out_dir)
# Figure
plt.figure(figsize=(8, 5.0))
plt.clf()
nrow, ncol = 2, 3
gs = gridspec.GridSpec(nrow, ncol)
if slit_cen:
plt.suptitle('Sky Comparison for Slit Center', y=1.05)
else:
plt.suptitle('Sky Comparison for {:s}'.format(specobj.idx), y=1.05)
for ii, igdsky in enumerate(gdsky):
skyline = sky_lines[igdsky]
ax = plt.subplot(gs[ii//ncol, ii % ncol])
# Norm
pix = np.where(np.abs(sky_spec.wavelength-skyline) < dwv)[0]
f1 = np.sum(sky_spec.flux[pix])
f2 = np.sum(arx_spec.flux[pix])
norm = f1/f2
# Plot
ax.plot(sky_spec.wavelength[pix], sky_spec.flux[pix], 'k-', label='Obj',
drawstyle='steps-mid')
pix2 = np.where(np.abs(arx_spec.wavelength-skyline) < dwv)[0]
ax.plot(arx_spec.wavelength[pix2], arx_spec.flux[pix2]*norm, 'r-', label='Arx',
drawstyle='steps-mid')
# Axes
ax.xaxis.set_major_locator(plt.MultipleLocator(dwv.value))
ax.set_xlabel('Wavelength')
ax.set_ylabel('Counts')
# Legend
plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3,
handletextpad=0.3, fontsize='small', numpoints=1)
# Finish
plt.savefig(outfile, dpi=400)
plt.close()
#plt.close()
plt.rcdefaults()
return
def flexure_qa_oldbuggyversion(specobjs, maskslits, basename, det, flex_list, slit_cen=False):
""" QA on flexure measurement
Parameters
----------
det
flex_list : list
list of dict containing flexure results
slit_cen : bool, optional
QA on slit center instead of objects
Returns
-------
"""
plt.rcdefaults()
plt.rcParams['font.family']= 'times new roman'
    # Grab the name of the method
method = inspect.stack()[0][3]
#
gdslits = np.where(~maskslits)[0]
for sl in range(len(specobjs)):
if sl not in gdslits:
continue
if specobjs[sl][0] is None:
continue
# Setup
if slit_cen:
nobj = 1
ncol = 1
else:
nobj = len(specobjs[sl])
ncol = min(3, nobj)
#
if nobj==0:
continue
nrow = nobj // ncol + ((nobj % ncol) > 0)
# Get the flexure dictionary
flex_dict = flex_list[sl]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_corr', det=det,
slit=specobjs[sl][0].slitid)
plt.figure(figsize=(8, 5.0))
plt.clf()
gs = gridspec.GridSpec(nrow, ncol)
# Correlation QA
for o in range(nobj):
ax = plt.subplot(gs[o//ncol, o % ncol])
# Fit
fit = flex_dict['polyfit'][o]
xval = np.linspace(-10., 10, 100) + flex_dict['corr_cen'][o] #+ flex_dict['shift'][o]
#model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0]
model = utils.func_val(fit, xval, 'polynomial')
mxmod = np.max(model)
ylim = [np.min(model/mxmod), 1.3]
ax.plot(xval-flex_dict['corr_cen'][o], model/mxmod, 'k-')
# Measurements
ax.scatter(flex_dict['subpix'][o]-flex_dict['corr_cen'][o],
flex_dict['corr'][o]/mxmod, marker='o')
# Final shift
ax.plot([flex_dict['shift'][o]]*2, ylim, 'g:')
# Label
if slit_cen:
ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center')
else:
ax.text(0.5, 0.25, '{:s}'.format(specobjs[sl][o].idx), transform=ax.transAxes, size='large', ha='center')
ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(flex_dict['shift'][o]),
transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'})
# Axes
ax.set_ylim(ylim)
ax.set_xlabel('Lag')
# Finish
plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)
plt.savefig(outfile, dpi=400)
plt.close()
# Sky line QA (just one object)
if slit_cen:
o = 0
else:
o = 0
specobj = specobjs[sl][o]
sky_spec = flex_dict['sky_spec'][o]
arx_spec = flex_dict['arx_spec'][o]
# Sky lines
sky_lines = np.array([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304,
7340.885, 7993.332, 8430.174, 8919.610, 9439.660,
10013.99, 10372.88])*units.AA
dwv = 20.*units.AA
gdsky = np.where((sky_lines > sky_spec.wvmin) & (sky_lines < sky_spec.wvmax))[0]
if len(gdsky) == 0:
msgs.warn("No sky lines for Flexure QA")
return
if len(gdsky) > 6:
idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1])
gdsky = gdsky[idx]
# Outfile
outfile = qa.set_qa_filename(basename, method+'_sky', det=det,
slit=specobjs[sl][0].slitid)
# Figure
plt.figure(figsize=(8, 5.0))
plt.clf()
nrow, ncol = 2, 3
gs = gridspec.GridSpec(nrow, ncol)
if slit_cen:
plt.suptitle('Sky Comparison for Slit Center', y=1.05)
else:
plt.suptitle('Sky Comparison for {:s}'.format(specobj.idx), y=1.05)
for ii, igdsky in enumerate(gdsky):
skyline = sky_lines[igdsky]
ax = plt.subplot(gs[ii//ncol, ii % ncol])
# Norm
pix = np.where(np.abs(sky_spec.wavelength-skyline) < dwv)[0]
            f1 = np.sum(sky_spec.flux[pix])
import argparse
import os
import sys
##############################################
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--eps', type=float, default=1.)
parser.add_argument('--init', type=str, default="glorot_uniform")
parser.add_argument('--save', type=int, default=0)
parser.add_argument('--name', type=str, default="segnet")
parser.add_argument('--load', type=str, default=None)
args = parser.parse_args()
if args.gpu >= 0:
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
##############################################
import numpy as np
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
from collections import deque
from lib.SegNet import SegNet
####################################
exxact = 1
if exxact:
val_path = '/home/bcrafton3/Data_SSD/datasets/cityscapes/val/'
train_path = '/home/bcrafton3/Data_SSD/datasets/cityscapes/train/'
else:
val_path = '/usr/scratch/datasets/cityscapes/val/'
train_path = '/usr/scratch/datasets/cityscapes/train/'
####################################
def get_val_filenames():
val_filenames = []
print ("building validation dataset")
for subdir, dirs, files in os.walk(val_path):
for file in files:
val_filenames.append(os.path.join(val_path, file))
# np.random.shuffle(val_filenames)
return sorted(val_filenames)
def get_train_filenames():
train_filenames = []
print ("building training dataset")
for subdir, dirs, files in os.walk(train_path):
for file in files:
train_filenames.append(os.path.join(train_path, file))
# np.random.shuffle(train_filenames)
return sorted(train_filenames)
def extract_fn(record):
_feature={
'image_raw': tf.FixedLenFeature([], tf.string),
'label_raw': tf.FixedLenFeature([], tf.string)
}
sample = tf.parse_single_example(record, _feature)
image = tf.decode_raw(sample['image_raw'], tf.float32)
image = tf.reshape(image, (1, 480, 480, 3))
label = tf.decode_raw(sample['label_raw'], tf.int32)
label = tf.reshape(label, (1, 480, 480))
return [image, label]
####################################
train_filenames = get_train_filenames()
val_filenames = get_val_filenames()
train_examples = len(train_filenames)
val_examples = len(val_filenames)
####################################
filename = tf.placeholder(tf.string, shape=[None])
batch_size = tf.placeholder(tf.int32, shape=())
lr = tf.placeholder(tf.float32, shape=())
####################################
val_dataset = tf.data.TFRecordDataset(filename)
val_dataset = val_dataset.map(extract_fn, num_parallel_calls=4)
val_dataset = val_dataset.batch(args.batch_size)
val_dataset = val_dataset.repeat()
val_dataset = val_dataset.prefetch(8)
train_dataset = tf.data.TFRecordDataset(filename)
train_dataset = train_dataset.map(extract_fn, num_parallel_calls=4)
train_dataset = train_dataset.batch(args.batch_size)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.prefetch(8)
####################################
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
x, y = iterator.get_next()
image = tf.reshape(x, [batch_size, 480, 480, 3])
label = tf.reshape(y, [batch_size, 480, 480])
label_one_hot = tf.one_hot(label, depth=30, axis=-1)
train_iterator = train_dataset.make_initializable_iterator()
val_iterator = val_dataset.make_initializable_iterator()
####################################
model = SegNet(batch_size=batch_size, init='glorot_uniform', load='./MobileNetWeights.npy')
out = model.predict(image)
predict = tf.argmax(tf.nn.softmax(out), axis=3, output_type=tf.int32)
tf_correct = tf.reduce_sum(tf.cast(tf.equal(predict, label), tf.float32))
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=label_one_hot, logits=out)
train = tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=args.eps).minimize(loss)
####################################
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
train_handle = sess.run(train_iterator.string_handle())
val_handle = sess.run(val_iterator.string_handle())
###############################################################
for ii in range(args.epochs):
####################################
sess.run(train_iterator.initializer, feed_dict={filename: train_filenames})
total_correct = deque(maxlen=25)
losses = deque(maxlen=25)
for jj in range(0, train_examples, args.batch_size):
[np_correct, np_loss, img_in, img_out, _] = sess.run([tf_correct, loss, image, predict, train], feed_dict={handle: train_handle, batch_size: args.batch_size, lr: args.lr})
total_correct.append(np_correct)
losses.append(np_loss)
if (jj % 100 == 0):
print ('%d %f %f' % (jj, np.average(total_correct) / (args.batch_size * 480. * 480.), np.average(losses)))
for kk in range(args.batch_size):
                top = np.average(img_in[kk], axis=2)
import nibabel as nib
import numpy as np
import os
from glob import glob
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import affine_transform
def brainweb(brainweb_raw_dir = os.path.join('..','data','training_data','brainweb','raw'),
subject = 'subject54',
gm_contrast = 4,
wm_contrast = 1,
csf_contrast = 0.05,
skin_contrast = 0.5,
fat_contrast = 0.25,
bone_contrast = 0.1,
blood_contrast = 0.8):
dmodel_path = os.path.join(brainweb_raw_dir, subject + '_crisp_v.mnc.gz')
t1_path = os.path.join(brainweb_raw_dir, subject + '_t1w_p4.mnc.gz')
# the simulated t1 has different voxel size and FOV)
dmodel_affine = nib.load(dmodel_path).affine.copy()
t1_affine = nib.load(t1_path).affine.copy()
dmodel_voxsize = np.sqrt((dmodel_affine**2).sum(0))[:-1]
t1_voxsize = np.sqrt((t1_affine**2).sum(0))[:-1]
dmodel = nib.load(dmodel_path).get_data()
t1 = nib.load(t1_path).get_data()
# create low frequent variation in GM
    v = gaussian_filter(np.random.rand(*dmodel.shape)
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os, sys
import matplotlib.ticker as plticker
import pandas as pd
from collections import OrderedDict
from random import randint
from PIL import Image, ImageChops
from scipy.ndimage.measurements import center_of_mass
def visualize_grid(img, width, height):
fig = plt.figure()
ax = fig.add_subplot(111)
my_intervalx = width # width
my_intervaly = height # height
locx = plticker.MultipleLocator(base=my_intervalx)
locy = plticker.MultipleLocator(base=my_intervaly)
ax.xaxis.set_major_locator(locx)
ax.yaxis.set_major_locator(locy)
# Add the grid
ax.grid(which='major', axis='both', linestyle='-')
# Add the image
ax.imshow(img)
# Find number of gridsquares in x and y direction
nx = abs(int(float(ax.get_xlim()[1] - ax.get_xlim()[0]) / float(my_intervalx)))
ny = abs(int(float(ax.get_ylim()[1] - ax.get_ylim()[0]) / float(my_intervaly)))
plt.show()
def preprocess_img(img, fmask, all_masks, clipLimit=3.0):
# convert to PIL
pil_image = Image.fromarray(img)
bg = Image.new(pil_image.mode, pil_image.size, pil_image.getpixel((0, 0)))
diff = ImageChops.difference(pil_image, bg)
diff = ImageChops.add(diff, diff, 2.0, -20)
bbox = diff.getbbox()
del pil_image
# crop image if bbox is found
if bbox:
crop = img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
crop_mask = all_masks[bbox[1]:bbox[3], bbox[0]:bbox[2]]
else:
crop = img
crop_mask = all_masks
    # converting image to LAB Color model
lab = cv2.cvtColor(crop, cv2.COLOR_BGR2LAB)
# splitting the LAB image to different channels
l, a, b = cv2.split(lab)
# applying CLAHE to L-channel
clahe = cv2.createCLAHE(clipLimit=clipLimit)
cl = clahe.apply(l)
# merge the CLAHE enhanced L-channel with the a and b channel
limg = cv2.merge((cl, a, b))
# converting image from LAB Color model to RGB model
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
# normalize to float32 and 0-1 range
all_masks = cv2.normalize(all_masks.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX).astype(np.float32)
img_final = cv2.normalize(final.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX).astype(np.float32)
return img_final[:, :, 1], all_masks
def extract_patches(img, centers, patch_size=50, n_patches=30, augment=False):
n_centers = centers.shape[0]
n_patches_per_center = np.ceil(n_patches / n_centers).astype(np.uint8)
if n_patches_per_center == 0:
n_patches_per_center = 1
tx_max = 30
tx_min = -30
tx_range = np.arange(tx_min, tx_max+1, 1).astype(np.int8)
ty_range = np.arange(tx_min, tx_max+1, 1).astype(np.int8)
patches = []
rows, cols = img.shape
for m in range(0, n_centers):
cx = centers[m, 0]
cy = centers[m, 1]
for npat in range(0, n_patches_per_center):
offx = np.random.choice(tx_range, 1)[0]
offy = np.random.choice(ty_range, 1)[0]
row_start = cx - patch_size + offx
row_end = cx + patch_size + offx
col_start = cy - patch_size + offy
col_end = cy + patch_size + offy
if row_start < 0:
small_offset = np.random.randint(1, 20)
row_start = 0 + small_offset
row_end = 2*patch_size + small_offset
if row_end > rows:
                small_offset = np.random.randint(1, 20)
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import time
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window_3D(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t())
_3D_window = _1D_window.mm(_2D_window.reshape(1, -1)).reshape(window_size, window_size,
window_size).float().unsqueeze(0).unsqueeze(0)
window = Variable(_3D_window.expand(channel, 1, window_size, window_size, window_size).contiguous())
return window
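# Illustrative check for the helpers above (nothing beyond them is assumed): the 3D
# window has shape (channel, 1, w, w, w) and each kernel sums to ~1, being the outer
# product of a normalized 1D Gaussian with itself three times.
def _example_window_shape(window_size=3, channel=2):
    window = create_window_3D(window_size, channel)
    return window.shape, window[0, 0].sum().item()  # torch.Size([2, 1, 3, 3, 3]), ~1.0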
def _ssim_3D(img1, img2, window, window_size, channel, size_average=False, mask=None):
mu1 = F.conv3d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv3d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv3d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv3d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv3d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
if img1.size()[0] == 1:
if mask is not None:
res = [ ssim_map[mm >0].mean() for mm in mask]
res.append( ssim_map[(img1>0) * (img2>0)].mean() )
else:
ssim_map = ssim_map[(img1>0) * (img2>0)]
res = ssim_map #ssim_map.mean()
else:
            print('WARNING RRR remove 0 in image')
res = ssim_map.mean(axis=list(range(1, img1.ndim))) #one value per patch
return res.squeeze(0)
else:
return ssim_map
def _ssim_3D_dist(img1, img2, window, window_size, channel, aggregate="avg"):
if len(img1.size()) == 4: #missing batch dim
img1 = img1.unsqueeze(0)
img2 = img2.unsqueeze(0)
(_, channel, _, _, _) = img1.size()
window = create_window_3D(window_size, channel)
mu1 = F.conv3d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv3d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv3d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv3d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv3d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
s1 = (2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)
s2 = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
d1 = torch.sqrt(1 - s1)
d2 = torch.sqrt(1 - s2)
d1[torch.isnan(d1)] = 0
d2[torch.isnan(d2)] = 0
if aggregate.lower() == "normed":
res = torch.norm(torch.sqrt(d1 ** 2 + d2 ** 2), 2)
else:
res = torch.mean(torch.sqrt(d1 ** 2 + d2 ** 2))
return res
class SSIM3D_old(torch.nn.Module):
def __init__(self, window_size=3, size_average=True, distance=0):
super(SSIM3D_old, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window_3D(window_size, self.channel)
self.distance = distance
def forward(self, img1, img2):
if img1.ndim == 4:
img1 = img1.unsqueeze(0)
if img2.ndim == 4:
img2 = img2.unsqueeze(0)
(_, channel, _, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window_3D(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
if self.distance==1:
res = 1 - _ssim_3D(img1, img2, window, self.window_size, channel, self.size_average)
else :
res = _ssim_3D(img1, img2, window, self.window_size, channel)
return res.squeeze(0)
def ssim3D(img1, img2, window_size=3, size_average=True, verbose=False, mask=None):
if verbose:
start = time.time()
if len(img1.size()) == 4: #missing batch dim
img1 = img1.unsqueeze(0)
img2 = img2.unsqueeze(0)
if mask is not None:
mask = [ mm.unsqueeze(0) for mm in mask ]
#print('mask 1 shape {} mask last {}'.format(mask[0].shape, mask[-1].shape))
(_, channel, _, _, _) = img1.size()
window = create_window_3D(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
res = _ssim_3D(img1, img2, window, window_size, channel, size_average, mask)
if verbose:
duration = time.time() - start
print(f'Ssim calculation : {duration:.3f} seconds')
return res
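# Hedged usage sketch for ssim3D (inputs are made up): with a single (1, C, D, H, W)
# volume, no mask and the default size_average=True, it returns per-voxel SSIM values
# over the strictly positive support of both inputs; average them for a single score.
def _example_ssim3d():
    img1 = torch.rand(1, 1, 16, 16, 16)
    img2 = (img1 + 0.05 * torch.randn_like(img1)).clamp(min=1e-6)
    vals = ssim3D(img1, img2, window_size=3)
    return vals.mean().item()  # close to 1 for nearly identical volumes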
########################################################################################################################
def th_pearsonr(x, y):
"""
mimics scipy.stats.pearsonr
"""
x = torch.flatten(x)
y = torch.flatten(y)
mean_x = torch.mean(x)
mean_y = torch.mean(y)
xm = x.sub(mean_x)
ym = y.sub(mean_y)
r_num = xm.dot(ym)
r_den = torch.norm(xm, 2) * torch.norm(ym, 2)
r_val = r_num / r_den
return r_val
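# Quick illustration for th_pearsonr (made-up data): a noisy linear relation should
# give a correlation close to 1.
def _example_pearsonr():
    x = torch.linspace(0., 1., 100)
    y = 2. * x + 0.01 * torch.randn(100)
    return th_pearsonr(x, y).item()  # ~0.99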
########################################################################################################################
class nrmse:
def __init__(self, normalization="euclidean"):
self.normalization = normalization.lower()
def __call__(self, image_true, image_test):
'''
A Pytorch version of scikit-image's implementation of normalized_root_mse
https://scikit-image.org/docs/dev/api/skimage.metrics.html#skimage.metrics.normalized_root_mse
Compute the normalized root mean-squared error (NRMSE) between two
images.
Parameters
----------
image_true : ndarray
Ground-truth image, same shape as im_test.
image_test : ndarray
Test image.
normalization : {'euclidean', 'min-max', 'mean'}, optional
Controls the normalization method to use in the denominator of the
NRMSE. There is no standard method of normalization across the
literature [1]_. The methods available here are as follows:
- 'euclidean' : normalize by the averaged Euclidean norm of
``im_true``::
NRMSE = RMSE * sqrt(N) / || im_true ||
where || . || denotes the Frobenius norm and ``N = im_true.size``.
This result is equivalent to::
NRMSE = || im_true - im_test || / || im_true ||.
- 'min-max' : normalize by the intensity range of ``im_true``.
- 'mean' : normalize by the mean of ``im_true``
Returns
-------
nrmse : float
The NRMSE metric.
References
----------
.. [1] https://en.wikipedia.org/wiki/Root-mean-square_deviation
'''
if self.normalization == "min-max":
denom = image_true.max() - image_true.min()
elif self.normalization == "mean":
denom = image_true.mean()
else:
if self.normalization != "euclidean":
raise Warning("Unsupported norm type. Found {}.\nUsing euclidean by default".format(self.normalization))
denom = torch.sqrt(torch.mean(image_true ** 2))
return (F.mse_loss(image_true, image_test).sqrt())/denom
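# Hedged usage sketch for the nrmse class above (tensors are made up): with 'min-max'
# the RMSE is divided by the intensity range of the reference image.
def _example_nrmse():
    ref = torch.rand(1, 1, 32, 32)
    test = ref + 0.1 * torch.randn_like(ref)
    return nrmse(normalization="min-max")(ref, test).item()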
#code from https://github.com/rogerberm/pytorch-ncc/blob/master/NCC.py
def patch_mean(images, patch_shape):
"""
Computes the local mean of an image or set of images.
Args:
images (Tensor): Expected size is (n_images, n_channels, *image_size). 1d, 2d, and 3d images are accepted.
patch_shape (tuple): shape of the patch tensor (n_channels, *patch_size)
Returns:
Tensor same size as the image, with local means computed independently for each channel.
Example::
>>> images = torch.randn(4, 3, 15, 15) # 4 images, 3 channels, 15x15 pixels each
>>> patch_shape = 3, 5, 5 # 3 channels, 5x5 pixels neighborhood
>>> means = patch_mean(images, patch_shape)
>>> expected_mean = images[3, 2, :5, :5].mean() # mean of the third image, channel 2, top left 5x5 patch
>>> computed_mean = means[3, 2, 5//2, 5//2] # computed mean whose 5x5 neighborhood covers same patch
>>> computed_mean.isclose(expected_mean).item()
1
"""
channels, *patch_size = patch_shape
dimensions = len(patch_size)
padding = tuple(side // 2 for side in patch_size)
conv = (F.conv1d, F.conv2d, F.conv3d)[dimensions - 1]
# Convolution with these weights will effectively compute the channel-wise means
patch_elements = torch.Tensor(patch_size).prod().item()
weights = torch.full((channels, channels, *patch_size), fill_value=1 / patch_elements)
weights = weights.to(images.device)
# Make convolution operate on single channels
channel_selector = torch.eye(channels).byte()
weights[1 - channel_selector] = 0
result = conv(images, weights, padding=padding, bias=None)
return result
def patch_std(image, patch_shape):
"""
Computes the local standard deviations of an image or set of images.
Args:
images (Tensor): Expected size is (n_images, n_channels, *image_size). 1d, 2d, and 3d images are accepted.
patch_shape (tuple): shape of the patch tensor (n_channels, *patch_size)
Returns:
Tensor same size as the image, with local standard deviations computed independently for each channel.
Example::
>>> images = torch.randn(4, 3, 15, 15) # 4 images, 3 channels, 15x15 pixels each
>>> patch_shape = 3, 5, 5 # 3 channels, 5x5 pixels neighborhood
>>> stds = patch_std(images, patch_shape)
>>> patch = images[3, 2, :5, :5]
>>> expected_std = patch.std(unbiased=False) # standard deviation of the third image, channel 2, top left 5x5 patch
>>> computed_std = stds[3, 2, 5//2, 5//2] # computed standard deviation whose 5x5 neighborhood covers same patch
>>> computed_std.isclose(expected_std).item()
1
"""
return (patch_mean(image**2, patch_shape) - patch_mean(image, patch_shape)**2).sqrt()
def channel_normalize(template):
"""
Z-normalize image channels independently.
"""
reshaped_template = template.clone().view(template.shape[0], -1)
reshaped_template.sub_(reshaped_template.mean(dim=-1, keepdim=True))
reshaped_template.div_(reshaped_template.std(dim=-1, keepdim=True, unbiased=False))
return reshaped_template.view_as(template)
class NCC(torch.nn.Module):
"""
Computes the [Zero-Normalized Cross-Correlation][1] between an image and a template.
Example:
>>> lena_path = "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png"
>>> lena_tensor = torch.Tensor(plt.imread(lena_path)).permute(2, 0, 1).cuda()
>>> patch_center = 275, 275
>>> y1, y2 = patch_center[0] - 25, patch_center[0] + 25
>>> x1, x2 = patch_center[1] - 25, patch_center[1] + 25
>>> lena_patch = lena_tensor[:, y1:y2 + 1, x1:x2 + 1]
>>> ncc = NCC(lena_patch)
>>> ncc_response = ncc(lena_tensor[None, ...])
>>> ncc_response.max()
tensor(1.0000, device='cuda:0')
>>> np.unravel_index(ncc_response.argmax(), lena_tensor.shape)
(0, 275, 275)
[1]: https://en.wikipedia.org/wiki/Cross-correlation#Zero-normalized_cross-correlation_(ZNCC)
"""
def __init__(self, template, keep_channels=False):
super().__init__()
self.keep_channels = keep_channels
channels, *template_shape = template.shape
dimensions = len(template_shape)
self.padding = tuple(side // 2 for side in template_shape)
self.conv_f = (F.conv1d, F.conv2d, F.conv3d)[dimensions - 1]
self.normalized_template = channel_normalize(template)
ones = template.dim() * (1, )
self.normalized_template = self.normalized_template.repeat(channels, *ones)
# Make convolution operate on single channels
channel_selector = torch.eye(channels).byte()
self.normalized_template[1 - channel_selector] = 0
# Reweight so that output is averaged
patch_elements = torch.Tensor(template_shape).prod().item()
self.normalized_template.div_(patch_elements)
def forward(self, image):
result = self.conv_f(image, self.normalized_template, padding=self.padding, bias=None)
std = patch_std(image, self.normalized_template.shape[1:])
result.div_(std)
if not self.keep_channels:
result = result.mean(dim=1)
return result
#code from https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429
'''
Non-parametric computation of entropy and mutual-information
Adapted by <NAME> for code created by <NAME>, itself
from several papers (see in the code).
These computations rely on nearest-neighbor statistics
'''
import numpy as np
from scipy.special import gamma,psi
from scipy import ndimage
from scipy.linalg import det
from numpy import pi
__all__=['entropy', 'mutual_information', 'entropy_gaussian']
EPS = np.finfo(float).eps
def nearest_distances(X, k=1):
from sklearn.neighbors import NearestNeighbors
'''
X = array(N,M)
N = number of points
M = number of dimensions
returns the distance to the kth nearest neighbor for every point in X
'''
knn = NearestNeighbors(n_neighbors=k + 1)
knn.fit(X)
d, _ = knn.kneighbors(X) # the first nearest neighbor is itself
return d[:, -1] # returns the distance to the kth nearest neighbor
def entropy_gaussian(C):
'''
Entropy of a gaussian variable with covariance matrix C
'''
if np.isscalar(C): # C is the variance
return .5*(1 + np.log(2*pi)) + .5*np.log(C)
else:
n = C.shape[0] # dimension
return .5*n*(1 + np.log(2*pi)) + .5*np.log(abs(det(C)))
def entropy(X, k=1):
''' Returns the entropy of the X.
Parameters
===========
X : array-like, shape (n_samples, n_features)
The data the entropy of which is computed
k : int, optional
number of nearest neighbors for density estimation
Notes
======
Kozachenko, <NAME>. & <NAME>. 1987 Sample estimate of entropy
of a random vector. Probl. Inf. Transm. 23, 95-101.
See also: <NAME>. 2008 A computationally efficient estimator for
mutual information, Proc. R. Soc. A 464 (2093), 1203-1215.
and:
<NAME>, <NAME>, <NAME>. (2004). Estimating mutual
information. Phys Rev E 69(6 Pt 2):066138.
'''
# Distance to kth nearest neighbor
r = nearest_distances(X, k) # squared distances
n, d = X.shape
volume_unit_ball = (pi**(.5*d)) / gamma(.5*d + 1)
'''
<NAME>, (2008). Estimation of Information Theoretic Measures
for Continuous Random Variables. Advances in Neural Information
Processing Systems 21 (NIPS). Vancouver (Canada), December.
return d*mean(log(r))+log(volume_unit_ball)+log(n-1)-log(k)
'''
return (d*np.mean(np.log(r + np.finfo(X.dtype).eps))
+ np.log(volume_unit_ball) + psi(n) - psi(k))
def mutual_information(variables, k=1):
'''
Returns the mutual information between any number of variables.
Each variable is a matrix X = array(n_samples, n_features)
where
n = number of samples
dx,dy = number of dimensions
Optionally, the following keyword argument can be specified:
k = number of nearest neighbors for density estimation
Example: mutual_information((X, Y)), mutual_information((X, Y, Z), k=5)
'''
if len(variables) < 2:
raise AttributeError(
"Mutual information must involve at least 2 variables")
all_vars = np.hstack(variables)
# check that mi(X, X) = entropy(X)
check = np.unique(all_vars, axis=1)
if all_vars.shape[1] != check.shape[1]:
print(f'WARNING: dropping {all_vars.shape[1] - check.shape[1]} variables as the samples are identical!')
all_vars = check
return (sum([entropy(X, k=k) for X in variables])
- entropy(all_vars, k=k))
def mutual_information_2d(x, y, sigma=1, normalized=False):
"""
Computes (normalized) mutual information between two 1D variate from a
joint histogram.
Parameters
----------
x : 1D array
first variable
y : 1D array
second variable
sigma: float
sigma for Gaussian smoothing of the joint histogram
Returns
-------
nmi: float
the computed similariy measure
"""
bins = (256, 256)
jh = np.histogram2d(x, y, bins=bins)[0]
# smooth the jh with a gaussian filter of given sigma
ndimage.gaussian_filter(jh, sigma=sigma, mode='constant',
output=jh)
# compute marginal histograms
jh = jh + EPS
sh = np.sum(jh)
jh = jh / sh
s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))
s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))
# Normalised Mutual Information of:
    # Studholme, Hill & Hawkes (1998).
# "A normalized entropy measure of 3-D medical image alignment".
# in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 132-143.
if normalized:
mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2)))
/ np.sum(jh * np.log(jh))) - 1
else:
mi = ( np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1))
- np.sum(s2 * np.log(s2)))
return mi
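# Hedged usage sketch (made-up data): two noisy copies of the same signal share more
# information than two independent draws, so the first value should exceed the second.
def _example_mutual_information_2d():
    rng = np.random.RandomState(0)
    x = rng.randn(5000)
    y = x + 0.5 * rng.randn(5000)
    z = rng.randn(5000)
    return mutual_information_2d(x, y), mutual_information_2d(x, z)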
###############################################################################
# Tests
def test_entropy():
# Testing against correlated Gaussian variables
# (analytical results are known)
# Entropy of a 3-dimensional gaussian variable
rng = np.random.RandomState(0)
n = 50000
d = 3
P = np.array([[1, 0, 0], [0, 1, .5], [0, 0, 1]])
C = np.dot(P, P.T)
Y = rng.randn(d, n)
    X = np.dot(P, Y)
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pathlib import Path
import os
import numpy as np
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.symmetry.analyzer import (
PointGroupAnalyzer,
SpacegroupAnalyzer,
cluster_sites,
iterative_symmetrize,
)
from pymatgen.util.testing import PymatgenTest
test_dir_mol = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")
class SpacegroupAnalyzerTest(PymatgenTest):
def setUp(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
self.structure = p.structure
self.sg = SpacegroupAnalyzer(self.structure, 0.001)
self.disordered_structure = self.get_structure("Li10GeP2S12")
self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001)
s = p.structure.copy()
site = s[0]
del s[0]
s.append(site.species, site.frac_coords)
self.sg3 = SpacegroupAnalyzer(s, 0.001)
graphite = self.get_structure("Graphite")
graphite.add_site_property("magmom", [0.1] * len(graphite))
self.sg4 = SpacegroupAnalyzer(graphite, 0.001)
self.structure4 = graphite
def test_primitive(self):
s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
a = SpacegroupAnalyzer(s)
self.assertEqual(len(s), 4)
self.assertEqual(len(a.find_primitive()), 1)
def test_is_laue(self):
        s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
import os
import logging
import yaml
import numpy as np
from matplotlib import pyplot as plt
# import pandas as pd
# import scipy
import LCTM.metrics
from kinemparse import decode
from mathtools import utils # , metrics
# from blocks.core import blockassembly
logger = logging.getLogger(__name__)
def eval_metrics(pred_seq, true_seq, name_suffix='', append_to={}):
state_acc = (pred_seq == true_seq).astype(float).mean()
metric_dict = {
'State Accuracy' + name_suffix: state_acc,
'State Edit Score' + name_suffix: LCTM.metrics.edit_score(pred_seq, true_seq) / 100,
'State Overlap Score' + name_suffix: LCTM.metrics.overlap_score(pred_seq, true_seq) / 100
}
append_to.update(metric_dict)
return append_to
def suppress_nonmax(scores):
col_idxs = scores.argmax(axis=1)
new_scores = np.zeros_like(scores)
row_idxs = np.arange(scores.shape[0])
new_scores[row_idxs, col_idxs] = scores[row_idxs, col_idxs]
return new_scores
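# Small illustration of suppress_nonmax (scores are made up): only the row-wise argmax
# survives, every other entry is zeroed.
def _example_suppress_nonmax():
    scores = np.array([[0.2, 0.7, 0.1],
                       [0.6, 0.3, 0.1]])
    return suppress_nonmax(scores)  # [[0., 0.7, 0.], [0.6, 0., 0.]]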
def make_event_assembly_transition_priors(event_vocab, assembly_vocab):
def isValid(event, cur_assembly, next_assembly):
is_valid = diff == event
return is_valid
num_events = len(event_vocab)
num_assemblies = len(assembly_vocab)
priors = np.zeros((num_events, num_assemblies, num_assemblies), dtype=bool)
for j, cur_assembly in enumerate(assembly_vocab):
for k, next_assembly in enumerate(assembly_vocab):
try:
diff = next_assembly - cur_assembly
except ValueError:
continue
for i, event in enumerate(event_vocab):
priors[i, j, k] = diff == event
return priors
def make_assembly_transition_priors(assembly_vocab):
def isValid(diff):
for i in range(diff.connections.shape[0]):
c = diff.connections.copy()
c[i, :] = 0
c[:, i] = 0
if not c.any():
return True
return False
num_assemblies = len(assembly_vocab)
priors = np.zeros((num_assemblies, num_assemblies), dtype=bool)
for j, cur_assembly in enumerate(assembly_vocab):
for k, next_assembly in enumerate(assembly_vocab):
if cur_assembly == next_assembly:
continue
try:
diff = next_assembly - cur_assembly
except ValueError:
continue
priors[j, k] = isValid(diff)
return priors
def count_transitions(label_seqs, num_classes, support_only=False):
start_counts = np.zeros(num_classes, dtype=float)
end_counts = np.zeros(num_classes, dtype=float)
for label_seq in label_seqs:
start_counts[label_seq[0]] += 1
end_counts[label_seq[-1]] += 1
start_probs = start_counts / start_counts.sum()
end_probs = end_counts / end_counts.sum()
if support_only:
start_probs = (start_probs > 0).astype(float)
end_probs = (end_probs > 0).astype(float)
return start_probs, end_probs
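# Illustration of count_transitions with made-up label sequences: it returns the
# empirical distributions of start and end labels across the sequences.
def _example_count_transitions():
    label_seqs = [np.array([0, 0, 1, 2]), np.array([0, 1, 1, 1]), np.array([2, 2, 0])]
    start_probs, end_probs = count_transitions(label_seqs, num_classes=3)
    return start_probs, end_probs  # starts -> [2/3, 0, 1/3], ends -> [1/3, 1/3, 1/3]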
def count_priors(label_seqs, num_classes, stride=None, approx_upto=None, support_only=False):
dur_counts = {}
class_counts = {}
for label_seq in label_seqs:
for label, dur in zip(*utils.computeSegments(label_seq[::stride])):
class_counts[label] = class_counts.get(label, 0) + 1
dur_counts[label, dur] = dur_counts.get((label, dur), 0) + 1
class_priors = np.zeros((num_classes))
for label, count in class_counts.items():
class_priors[label] = count
class_priors /= class_priors.sum()
max_dur = max(dur for label, dur in dur_counts.keys())
    dur_priors = np.zeros((num_classes, max_dur))
# Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import re
try:
import nest
m = re.search(r'.*(\d+)\.(\d+)\.(\d+).*', nest.version())
ver_major = int(m.group(1))
ver_minor = int(m.group(2))
built_in_glifs = ver_major >= 2 and ver_minor >= 20
except Exception as e:
built_in_glifs = False
def lif_aibs_converter(config, tau_syn=[5.5, 8.5, 2.8, 5.8]):
"""
:param config:
:return:
"""
coeffs = config['coeffs']
params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'V_reset': config['El_reference'] * 1.0e03,
'tau_syn': tau_syn,
'V_dynamics_method': 'linear_exact'} # 'linear_forward_euler' or 'linear_exact'
return params
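# Hedged usage sketch: the dictionary below only mirrors the keys lif_aibs_converter
# reads from an AIBS GLIF configuration; every value is made up purely to show the unit
# conversions (V -> mV, 1/Ohm -> nS, F -> pF, s -> ms). It is not a real cell model.
def _example_lif_aibs_converter():
    config = {
        'coeffs': {'th_inf': 1.0, 'G': 1.0, 'C': 1.0},
        'th_inf': 0.02,          # V, relative to El_reference
        'El_reference': -0.07,   # V
        'El': 0.0,               # V, relative to El_reference
        'R_input': 2.0e8,        # Ohm
        'C': 1.0e-10,            # F
        'spike_cut_length': 40,  # samples
        'dt': 5.0e-5,            # s
    }
    return lif_aibs_converter(config)  # e.g. V_th=-50 mV, g=5 nS, C_m=100 pF, t_ref=2 ms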
def lif_asc_aibs_converter(config, tau_syn=[5.5, 8.5, 2.8, 5.8]):
"""
:param config:
:return:
"""
coeffs = config['coeffs']
params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'V_reset': config['El_reference'] * 1.0e03,
'asc_init': np.array(config['init_AScurrents']) * 1.0e12,
'k': 1.0 / np.array(config['asc_tau_array']) * 1.0e-03,
'tau_syn': tau_syn,
'asc_decay': 1.0 / np.array(config['asc_tau_array']) * 1.0e-03,
'asc_amps': np.array(config['asc_amp_array']) * np.array(coeffs['asc_amp_array']) * 1.0e12,
'V_dynamics_method': 'linear_exact'
}
return params
def lif_r_aibs_converter(config):
"""
:param config:
:return:
"""
coeffs = config['coeffs']
threshold_params = config['threshold_dynamics_method']['params']
reset_params = config['voltage_reset_method']['params']
params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'a_spike': threshold_params['a_spike'] * 1.0e03,
'b_spike': threshold_params['b_spike'] * 1.0e-03,
'a_reset': reset_params['a'],
'b_reset': reset_params['b'] * 1.0e03,
'V_dynamics_method': 'linear_exact'}
return params
def lif_r_asc_aibs_converter(config):
"""Creates a nest glif_lif_r_asc object"""
coeffs = config['coeffs']
threshold_params = config['threshold_dynamics_method']['params']
reset_params = config['voltage_reset_method']['params']
params={'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'a_spike': threshold_params['a_spike'] * 1.0e03,
'b_spike': threshold_params['b_spike'] * 1.0e-03,
'a_reset': reset_params['a'],
'b_reset': reset_params['b'] * 1.0e03,
'asc_init': np.array(config['init_AScurrents']) * 1.0e12,
            'k': 1.0 / np.array(config['asc_tau_array']) * 1.0e-03,
import time
import numpy as np
from slam.Variables import R2Variable
from factors.Factors import UnaryR2GaussianPriorFactor, \
R2RelativeGaussianLikelihoodFactor
from slam.NFiSAM import NFiSAM, NFiSAMArgs
import matplotlib.pyplot as plt
import os
def plot_data(x,i=0,j=1, **kwargs):
plt.scatter(x[:,i], x[:,j], marker="x", **kwargs)
plt.xlim((-10, 15))
plt.ylim((-10, 15))
if __name__ == '__main__':
node_x0 = R2Variable('x0')
node_x1 = R2Variable('x1')
node_x2 = R2Variable('x2')
node_x3 = R2Variable('x3')
node_x4 = R2Variable('x4')
node_x5 = R2Variable('x5')
node_l1 = R2Variable('l1')
node_l2 = R2Variable('l2')
nodes = [node_x0,node_x1,node_x2,node_x3,node_x4,node_x5,node_l1,node_l2]
    disp_x0_l1 = np.array([5, 5])
#Importing
import librosa
import numpy as np
import scipy
from scipy.spatial.distance import pdist, squareform
from scipy.interpolate import interp2d
from scipy.sparse.csgraph import laplacian
from scipy.spatial.distance import directed_hausdorff
from scipy.cluster import hierarchy
from scipy.linalg import eigh
from scipy.ndimage import median_filter
import cv2
from sklearn import metrics
import dill
import sys
import glob
import os
import random
from segment_covers80 import segment
#change matplotlib backend to save rendered plots correctly on linux
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
#--supress warnings--#
import warnings
warnings.filterwarnings("ignore")
#-------------#
#---reading---#
#-------------#
all_dirs = []
all_names = []
all_roots = []
all_audio = []
max_files = 40000
for root, dirs, files in os.walk('/home/ismir/Documents/ISMIR/Datasets/covers80/'):
for name in files:
if (('.wav' in name) or ('.aif' in name) or ('.mp3' in name)):
filepath = os.path.join(root, name)
all_dirs.append(filepath)
all_names.append(name[:-4])
all_roots.append(root)
if len(all_dirs)>=max_files:
break
if len(all_dirs)>=max_files:
break
file_no = len(all_dirs)
#----------------#
#---load audio---#
#----------------#
for f in range(file_no):
y, sr = librosa.load(all_dirs[f], sr=16000, mono=True)
#bug: empty mel bins
all_audio.append((y,sr))
#progress
sys.stdout.write("\rLoading %i/%s pieces." % ((f+1), str(file_no)))
sys.stdout.flush()
print('')
#-----------#
#---cover---#
#-----------# cover (True) vs non-cover (False)
covers = np.zeros((file_no, file_no), dtype=np.bool_)
for i in range(file_no):
for j in range(file_no):
if (all_roots[i] == all_roots[j]):
covers[i][j] = True
else:
covers[i][j] = False
#-------------------------#
#---Distance dictionary---#
#-------------------------#
"""Terminology
distances: L1, fro, dtw, hau, pair, sh2, sh3
format: rs_size-approx[0]-approx[1]-distance e.g. 128-2-8-L1
"""
distances = {}
#----------------------#
#---Score dictionary---#
#----------------------#
"""Terminology
distances: L1, fro, dtw, hau, pair, sh2, sh3
format: (filt-)rs_size-approx[0]-approx[1]-distance e.g. filt-128-2-8-L1
"""
scores = {}
#--------------------------------------------#
#---traverse parameters, segment, evaluate---#
#--------------------------------------------#
#figure directory
fig_dir = '/home/ismir/Documents/ISMIR/figures/covers80_run2/'
#for rs_size in [32]: #test config
for rs_size in [32, 64, 128, 256]: #resampling parameters
#for approx in [[2,6]]: #test config
for approx in [[2,6], [2,8], [4,10], [8,12]]: #min number of approximations is 3
for filtering in [True, False]:
#string for keys to indicate filtering
if filtering:
filt = 'filt-'
else:
filt = ''
#print configuration
print("--------------------")
print("Resampling size:", str(rs_size))
print("Approximation range: [" + str(approx[0]) + ',' + str(approx[1]) + ']')
print("Filtering:", str(filtering))
#--------------------#
#--------------------#
#-----Formatting-----#
#--------------------#
#--------------------#
#hold all structures and their formats
all_struct = [] #kmax-kmin sets each with a square matrix
all_flat = [] #kmax-kmin sets each with a flattened matrix
all_merged = [] #single concatenated vector with all flattened matrices
all_shingled2 = [] #shingle adjacent pairs of flat approoximations
all_shingled3 = [] #shingle adjacent triples of flat approoximations
#traverse songs
for f in range(file_no):
#structure segmentation
struct = segment(all_audio[f][0], all_audio[f][1],
rs_size, approx[0], approx[1], filtering)
all_struct.append(struct)
# #debug
# fig, axs = plt.subplots(1, approx[1]-approx[0], figsize=(20, 20))
# for i in range(approx[1]-approx[0]):
# axs[i].matshow(struct[i])
# plt.savefig(all_names[f])
#formatting
flat_approximations = []
merged_approximations = np.empty((0))
for j in range(approx[1]-approx[0]):
flat_approximations.append(struct[j].flatten())
                    merged_approximations = np.concatenate((merged_approximations, flat_approximations[j]))
# -*- coding: utf-8 -*-
"""
Label the atoms of two molecules in a commensurate way. This will be implemented
using the atomic bonding neighborhoods of each atom and the molecular connectivity.
In addition, it would be nice if there was a particular unique way of doing
this such that it wouldn't require comparing two molecules, but that should
come later.
"""
import copy,itertools
import numpy as np
import networkx as nx
from ase.data import vdw_radii,atomic_numbers
from ase.data.colors import jmol_colors
def label(mol, bonds_kw={"mult": 1.20, "skin": 0, "update": False}):
"""
Orders the atoms in the molecule in a unique way using a graph
representation. The initial order of the atoms will not impact the order
of labelling of nodes in the molecular graph. Note that if a labelling is
symmetrically equivalent based on a undirected and unweighted graphical
representation of the molecule, then the label order may be dependent
on the initial ordering of atoms. This is important for cases where side
groups have the exact same chemical identity, but perhaps very different
torsion angles. Unique labelling based on torsion angles, for example,
cannot be handled directly by this function. In nearly all other
cases, this method will provide a unique ordering of the atoms in the
molecule.
"""
return order_atoms(mol, bonds_kw=bonds_kw)
def order_atoms(mol, bonds_kw={"mult": 1.20, "skin": 0, "update": False}):
"""
Purpose of this function is as follows . . . .
The algorithm works as follows . . . .
This is useful because . . . .
    Arguments
---------
mol: Structure
Structure object to order labels of
bonds_kw: dict
Dictionary for computing bonds
"""
unique_path = traverse_mol(mol, bonds_kw, ret="idx")
geo = mol.get_geo_array()
ele = mol.elements
mol.from_geo_array(geo[unique_path], ele[unique_path])
### Need to update bonds after changing the atomic ordering using lookup
### dict of the bonds currently stored in the molecule
bond_idx_lookup = {}
for new_idx,former_idx in enumerate(unique_path):
bond_idx_lookup[former_idx] = new_idx
### First the bonds have to be resorted
mol.properties["bonds"] = [mol.properties["bonds"][x] for x in unique_path]
### Then indices have to change
for idx1,bond_list in enumerate(mol.properties["bonds"]):
for idx2,atom_idx in enumerate(bond_list):
mol.properties["bonds"][idx1][idx2] = bond_idx_lookup[atom_idx]
return mol
def match_labels(mol1, mol2,
bonds_kw={"mult": 1.20,
"skin": 0,
"update": False},
warn=False):
"""
Arguments
---------
warn: bool
If False, then an exception will be raised if the ordering of the
molecules cannot be matched exactly. If True, then only a warning will
be raised.
"""
formula1 = mol1.formula
formula2 = mol2.formula
if formula1 != formula2:
### It might be possible to include some fuzzier implementation that
### finds best match even if the labels are not identical
### But that should be done later
raise Exception("Matching atom labels requires that the formulas "+
"of the molecules is identical")
### Make sure that the correct bonds have been obtained based on input arg
mol1.get_bonds(**bonds_kw)
mol2.get_bonds(**bonds_kw)
### Just label the first molecule arbitrarily because the ordering of the
### second molecule will be built to match this one
### Also, note that once the atom ordering is changed once, it will never
### change again thus this is a very safe thing to do even if mol1 is
### fed into this function many multiples of times. There is just some
### argument against this for the sake of inefficiency, but the label
### algorithm should be very fasty anyways...
mol1 = label(mol1)
### Any neighborhood list that matches this target list will be exactly
### an identical ordering up-to the symmetry of the molecular graph
target_list = traverse_mol(mol1, bonds_kw, ret="neigh")
### Start with label mol2 and then start traversing by possible starting pos
mol2 = label(mol2)
### Just need to iterate over the possible starting positions for the
### second molecule until an exact match is found
ele2 = mol2.elements
ele_numbers = [atomic_numbers[x] for x in ele2]
max_idx_list = np.where(ele_numbers == np.max(ele_numbers))[0]
final_idx_list = []
for start_idx,_ in enumerate(max_idx_list):
candidate_list = traverse_mol(mol2,
start_idx=start_idx,
bonds_kw=bonds_kw,
ret="neigh")
if candidate_list == target_list:
final_idx_list = traverse_mol(mol2,
start_idx=start_idx,
bonds_kw=bonds_kw,
ret="idx")
break
if len(final_idx_list) == 0:
if not warn:
raise Exception("Atom ordering could not be matched for {} {}"
.format(mol1.struct_id, mol2.struct_id))
else:
print("Atom ordering could not be matched for {} {}"
.format(mol1.struct_id, mol2.struct_id))
### Shouldn't make any modification if this is the case
return mol1,mol2
### Reorder mol2 before returning
geo = mol2.get_geo_array()
ele = mol2.elements
mol2.from_geo_array(geo[final_idx_list], ele[final_idx_list])
### Need to update bonds after changing the atomic ordering using lookup
### dict of the bonds currently stored in the molecule
bond_idx_lookup = {}
for new_idx,former_idx in enumerate(final_idx_list):
bond_idx_lookup[former_idx] = new_idx
### First the bonds have to be resorted
mol2.properties["bonds"] = [mol2.properties["bonds"][x] for x in final_idx_list]
### Then indices have to change
for idx1,bond_list in enumerate(mol2.properties["bonds"]):
for idx2,atom_idx in enumerate(bond_list):
mol2.properties["bonds"][idx1][idx2] = bond_idx_lookup[atom_idx]
return mol1,mol2
def mol2graph(mol, bonds_kw={"mult": 1.20,"skin": 0,"update": False}):
return mol_to_graph(mol, bonds_kw)
def mol_to_graph(mol,
bonds_kw={"mult": 1.20,"skin": 0,"update": False}):
ele = mol.elements
bonds = mol.get_bonds(**bonds_kw)
g = nx.Graph()
### Add node to graph for each atom in struct
g.add_nodes_from(range(len(ele)))
### Add edges
for i,bond_list in enumerate(bonds):
[g.add_edge(i,x) for x in bond_list]
### Add neighborhoods to each atom of molecule
neighbors = [[[]] for x in g.nodes]
for i,idx_list in enumerate(neighbors):
neighbor_list = [x for x in g.adj[i]]
neighbor_list.append(i)
idx_list[0] += neighbor_list
sorted_list = _sort_neighborlist(mol, g, neighbors)
mol.properties["neighbors"] = sorted_list
fragment_list = [mol.elements[tuple(x)] for x in sorted_list]
fragment_list = [''.join(x) for x in fragment_list]
mol.properties["neighbor_str"] = fragment_list
attr_dict = {}
for idx in range(len(g)):
attr_dict[idx] = {"ele": ele[idx], "neighborhood": fragment_list[idx]}
nx.set_node_attributes(g, attr_dict)
return g
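### Minimal usage sketch for mol_to_graph. Assumes `mol` is the Molecule-like
### object used throughout this module (providing `elements` and `get_bonds`).
### Wrapped in a function so nothing executes on import.
def _demo_mol_to_graph(mol):
    g = mol_to_graph(mol)
    for node_idx in g.nodes:
        ### Each node carries its element and first-shell neighborhood string
        print(node_idx, g.nodes[node_idx]["ele"], g.nodes[node_idx]["neighborhood"])
    return g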
def traverse_mol(mol,
bonds_kw={"mult": 1.20,"skin": 0,"update": False},
start_idx=0,
ret="idx",
):
"""
Arguments
---------
start_idx: int
This is used to index into the indices of atoms in the molecule that
all have the same maximum atomic number.
"""
g = mol_to_graph(mol, bonds_kw)
ele = mol.elements
ele_numbers = [atomic_numbers[x] for x in ele]
max_idx_list = np.where(ele_numbers == np.max(ele_numbers))[0]
if start_idx == 0:
start_idx = max_idx_list[0]
else:
if start_idx >= len(max_idx_list):
start_idx = max_idx_list[-1]
else:
start_idx = max_idx_list[start_idx]
# print("START IDX: {}".format(start_idx))
path = ordered_traversal_of_molecule_graph(g, start_idx)
if ret == "idx":
return path
elif ret == "neigh" or \
ret == "neighbor" or \
ret == "neighborhood" or \
ret == "neighborhoods":
return [mol.properties["neighbor_str"][x] for x in path]
else:
raise Exception("ret not known")
return path
def basic_traversal_of_molecule_graph(graph,
node_idx,
current_path=None):
"""
This recursive function returns a basic traversal of a molecular graph.
This traversal obeys the following rules:
1. Locations may only be visited once
        2. All locations must be visited
"""
    if current_path is None:
current_path = []
path = [node_idx]
current_path += [node_idx]
neighbors = graph.adj[node_idx]
### Need to take care of cycles
for entry in neighbors:
if entry in current_path:
continue
neigh_path = basic_traversal_of_molecule_graph(graph, entry, current_path)
if len(neigh_path) > 0:
path += neigh_path
current_path += path
return path
def ordered_traversal_of_molecule_graph(graph,
node_idx,
current_path=None):
"""
This algorithm works as follows:
1.
"""
path = [node_idx]
current_path = [node_idx]
current_node_idx = node_idx
current_path_idx = 0
while True:
add_site = add_one_for_ordered_traversal(graph, current_node_idx, current_path)
# print("Path: {}; Add Site: {}".format(path, add_site))
### Check for the case where no new site has been added
### If no new site has been added go backwards in path list and try
### to add a new site again
if len(add_site) == 1:
current_path_idx -= 1
### Completion condition
if current_path_idx < 0:
if len(path) == len(graph):
# print("FINISH")
break
else:
raise Exception("Error")
current_node_idx = path[current_path_idx]
continue
path += [add_site[1]]
current_path += [add_site[1]]
current_path_idx = len(path)
current_node_idx = path[current_path_idx - 1]
if len(path) == len(graph):
# print("NORMAL FINISH")
break
return path
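### Hedged sketch: ordered traversal of a hand-built graph for ethanol
### (CH3-CH2-OH). Relies on the module-level `atomic_numbers` mapping and
### networkx (`nx`) already used in this file; wrapped in a function so it is
### only executed on demand.
def _demo_ordered_traversal():
    elements = ["C", "C", "O", "H", "H", "H", "H", "H", "H"]
    bonds = [(0, 1), (1, 2), (0, 3), (0, 4), (0, 5), (1, 6), (1, 7), (2, 8)]
    g = nx.Graph()
    g.add_nodes_from(range(len(elements)))
    g.add_edges_from(bonds)
    nx.set_node_attributes(g, {i: {"ele": e} for i, e in enumerate(elements)})
    ### Start from the oxygen (heaviest atom), mirroring what traverse_mol does
    path = ordered_traversal_of_molecule_graph(g, 2)
    print(path)  # each atom index appears exactly once
    return path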
def add_one_for_ordered_traversal(graph,
node_idx,
current_path=None):
"""
This recursive function returns an ordered traversal of a molecular graph.
This traversal obeys the following rules:
1. Locations may only be visited once
        2. All locations must be visited
        3. Locations are visited in the order in which the shortest path is
           followed
            - If potential paths are identical in length, then the one that
              provides the lightest total weight is followed
            - If the total weights of the paths are identical (which would be
              the case for a molecule that contains a cycle) then the paths
              are compared position by position and the one that encounters
              the heaviest atom first is chosen
            - If the paths are completely identical, then the first one is
              chosen; from the perspective of the graph the choice is arbitrary
Recursive algorithm works as follows:
1. Go from node to node until reaching a node that has no neighbors.
2. Once this node is reached, it returns itself back up the stack.
3. If a node only has a single path, this is also immediately returned
up the stack.
        4. Once a node is reached that has two possible paths, a choice is made
           between the two competing paths. The path that is the shortest is
           automatically chosen... But this is actually not what I want.
           What I want is that the path leading down is fully traversed and
           then the path that provides the lightest direction is followed first.
           If both paths are then equal in weight (such as should be the case
           for a cycle) then the path that provides the most direct route
           to the heaviest group will be preferred.
If the paths are completely identical, then it should not matter
which one is chosen first from the perspective of a graph.
"""
    if current_path is None:
current_path = []
### Make copy of input current_path
current_path = [x for x in current_path]
path = [node_idx]
current_path += [node_idx]
neighbors = graph.adj[node_idx]
### Build entire traversal list
neigh_path_list = []
for entry in neighbors:
# print(node_idx, entry)
if entry in current_path:
continue
neigh_path = add_one_for_ordered_traversal(graph, entry, current_path)
if len(neigh_path) > 0:
neigh_path_list.append(neigh_path)
# print(node_idx, entry, neigh_path)
### Only a single option
if len(neigh_path_list) == 1:
if len(neigh_path_list[0]) == 1:
path += neigh_path_list[0]
return path
elif len(neigh_path_list) == 0:
return [node_idx]
### If there's more than single option, then an algorithm that seeks
### to stich together the neighbor paths in a reasonable and unique way
### should be used
neigh_list_sorted = _sort_neighbor_path_list(graph, neigh_path_list)
# print("SORTED: ", neigh_list_sorted)
path += neigh_list_sorted
return path
def _sort_neighbor_path_list(graph, neigh_path_list):
"""
Recursive algorithm for sorting the neigh_path_list. Sorts by following
criteria:
1.
X. For a cycle, find the largest detour from the most direct path in
the cycle
"""
# print(neigh_path_list)
if len(neigh_path_list) == 0:
return []
neigh_path_list = copy.deepcopy(neigh_path_list)
final_path_list = []
path_length = np.zeros((len(neigh_path_list))).astype(int)
for idx,entry in enumerate(neigh_path_list):
path_length[idx] = len(entry)
min_idx = np.argwhere(path_length == np.min(path_length)).ravel()
if len(min_idx) == 1:
final_path_list += neigh_path_list[min_idx[0]]
del(neigh_path_list[min_idx[0]])
else:
### Take into account atomic weights
path_weight = np.zeros((len(min_idx))).astype(int)
path_weight_list = []
for idx,entry in enumerate(min_idx):
temp_path = neigh_path_list[entry]
temp_weight_list = [atomic_numbers[graph.nodes[x]["ele"]] for x in temp_path]
temp_weight = np.sum(temp_weight_list)
path_weight[idx] = temp_weight
path_weight_list.append(temp_weight_list)
min_idx = np.argwhere(path_weight == np.min(path_weight)).ravel()
if len(min_idx) == 1:
final_path_list += neigh_path_list[min_idx[0]]
del(neigh_path_list[min_idx[0]])
        ### If absolutely identical, then it should not matter which is used first
elif all(x==path_weight_list[0] for x in path_weight_list):
final_path_list += neigh_path_list[min_idx[0]]
del(neigh_path_list[min_idx[0]])
else:
# ### FOR TESTING
# path_weight_list = [[6, 0, 1, 6, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1],
# [6, 1, 6, 1, 6, 1, 6, 1, 6, 6, 1, 6, 6, 1, 6, 1],
# [6, 1, 6, 1, 6, 1, 6, 1, 6, 6, 1, 6, 6, 1, 6, 1]]
### Total path weights are identical but not ordered the same
### Likely, this is because there's a cycle in the molecule
# print("CYCLES?: {}".format(neigh_path_list))
# print("TOTAL PATH WEIGHTS: {}".format(path_weight))
# print("PATH WEGITH LIST: {}".format(path_weight_list))
### Just choose the one that encounters the heaviest atom first
keep_mask = np.ones((len(path_weight_list,))).astype(int)
chosen_idx = -1
for idx in range(len(path_weight_list[0])):
current_pos = np.array([x[idx] for x in path_weight_list])
### Mask off entries that have already been discarded
current_pos = current_pos[keep_mask.astype(bool)]
### Keep track of idx that are now described by current_pos
keep_idx = np.where(keep_mask == 1)[0]
### Get mask that describes which paths have maximum at this
### position in the path
max_avail_mask = current_pos == np.max(current_pos)
max_avail_idx = np.where(max_avail_mask > 0)[0]
### Get those that need to be discarded
add_to_discard_idx = np.where(max_avail_mask < 1)[0]
### De-reference add_to_discard_idx wrt the keep_idx and
### add these to the keep_mask
add_to_discard_idx = keep_idx[add_to_discard_idx]
keep_mask[add_to_discard_idx] = 0
### If there's only one option left, then this is the optimal path
if len(max_avail_idx) == 1:
### De-reference max idx with the paths that have been
### kept, described by keep_idx
chosen_idx = keep_idx[max_avail_idx[0]]
break
### The only possible case that a path was not chosen, is that
### two of the paths in the lists are identical and optimal.
### Therefore, just choose one of them (the first one)
if chosen_idx < 0:
keep_idx = np.where(keep_mask > 0)[0]
chosen_idx = keep_idx[0]
# print("MORE THAN ONE OPTIMAL PATH")
# print("CHOSEN DUE TO INDEX: ", idx)
# print("CHOSEN PATH IDX: ", chosen_idx)
# print("KEEP MASK: ", keep_mask)
# print("MAX IDX: ", max_idx)
# raise Exception("NEED TO IMPLEMENT HERE")
final_path_list += neigh_path_list[chosen_idx]
del(neigh_path_list[chosen_idx])
if len(neigh_path_list) != 0:
final_path_list += _sort_neighbor_path_list(graph, neigh_path_list)
return final_path_list
def _sort_neighborlist(mol, g, neighbor_list):
"""
Sorts neighborlist according to definition in _calc_neighbors. Only
works for a radius of 1.
Arguments
---------
g: nx.Graph
neighbor_list: list of int
List of adjacent nodes plus the node itself as i
"""
ele = mol.elements
sorted_list_final = [[[]] for x in g.nodes]
for i,temp in enumerate(neighbor_list):
        # Unpack the neighbor list entry for this node
idx_list = temp[0]
current_node = i
terminal_groups = []
bonded_groups = []
for idx in idx_list:
if g.degree(idx) == 1:
terminal_groups.append(idx)
else:
bonded_groups.append(idx)
terminal_ele = ele[terminal_groups]
alphabet_idx = np.argsort(terminal_ele)
terminal_groups = [terminal_groups[x] for x in alphabet_idx]
sorted_list = terminal_groups
if current_node not in terminal_groups:
sorted_list.append(current_node)
remove_idx = bonded_groups.index(current_node)
del(bonded_groups[remove_idx])
bonded_ele = ele[bonded_groups]
alphabet_idx = np.argsort(bonded_ele)
bonded_groups = [bonded_groups[x] for x in alphabet_idx]
sorted_list += bonded_groups
sorted_list_final[i][0] = sorted_list
return sorted_list_final
def get_bond_fragments(mol,
bonds_kw={"mult": 1.20,"skin": 0,"update": False},
return_counts=True):
g = mol_to_graph(mol, bonds_kw=bonds_kw)
neigh_list = []
for node_idx in g.nodes:
neigh_list.append(g.nodes[node_idx]["neighborhood"])
return np.unique(neigh_list, return_counts=return_counts)
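### Short usage sketch for get_bond_fragments: print each first-shell
### neighborhood string found in `mol` together with how many atoms share it.
### `mol` is assumed to be the same Molecule-like object used above.
def _demo_bond_fragments(mol):
    fragments, counts = get_bond_fragments(mol, return_counts=True)
    for frag, count in zip(fragments, counts):
        print(frag, count)
    return dict(zip(fragments, counts))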
def draw_mol_graph(mol, bonds_kw={"mult": 1.20, "skin": 0, "update": False}):
g = mol_to_graph(mol, bonds_kw=bonds_kw)
ele = mol.elements
idx = [atomic_numbers[x] for x in ele]
colors = jmol_colors[idx]
nx.draw(g, node_color=colors, with_labels=True)
def label_mp(mol, sort_method="hash"):
"""
Will label the atoms of the molecule using the new message passing
method
"""
g = mol2graph(mol)
message_descriptors = message_passing_atom_descriptors(g)
### Now, just need to decide order based on the message_descriptors
### Simple method is just based on sorting resulting hashes to provide
### a truely unique ordering (up to graph symmetry)
if sort_method != "hash":
raise Exception("Only the hash sort method is implemented")
hash_list = []
for _,message in message_descriptors.items():
hash_list.append(hash(message))
unique_path = np.argsort(hash_list)
geo = mol.get_geo_array()
ele = mol.geometry["element"]
mol.from_geo_array(geo[unique_path], ele[unique_path])
### Need to update bonds after changing the atomic ordering using lookup
### dict of the bonds currently stored in the molecule
bond_idx_lookup = {}
for new_idx,former_idx in enumerate(unique_path):
bond_idx_lookup[former_idx] = new_idx
### First the bonds have to be resorted
mol.properties["bonds"] = [mol.properties["bonds"][x] for x in unique_path]
### Then indices have to change
for idx1,bond_list in enumerate(mol.properties["bonds"]):
for idx2,atom_idx in enumerate(bond_list):
mol.properties["bonds"][idx1][idx2] = bond_idx_lookup[atom_idx]
### Recompute properties from graph
mol_to_graph(mol)
return mol
def get_hash_order(mol):
g = mol2graph(mol)
message_descriptors = message_passing_atom_descriptors(g)
hash_list = []
for _,message in message_descriptors.items():
hash_list.append(hash(message))
sort_idx = np.argsort(hash_list)
return sort_idx
def match2graph(mol,target_graph):
"""
Return whether or not the given molecule matches the target_graph. If it
    does not match, an empty list will be returned. If it does match, the
index that matches the atoms from the molecule to the target_graph will be
given
This new implementation will work much better for matching two molecules.
The performance should be greatly increased.
"""
### First check formula
target_graph_formula = {}
target_graph_ele = []
for node_idx in target_graph.nodes:
target_graph_ele.append(target_graph.nodes[node_idx]["ele"])
ele,counts = np.unique(target_graph_ele, return_counts=True)
target_graph_formula = dict(zip(ele,counts))
mol_formula = mol.formula()
if mol_formula != target_graph_formula:
return []
### Then check neigh strings
target_graph_neigh = []
for node_idx in target_graph.nodes:
target_graph_neigh.append(target_graph.nodes[node_idx]["neighborhood"])
neigh,counts = np.unique(target_graph_neigh, return_counts=True)
target_graph_neigh = dict(zip(neigh,counts))
input_graph = mol_to_graph(mol)
input_neigh = []
for node_idx in input_graph.nodes:
input_neigh.append(input_graph.nodes[node_idx]["neighborhood"])
neigh,counts = np.unique(input_neigh, return_counts=True)
input_neigh = dict(zip(neigh,counts))
if input_neigh != target_graph_neigh:
return []
### If neighborhoods & counts match find the ordering that matches the
    ### neighborhood orderings of the two graphs
### Start by getting the message descriptors for each atom given the index
target_md = message_passing_atom_descriptors(target_graph)
input_md = message_passing_atom_descriptors(input_graph)
### Now, is as simple as building a lookup table that will take the input_md
### and transform to the target_md
input_lookup_dict = {}
for idx,message in input_md.items():
### Account for atoms that may have the same messages
if message in input_lookup_dict:
input_lookup_dict[message].append(idx)
else:
input_lookup_dict[message] = [idx]
### Now iterate through target, matching each target to an index in the
### input molecule
matching_indices = []
for idx,message in target_md.items():
if message not in input_lookup_dict:
raise Exception("This should not be possible.")
input_idx_list = input_lookup_dict[message]
if len(input_idx_list) == 0:
raise Exception("This should not be possible")
matching_indices.append(input_idx_list.pop())
return matching_indices
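### Hedged sketch of the intended use of match2graph: build the target graph
### once from a reference molecule and reorder another molecule (e.g. a second
### conformer of the same species) so its atoms line up with the reference.
### Assumes both objects expose get_geo_array/elements/from_geo_array as used
### elsewhere in this module; bond lists are not updated here.
def _demo_match2graph(ref_mol, other_mol):
    target_graph = mol_to_graph(ref_mol)
    idx = match2graph(other_mol, target_graph)
    if len(idx) == 0:
        print("Molecules do not share the same molecular graph")
        return other_mol
    geo = other_mol.get_geo_array()
    ele = other_mol.elements
    other_mol.from_geo_array(geo[idx], ele[idx])
    return other_mol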
def message_passing_atom_descriptors(mol_graph):
"""
Message passing is utilized to provide a unique identification of each
node in the graph and where it is with respect to the rest of the molecule.
This is accomplished by iteratively collecting messages from atoms of
increasing bond distance until all possible messages have been received.
The messages that are passed by each atom are the sorted atom neighborhood
strings. Messages are passed by and from every atom in the molecule,
    thereby collecting at each atom a distinctly unique set of messages that
    details its exact chemical environment within the molecule. Sorting the
    atoms is then the task of providing a unique ordering for the messages of
    a given molecule. One simple way is to hash the message descriptors
    collected by each atom into an integer; a unique ordering of the atoms
    (up to graph symmetry) is then obtained by sorting on this integer hash.
"""
message_dict = {}
for node_idx in mol_graph.nodes:
temp_neigh = mol_graph.nodes[node_idx]["neighborhood"]
message_dict[node_idx] = {
"messages": {0: {"message_list": [temp_neigh], "from_list": [node_idx]}},
"used_idx": {node_idx: True}
}
message_order = 1
while True:
test_converged = _get_message(mol_graph, message_dict, message_order)
message_order += 1
if not test_converged:
break
message_descriptors = {}
for node_idx in mol_graph.nodes:
temp_full_message = ""
for key,value in message_dict[node_idx]["messages"].items():
sorted_message = np.sort(value["message_list"])
temp_full_message += "_".join(sorted_message)
temp_full_message += " | "
message_descriptors[node_idx] = temp_full_message
return message_descriptors
def _get_message(mol_graph, message_dict, message_order):
### Store whether or not a meaningful message has been passed in order to
### check that algorithm has converged
message_passed = False
# ### Testing writing next messages
# message_order = 2
for node_idx in mol_graph.nodes:
message_dict[node_idx]["messages"][message_order] = {"message_list": [],
"from_list": []}
temp_dict = message_dict[node_idx]["messages"][message_order]
prev_nodes = message_dict[node_idx]["messages"][message_order-1]["from_list"]
for temp_idx in prev_nodes:
adj_list = [x for x in mol_graph.adj[temp_idx]]
for next_node_idx in adj_list:
### Check that it has not already been visited
if next_node_idx in message_dict[node_idx]["used_idx"]:
continue
### Otherwise, collect the message
temp_message = mol_graph.nodes[next_node_idx]["neighborhood"]
temp_dict["message_list"].append(temp_message)
temp_dict["from_list"].append(next_node_idx)
message_dict[node_idx]["used_idx"][next_node_idx] = True
message_passed = True
return message_passed
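### Sketch tying the message-passing descriptors to an atom ordering, mirroring
### what label_mp/get_hash_order do above. `mol` is assumed to be the
### Molecule-like object used throughout this module.
def _demo_message_passing_order(mol):
    g = mol2graph(mol)
    message_descriptors = message_passing_atom_descriptors(g)
    ### One hash per atom; graphically equivalent atoms receive identical hashes
    hash_list = [hash(message_descriptors[i]) for i in range(len(g))]
    order = np.argsort(hash_list)
    print(order)
    return order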
def get_symmetric_ordering(mol, global_min_result=False):
"""
Get all possible atom orderings of the given molecule that provide a
graphically symmetric result. Using this method, RMSD calculations can
    occur between two graphically symmetric molecules, returning the global
minimum result in an efficient way, performing the smallest possible
number of calculations.
The only assumption made here is that the graph should start from the atom
type that has the smallest number of graphically unique locations in the
molecule. If there is a tie, then the graphically unique location with the
    smallest hash is used. This does not lead to the literal smallest number
    of calculations, because every starting location would have to be searched
    over in order to guarantee the smallest possible number of symmetric
    orderings. However, doing so would likely increase the computational cost
    beyond any savings. This trade-off is the purpose of the argument
    global_min_result.
If global_min_result is True, then all possible starting hashes are searched
over to provide the literal minimum possible number of symmetric orderings.
If global_min_result is False, then a reasonable assumption is used, as
explained above, to provide a very good estimate for the minimum possible
number of symmetry orderings at a much smaller computational cost.
    Certain algorithms will benefit from an efficient estimate, while others
    will benefit from the global minimum at increased initial
computational cost. Therefore, this is left to the user/developer to decide.
Timing Info:
global_min_result=False
TATB : 96 ms, 384 paths, tatb
QQQBRD02: 179 ms, 4608 paths, hexanitroethane
1142965 : 2.1 ms, 16 paths, p-nitrobenzene
1211485 : 4.6 ms, 144 paths, N,N-dimethylnitramine
1142948 : 3.37 ms, 8 paths, m-Dinitrobenzene
ZZZMUC : 21.5 ms, 96 paths, TNT
ZZZFYW : 14.3 ms, 8 paths, 1,2-dinitrobenzene
CUBANE : 627 ms, 240 paths, cubane
220699 : 4.31 ms, 288 paths, N,N-dimethyl-4-nitroaniline
DATB : 11.9 ms, 64 paths, datb
HEVRUV : 9.31 ms, 48 paths, trinitromethane
PERYTN12: 485 ms, 6144 paths, pentaerythritol tetranitrate
ZZZQSC : 19.2 ms, 48 paths, 2,6-Dinitrotoluene
CTMTNA14: 13.2 ms, 384 paths, rdx
MNTDMA : 4172 ms, 144 paths, m-Nitro-N,N-dimethylaniline
DNOPHL01: 11.8 ms, 4 paths, 2,4-Dinitrophenol
TNPHNT : 27 ms, 192 paths, 2,4,6-Trinitrophenetole
HIHHAH : 15.6 ms, 122 paths, 1-(3-Nitrophenyl)ethanone
SEDTUQ10: 5.42 ms, 64 paths, fox-7
SEDTUQ01: 8.65 ms, 64 paths, fox-7
PUBMUU : 782 ms, 512 paths, CL-20
PICRAC12: 14.2 ms, 16 paths, Picric acid
CAXNIY : 6170 ms, 2048 paths, 2,2-Dinitroadamantane
HOJCOB : 702 ms, 2048 paths, HMX
OCHTET03: 650 ms, 2048 paths, HMX
MNPHOL02: 4.65 ms, 2 paths, m-Nitrophenol
TNIOAN : 9.77 ms, 32 paths, 2,4,6-Trinitroaniline
EJIQEU01: 2.76 ms, 2 paths, 5-Amino-1H-tetrazole
TNBENZ13: 2.39 ms, 48 paths, 1,3,5-trinitrobenzene
global_min_result=True
TATB : 96 ms, 384 paths, tatb
QQQBRD02: 1000 ms, 4608 paths, hexanitroethane
1142965 : 77.3 ms, 16 paths, p-nitrobenzene
1211485 : 87.6 ms, 144 paths, N,N-dimethylnitramine
1142948 : 92.9 ms, 8 paths, m-Dinitrobenzene
ZZZMUC : 513 ms, 96 paths, TNT
ZZZFYW : 140 ms, 8 paths, 1,2-dinitrobenzene
CUBANE : 2000 ms, 240 paths, cubane
220699 : 2150 ms, 288 paths, N,N-dimethyl-4-nitroaniline
DATB : 358 ms, 64 paths, datb
HEVRUV : 41.5 ms, 48 paths, trinitromethane
PERYTN12: 5730 ms, 6144 paths, pentaerythritol tetranitrate
ZZZQSC : 234 ms, 48 paths, 2,6-Dinitrotoluene
CTMTNA14: 832 ms, 384 paths, rdx
MNTDMA : 1690 ms, 144 paths, m-Nitro-N,N-dimethylaniline
DNOPHL01: 93.4 ms, 4 paths, 2,4-Dinitrophenol
TNPHNT : 927 ms, 192 paths, 2,4,6-Trinitrophenetole
HIHHAH : 123 ms, 12 paths, 1-(3-Nitrophenyl)ethanone
SEDTUQ10: 68.9 ms, 64 paths, fox-7
SEDTUQ01: 49.5 ms, 64 paths, fox-7
PUBMUU : 25060 ms, 512 paths, CL-20
PICRAC12: 125 ms, 16 paths, Picric acid
CAXNIY : ms, paths, 2,2-Dinitroadamantane
HOJCOB : ms, 2048 paths, HMX
OCHTET03: ms, 2048 paths, HMX
MNPHOL02: ms, 2 paths, m-Nitrophenol
TNIOAN : ms, 32 paths, 2,4,6-Trinitroaniline
EJIQEU01: ms, 2 paths, 5-Amino-1H-tetrazole
TNBENZ13: ms, 48 paths, 1,3,5-trinitrobenzene
    As one can see above, for all the molecules in the dataset the global
    minimum number of paths is found using the simple assumptions described
    above, although it may be possible to conceive of a molecule that breaks
    this observation. The default for global minimum searching is therefore
    False, because the simple assumptions implemented here are very unlikely
    to miss the global minimum.
PAHs Dataset
--------------------------
0/89: BZPHAN
NUM ATOMS: 30
UNIQUE PATHS: 2
24.3 ms ± 3.84 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
1/89: CORANN12
NUM ATOMS: 30
UNIQUE PATHS: 20
489 ms ± 72.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
2/89: DBNTHR02
NUM ATOMS: 36
UNIQUE PATHS: 2
65 ms ± 879 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
3/89: CENYAV
NUM ATOMS: 66
UNIQUE PATHS: 8
156 ms ± 866 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
4/89: DBPHEN02
NUM ATOMS: 36
UNIQUE PATHS: 4
69.9 ms ± 1.59 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
5/89: ZZZNTQ01
NUM ATOMS: 62
UNIQUE PATHS: 128
88.7 ms ± 1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
6/89: KUBWAF01
NUM ATOMS: 46
UNIQUE PATHS: 8
107 ms ± 897 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
7/89: PERLEN07
NUM ATOMS: 32
UNIQUE PATHS: 8
93.7 ms ± 1.17 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
8/89: ABECAL
NUM ATOMS: 46
UNIQUE PATHS: 4
110 ms ± 2.44 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
9/89: BIFUOR
NUM ATOMS: 44
UNIQUE PATHS: 8
46.6 ms ± 460 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
10/89: CENXUO
NUM ATOMS: 66
UNIQUE PATHS: 8
174 ms ± 1.41 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
11/89: HPHBNZ03
NUM ATOMS: 72
UNIQUE PATHS: 768
253 ms ± 3.34 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
12/89: PEZPUG
NUM ATOMS: 52
UNIQUE PATHS: 2
83.1 ms ± 683 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
13/89: HBZCOR
NUM ATOMS: 60
UNIQUE PATHS: 24
1min 23s ± 15.5 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
14/89: KUBVUY
NUM ATOMS: 66
UNIQUE PATHS: 32
897 ms ± 138 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
15/89: ANTCEN
NUM ATOMS: 24
UNIQUE PATHS: 4
65.2 ms ± 10.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
16/89: BIPHNE01
NUM ATOMS: 20
UNIQUE PATHS: 8
77.7 ms ± 2.44 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
17/89: TRIPHE12
NUM ATOMS: 30
UNIQUE PATHS: 12
221 ms ± 2.85 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
18/89: SANQII
NUM ATOMS: 36
UNIQUE PATHS: 2
174 ms ± 18.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
19/89: VEHCAM
NUM ATOMS: 96
UNIQUE PATHS: 128
822 ms ± 20.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
20/89: HPHBNZ02
NUM ATOMS: 72
UNIQUE PATHS: 768
371 ms ± 6.65 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
21/89: FLUANT02
NUM ATOMS: 26
UNIQUE PATHS: 2
34.1 ms ± 1.57 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
22/89: RAKGOA
NUM ATOMS: 42
UNIQUE PATHS: 2
439 ms ± 4.44 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
23/89: KAGGEG
NUM ATOMS: 38
UNIQUE PATHS: 4
310 ms ± 6.84 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
24/89: QQQCIG04
NUM ATOMS: 70
UNIQUE PATHS: 64
338 ms ± 5.86 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
25/89: WOQPAT
NUM ATOMS: 54
UNIQUE PATHS: 2
1.75 s ± 56 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
26/89: TERPHE02
NUM ATOMS: 32
UNIQUE PATHS: 16
23.6 ms ± 415 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
27/89: XECJIZ
NUM ATOMS: 50
UNIQUE PATHS: 8
151 ms ± 1.74 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
28/89: NAPHTA04
NUM ATOMS: 18
UNIQUE PATHS: 4
13.5 ms ± 287 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
--------------------------
29/89: BEANTR
NUM ATOMS: 30
UNIQUE PATHS: 1
28.8 ms ± 898 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
30/89: VUFHUA
NUM ATOMS: 66
UNIQUE PATHS: 2
3.81 s ± 68.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
31/89: VEBJIW
NUM ATOMS: 56
UNIQUE PATHS: 16
157 ms ± 2.63 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
32/89: REBWIE
NUM ATOMS: 48
UNIQUE PATHS: 4
197 ms ± 1.86 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
33/89: QUATER10
NUM ATOMS: 60
UNIQUE PATHS: 4
6.39 s ± 173 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
34/89: VEBKAP
NUM ATOMS: 46
UNIQUE PATHS: 4
106 ms ± 1.27 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
35/89: DPANTR01
NUM ATOMS: 44
UNIQUE PATHS: 16
102 ms ± 2.22 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
36/89: DUPHAX
NUM ATOMS: 62
UNIQUE PATHS: 1
5.6 s ± 95.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
37/89: DNAPAN
NUM ATOMS: 48
UNIQUE PATHS: 2
347 ms ± 3.75 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
38/89: TBZPYR
NUM ATOMS: 44
UNIQUE PATHS: 2
498 ms ± 2.98 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
39/89: ZZZOYC01
NUM ATOMS: 36
UNIQUE PATHS: 2
88.6 ms ± 2.05 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
40/89: FOVVOB
NUM ATOMS: 52
UNIQUE PATHS: 32
50.1 ms ± 414 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
41/89: PUTVUV
NUM ATOMS: 102
UNIQUE PATHS: 512
177 ms ± 1.6 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
42/89: IZUCIP
NUM ATOMS: 52
UNIQUE PATHS: 8
307 ms ± 1.53 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
43/89: TETCEN01
NUM ATOMS: 30
UNIQUE PATHS: 4
48.2 ms ± 1.63 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
44/89: PYRCEN
NUM ATOMS: 26
UNIQUE PATHS: 64
216 ms ± 5.32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
45/89: PUNVEA
NUM ATOMS: 132
UNIQUE PATHS: 16384
3.08 s ± 136 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
46/89: BNPERY
NUM ATOMS: 34
UNIQUE PATHS: 2
293 ms ± 4.66 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
47/89: TERPHO02
NUM ATOMS: 32
UNIQUE PATHS: 1
7.01 ms ± 180 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
--------------------------
48/89: KIDJUD03
NUM ATOMS: 52
UNIQUE PATHS: 64
66.9 ms ± 792 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
49/89: PENCEN
NUM ATOMS: 36
UNIQUE PATHS: 4
108 ms ± 2.57 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
50/89: QUPHEN01
NUM ATOMS: 42
UNIQUE PATHS: 32
89.1 ms ± 2.28 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
51/89: PHENAN
NUM ATOMS: 24
UNIQUE PATHS: 2
30.8 ms ± 536 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
52/89: TBZPER
NUM ATOMS: 52
UNIQUE PATHS: 2
1.64 s ± 16.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
53/89: KAFVUI
NUM ATOMS: 38
UNIQUE PATHS: 1
124 ms ± 1.77 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
54/89: PEZPIU
NUM ATOMS: 52
UNIQUE PATHS: 1
95.2 ms ± 1.32 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
55/89: KAGFUV
NUM ATOMS: 56
UNIQUE PATHS: 4
876 ms ± 9.36 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
56/89: BOXGAW
NUM ATOMS: 56
UNIQUE PATHS: 1
4.21 s ± 67 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
57/89: PUNVIE
NUM ATOMS: 62
UNIQUE PATHS: 64
58.9 ms ± 582 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
58/89: FANNUL
NUM ATOMS: 28
UNIQUE PATHS: 28
88.6 ms ± 2.02 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
59/89: VEBJAO
NUM ATOMS: 136
UNIQUE PATHS: 4096
1min 2s ± 11.9 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
60/89: TEBNAP
NUM ATOMS: 42
UNIQUE PATHS: 4
229 ms ± 15.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
61/89: LIMCUF
NUM ATOMS: 72
UNIQUE PATHS: 256
649 ms ± 6.43 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
62/89: CORXAI10
NUM ATOMS: 52
UNIQUE PATHS: 1
1.44 s ± 23.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
63/89: CEQGEL
NUM ATOMS: 32
UNIQUE PATHS: 2
186 ms ± 3.82 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
64/89: CORONE01
NUM ATOMS: 36
UNIQUE PATHS: 96
1.79 s ± 122 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
65/89: SURTAA
NUM ATOMS: 36
UNIQUE PATHS: 1
241 ms ± 1.94 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
66/89: ZZZNKU01
NUM ATOMS: 52
UNIQUE PATHS: 64
66.3 ms ± 1.96 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
67/89: JULSOY
NUM ATOMS: 64
UNIQUE PATHS: 2
5.2 s ± 23 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
68/89: KAGFOP
NUM ATOMS: 58
UNIQUE PATHS: 8
656 ms ± 8.55 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
69/89: VEBJES
NUM ATOMS: 76
UNIQUE PATHS: 64
369 ms ± 2.69 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
70/89: BIGJUX
NUM ATOMS: 120
UNIQUE PATHS: 256
2 s ± 8.52 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
71/89: NAPANT01
NUM ATOMS: 52
UNIQUE PATHS: 4
2.06 s ± 335 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
72/89: YITJAN
NUM ATOMS: 40
UNIQUE PATHS: 2
62.9 ms ± 5.14 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
73/89: PUJQIV
NUM ATOMS: 34
UNIQUE PATHS: 2
32.9 ms ± 1.53 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
74/89: YOFCUR
NUM ATOMS: 68
UNIQUE PATHS: 8
1min 46s ± 8.95 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
75/89: FACPEE
NUM ATOMS: 68
UNIQUE PATHS: 16
1.1 s ± 149 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
76/89: TPHBEN01
NUM ATOMS: 42
UNIQUE PATHS: 48
70.7 ms ± 806 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
77/89: CRYSEN01
NUM ATOMS: 30
UNIQUE PATHS: 2
58.2 ms ± 361 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
78/89: POBPIG
NUM ATOMS: 48
UNIQUE PATHS: 4
1.4 s ± 30.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
79/89: PYRPYR10
NUM ATOMS: 46
UNIQUE PATHS: 4
601 ms ± 18.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
80/89: PERLEN05
NUM ATOMS: 32
UNIQUE PATHS: 8
119 ms ± 2.75 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
81/89: TBZHCE
NUM ATOMS: 64
UNIQUE PATHS: 4
2.72 s ± 12.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
82/89: KEGHEJ01
NUM ATOMS: 22
UNIQUE PATHS: 4
44.7 ms ± 206 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
83/89: BIPHEN
NUM ATOMS: 22
UNIQUE PATHS: 8
16 ms ± 70.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
--------------------------
84/89: PHNAPH
NUM ATOMS: 46
UNIQUE PATHS: 4
663 ms ± 5.01 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
85/89: DBZCOR
NUM ATOMS: 48
UNIQUE PATHS: 4
1.63 s ± 32.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
86/89: BNPYRE10
NUM ATOMS: 32
UNIQUE PATHS: 1
67.2 ms ± 523 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
--------------------------
87/89: VEBJOC
NUM ATOMS: 96
UNIQUE PATHS: 256
1.26 s ± 5.21 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
--------------------------
88/89: BENZEN
NUM ATOMS: 12
UNIQUE PATHS: 12
14.6 ms ± 275 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
    Based on the PAHs dataset, it becomes clear that the scaling of the algorithm
    has very little to do with the number of atoms in the molecule or even the
    number of unique paths. The scaling is primarily determined by the number
    of connected cycles in the molecule. Such chemical groups are relatively
    uncommon in all molecular classes besides organic electronics. Particularly
    interesting for this class of systems is that, despite being made up only
    of fused rings of carbon and hydrogen, some very large and complex molecules
    may have only one graphically symmetric path (BOXGAW), demonstrating the
    success of the general approach applied here.
"""
gmol = mol2graph(mol)
### Take care of the possibility of multiple molecules
component_idx_lists = []
for sub_idx in nx.components.connected_components(gmol):
sub_idx = [x for x in sub_idx]
sub_g = gmol.subgraph(sub_idx)
#### Reorder nodes in g according to sub_idx
g = nx.Graph()
g.add_nodes_from(range(len(sub_g)))
attr_dict = {}
lookup = {}
rlookup = {}
for iter_idx,temp_node_idx in enumerate(sub_g):
temp_attr = sub_g.nodes[temp_node_idx]
attr_dict[iter_idx] = temp_attr
lookup[iter_idx] = temp_node_idx
rlookup[temp_node_idx] = iter_idx
nx.set_node_attributes(g, attr_dict)
for temp_edge in sub_g.edges:
g.add_edge(rlookup[temp_edge[0]], rlookup[temp_edge[1]])
hash_list = [g.nodes[x]["hash"] for x in g.nodes]
unique_hash,counts = np.unique(hash_list, return_counts=True)
count_sort_idx = np.argsort(counts)
all_paths_list = []
for iter_idx,temp_idx in enumerate(count_sort_idx):
all_paths_list.append([])
temp_hash = unique_hash[temp_idx]
temp_hash_idx = np.where(hash_list == temp_hash)[0]
for temp_atom_idx in temp_hash_idx:
temp_paths = ordered_descriptor_traversal(g,temp_atom_idx)
all_paths_list[iter_idx] += temp_paths
if not global_min_result:
break
### Use the hash the provides the minimum number of unique orderings
num_paths = np.array([len(x) for x in all_paths_list])
min_paths_idx = np.argmin(num_paths)
min_paths = all_paths_list[min_paths_idx]
### Need to de-reference each of the paths wrt the subgraph
temp_final_paths = []
for temp_path in min_paths:
temp_deref_path = [lookup[x] for x in temp_path]
temp_final_paths.append(temp_deref_path)
component_idx_lists.append(temp_final_paths)
### Assume that connected components are not identical. This assumption
### may be incorrect for some systems. However, that may be updated later.
final_paths = [list(itertools.chain.from_iterable(x))
for x in itertools.product(*component_idx_lists)]
return final_paths
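### Hedged sketch of the RMSD use case described in the docstring above:
### evaluate the RMSD between two conformers over every graphically symmetric
### atom ordering and keep the minimum. Assumes the two molecules already share
### a common atom ordering convention (e.g. via match_labels) and that the node
### "hash" attributes consumed by get_symmetric_ordering are populated by
### mol2graph. Rigid-body alignment is deliberately omitted to keep it short.
def _demo_min_rmsd_over_symmetric_orderings(mol1, mol2):
    geo1 = mol1.get_geo_array()
    geo2 = mol2.get_geo_array()
    best = np.inf
    for order in get_symmetric_ordering(mol2):
        rmsd = np.sqrt(np.mean(np.sum((geo1 - geo2[order]) ** 2, axis=-1)))
        best = min(best, rmsd)
    return best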
def ordered_descriptor_traversal(
g,
node_idx,
current_path=None):
"""
Uses the message descriptors to make and ordered traversal of the molecule.
Traversal means that the path must be connected. Ordered means that provided
the same graph and starting position, the same traversal will always be
returned.
IN ADDITION, will return all possible graphically symmetric traversals.
This function will now keep track and append all symmetric traversals all
at once. This will create a significantly easier to use API compared to
what was built above.
    Symmetry switches are no longer returned. Instead, they are handled by
    creating a NEW path list for every symmetry switch. Each path created from
    a symmetry switch is then acted upon independently as it moves up the
    stack, so all symmetrically equivalent paths are built and returned at once.
Timing info:
- TATB: 48 ms
"""
    if current_path is None:
current_path = []
### Make copy of input current_path
current_path = [x for x in current_path]
    ### Get neighbors and sort them by their hash value, so that neighbors
    ### with the smallest hash values are always visited first
path = [node_idx]
current_path += [node_idx]
neigh = [x for x in g.adj[node_idx]]
neigh_hash = [g.nodes[x]["hash"] for x in neigh]
sorted_neigh_idx = np.argsort(neigh_hash)
neigh = [neigh[x] for x in sorted_neigh_idx]
### Build entire traversal list
neigh_path_list = []
for entry in neigh:
# print(node_idx, entry)
if entry in current_path:
continue
neigh_path = ordered_descriptor_traversal(
g,
entry,
current_path)
if len(neigh_path) > 0:
neigh_path_list.append(neigh_path)
    ### No unvisited neighbors remain, so just return this node
if len(neigh_path_list) == 0:
return [[node_idx]]
### Returned is a list of lists
symmetry_switches = combine_neigh_path_lists(
neigh_path_list,
g,
current_path)
sym_paths = []
for neigh_list_sorted in symmetry_switches:
sym_paths.append(path+neigh_list_sorted)
# print("-----------------------------------------")
# print("NODE", node_idx)
# print("NEIGH", neigh)
# if node_idx == 3:
# print("NEIGH PATH",
# neigh_path_list[0][0],
# neigh_path_list[1][1],
# neigh_path_list[2][2])
# print("CURRENT PATH", current_path)
# print("NUM PATHS", len(sym_paths), len(np.unique(np.vstack(sym_paths), axis=0)))
# # print("SYM", sym_paths)
# if len(sym_paths) > 64:
# raise Exception()
return sym_paths
def combine_neigh_path_lists(sym_neigh_path_list,
g,
current_path):
"""
Returned is a list of lists which describes all possible symmetry
switches for the given neigh_path_lists
Input is a list like this [[[6]],[[7]]] that must be combined together and
returned providing all symmetric orders as separate paths like this
[[6,7],[7,6]] if 6 and 7 are graphically symmetric. If 6 and 7 are not
graphically symmetric, then the one that has a smaller hash will always
appear first. For example [[6,7]] if 6 has the smaller hash.
    Symmetry switches, like railroad switches on a track, send the ordering
    down a different path around the molecule. "Symmetry" refers to the fact
    that this different order yields a graphically symmetric path, so even
    though a switch occurs the outcome is symmetric.
"""
### First build data structures
all_path_list = [] ### Concatenated list of input
neigh_ref_idx_list = [] ### Stores which list each path comes from
lookup = {} ### Stores lookup for each ref
for outer_idx,sym_path_list in enumerate(sym_neigh_path_list):
lookup[outer_idx] = []
for temp_path in sym_path_list:
all_path_list.append(np.array(temp_path))
neigh_ref_idx_list.append(outer_idx)
lookup[outer_idx].append(len(all_path_list)-1)
### Sort the neighbor lists by the hash value of the first entry. In
### addition, find which neighbors share this minimum hash value, which
### are graphically symmetry atoms thus indicating a symmetry switch
hash_ref = np.array([g.nodes[x[0][0]]["hash"] for x in sym_neigh_path_list])
hash_ref_sort_idx = np.argsort(hash_ref)
hash_list = np.array([g.nodes[x[0]]["hash"] for x in all_path_list])
hash_sort_idx = np.argsort(hash_list)
unique_hash,counts = np.unique(hash_list,return_counts=True)
min_hash = hash_list[hash_sort_idx[0]]
### In addition, build a data-structure that will be able to identify
### which ref actually add atoms to the path for each possible permutation
meaningful_dict = {}
for ref_list in itertools.permutations(list(range(len(sym_neigh_path_list)))):
used_idx = {}
temp_use_ref_list = []
keep_idx = []
for iter_idx,temp_ref in enumerate(ref_list):
meaningful = False
temp_idx_list = sym_neigh_path_list[temp_ref][0]
for temp_idx in temp_idx_list:
if temp_idx not in used_idx:
meaningful = True
used_idx[temp_idx] = True
if meaningful:
temp_use_ref_list.append(temp_ref)
keep_idx.append(iter_idx)
meaningful_dict[tuple(ref_list)] = {"use": temp_use_ref_list,
"keep_idx": keep_idx}
### Make list by combining all unique hashes together while also ensuring
### all neigh_ref are included
all_idx_list = [[]]
for iter_idx,temp_hash in enumerate(unique_hash):
### Find which neighbor lists are possible for this hash
temp_hash_idx = | np.where(hash_list == temp_hash) | numpy.where |
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from tools import utils
import datetime
import io
import time
import shutil
import boto
import botocore.config
import xarray as xr
import numpy as np
import pandas as pd
import scipy.misc
import psutil
from joblib import delayed, Parallel
import torch
from torch.utils.data import Dataset, DataLoader
import tools
## Interact with NOAA GOES ABI dataset via S3 and local paths
class NOAAGOESS3(object):
'<Key: noaa-goes16,ABI-L1b-RadC/2000/001/12/OR_ABI-L1b-RadC-M3C01_G16_s20000011200000_e20000011200000_c20170671748180.nc>'
def __init__(self, product='ABI-L1b-RadM', channels=range(1,17),
save_directory='./GOES16',
skip_connection=False):
self.bucket_name = 'noaa-goes16'
self.product = product
self.channels = channels
self.save_directory = os.path.join(save_directory, product)
if not skip_connection:
self._connect_to_s3()
def _connect_to_s3(self):
config = botocore.config.Config(connect_timeout=5, retries={'max_attempts': 1})
self.conn = boto.connect_s3() #host='s3.amazonaws.com', config=config)
self.goes_bucket = self.conn.get_bucket(self.bucket_name)
def year_day_pairs(self):
'''
Gets all year and day pairs in S3 for the given product
Return:
list of pairs
'''
days = []
for key_year in self.goes_bucket.list(self.product+"/", "/"):
y = int(key_year.name.split('/')[1])
if y == 2000:
continue
for key_day in self.goes_bucket.list(key_year.name, "/"):
d = int(key_day.name.split('/')[2])
days += [(y, d)]
return days
def day_keys(self, year, day, hours=range(12,24)):
keybase = '%(product)s/%(year)04i/%(day)03i/' % dict(product=self.product,
year=year, day=day)
data = []
for key_hour in self.goes_bucket.list(keybase, "/"):
hour = int(key_hour.name.split('/')[3])
if hour not in hours:
continue
for key_nc in self.goes_bucket.list(key_hour.name, '/'):
fname = key_nc.name.split('/')[4]
info = fname.split('-')[3]
c, g, t, _, _ = info.split("_")
spatial = fname.split('-')[2]
c = int(c[3:])
if c not in self.channels:
continue
minute = int(t[10:12])
second = int(t[12:15])
data.append(dict(channel=c, year=year, day=day, hour=hour,
minute=minute, second=second, spatial=spatial,
keyname=key_nc.name))
#if len(data) > 100:
# break
return pd.DataFrame(data)
def _open_file(self, f, normalize=True):
try:
ds = xr.open_dataset(f)
except IOError:
os.remove(f)
return None
if normalize:
mn = ds['min_radiance_value_of_valid_pixels'].values
mx = ds['max_radiance_value_of_valid_pixels'].values
ds['Rad'] = (ds['Rad'] - mn) / (mx - mn)
return ds
def download_from_s3(self, keyname, directory):
if not os.path.exists(directory):
os.makedirs(directory)
k = boto.s3.key.Key(self.goes_bucket)
k.key = keyname
data_file = os.path.join(directory, os.path.basename(keyname))
if os.path.exists(data_file):
pass
elif k.exists():
print("writing file to {}".format(data_file))
k.get_contents_to_filename(data_file)
else:
data_file = None
return data_file
def read_nc_from_s3(self, keyname, normalize=True):
data_file = self.download_from_s3(keyname, self.save_directory)
if data_file is not None:
ds = self._open_file(data_file, normalize=normalize)
            if ds is None:
                return self.read_nc_from_s3(keyname, normalize=normalize)
else:
ds = None
data_file = None
return ds, data_file
def download_day(self, year, day, hours=range(12, 25)):
'''
Downloads all files for a given year and dayofyear for the defined channels
'''
keys_df = self.day_keys(year, day, hours=hours)
for i, row in keys_df.iterrows():
save_dir= os.path.join(self.save_directory,
'%04i/%03i/%02i/' % (year, day, row.hour))
data_file = self.download_from_s3(row.keyname, save_dir)
def local_files(self, year=None, dayofyear=None):
tmp_path = os.path.join(os.path.dirname(__file__), '.cache')
filelist_file = tmp_path + '/localfilelist_{}_{}_{}.pkl'.format(self.product, year, dayofyear)
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
if False: #os.path.exists(filelist_file):
data = pd.read_pickle(filelist_file)
else:
data = []
base_dir = self.save_directory
if year is not None:
base_dir = os.path.join(base_dir, '%04i' % year)
if dayofyear is not None:
base_dir = os.path.join(base_dir, '%03i' % dayofyear)
#for f in os.listdir(self.save_directory):
if not os.path.exists(base_dir):
return pd.DataFrame()
for directory, folders, files in os.walk(base_dir):
for f in files:
if (f[-3:] == '.nc') and ("L1b" in f):
meta = get_filename_metadata(f)
meta['file'] = os.path.join(directory, f)
data.append(meta)
data = pd.DataFrame(data)
if len(data) > 0:
data = data.set_index(['year', 'dayofyear', 'hour', 'minute', 'second', 'spatial'])
data = data.pivot(columns='channel')
data.to_pickle(filelist_file)
return data
def iterate_day(self, year, day, hours=range(12,25), max_queue_size=5, min_queue_size=3,
normalize=True):
# get locally stored files
file_df = self.local_files(year, day)
print('files', file_df)
if len(file_df) == 0: return
grouped_spatial = file_df.groupby(by=['spatial'])
for sname, sgrouped in grouped_spatial:
running_samples = []
for idx, row in sgrouped.iterrows():
# (2018, 1, 12, 0, 577, 'RadM2')
year, day, hour, minute, second, _ = idx
if hour not in hours:
running_samples = []
continue
files = row['file'][self.channels]
try:
da = _open_and_merge_2km(files.values, normalize=normalize)
running_samples.append(da)
except ValueError as err:
print("Error: {}".format(err))
running_samples = []
continue
#if len(running_samples) > 1:
# running_samples[-1]['x'].values = running_samples[0]['x'].values
# running_samples[-1]['y'].values = running_samples[0]['y'].values
is_x = np.all(running_samples[-1]['x'].values == running_samples[0]['x'].values)
is_y = np.all(running_samples[-1]['y'].values == running_samples[0]['y'].values)
if (not is_x) or (not is_y):
running_samples = []
continue
if len(running_samples) == max_queue_size:
try:
yield xr.concat(running_samples, dim='t')
except Exception as err:
print([type(el) for el in running_samples])
print(err)
while len(running_samples) > min_queue_size:
running_samples.pop(0)
def write_pytorch_examples(self, example_directory, year, day, force=False,
patch_width=128+6):
counter = 0
#if (self._check_directory(year, day)) and (not force): return
# save blocks such that 15 minutes (16 timestamps) + 4 for randomness n=20
# overlap by 5 minutes
data_iterator = self.iterate_day(year, day, max_queue_size=20,
min_queue_size=5)
for data in data_iterator:
blocked_data = utils.blocks(data, width=patch_width)
for b in blocked_data:
if np.all(np.isfinite(b)):
fname = "%04i_%03i_%07i.npy" % (year, day, counter)
save_file = os.path.join(example_directory, fname)
print("saved file: %s" % save_file)
np.save(save_file, b)
counter += 1
else:
pass
def load_snapshots(self, year, dayofyear, hour, minute, minute_delta):
files = self.local_files(year=year, dayofyear=dayofyear)
channel_idxs = [c-1 for c in self.channels]
I0files = files.loc[year, dayofyear, hour, minute].values[0,channel_idxs]
I1files = files.loc[year, dayofyear, hour, minute+minute_delta].values[0,channel_idxs]
I0 = _open_and_merge_2km(I0files)
I1 = _open_and_merge_2km(I1files)
return I0, I1
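# Minimal usage sketch for NOAAGOESS3 (the date and save_directory below are
# placeholders, not values taken from this project). Downloads one day of
# mesoscale radiance files and iterates over merged blocks of snapshots.
def _example_goes_day(year=2018, dayofyear=1, save_directory='./GOES16'):
    goes = NOAAGOESS3(product='ABI-L1b-RadM', channels=range(1, 17),
                      save_directory=save_directory)
    goes.download_day(year, dayofyear)
    for block in goes.iterate_day(year, dayofyear, max_queue_size=5, min_queue_size=3):
        print(block)
        break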
## Interpolation Training Dataset on NOAA GOES S3 data
class GOESDataset(Dataset):
def __init__(self, example_directory, n_upsample=9, n_overlap=5, train=True):
self.example_directory = example_directory
if not os.path.exists(self.example_directory):
os.makedirs(self.example_directory)
self._example_files()
self.n_upsample = n_upsample
self.n_overlap = n_overlap
self.train = train
def _example_files(self):
self.example_files = [os.path.join(self.example_directory, f) for f in
os.listdir(self.example_directory) if 'npy' == f[-3:]]
self.N_files = len(self.example_files)
def _check_directory(self, year, day):
yeardayfiles = [f for f in self.example_files if '%4i_%03i' % (year, day) in f]
if len(list(yeardayfiles)) > 0:
return True
return False
def transform(self, block):
n_select = self.n_upsample + 1
# randomly shift temporally
i = np.random.choice(range(0,self.n_overlap))
block = block[i:n_select+i]
# randomly shift vertically
i = np.random.choice(range(0,6))
block = block[:,:,i:i+128]
# randomly shift horizontally
i = np.random.choice(range(0,6))
block = block[:,:,:,i:i+128]
# randomly rotate image
k = int(( | np.random.uniform() | numpy.random.uniform |
from PIL import Image, ImageEnhance
import numpy as np
import sys
import os
import pathlib
def get_sharpness(imfile):
im = Image.open(imfile).convert('L') # to grayscale
array = np.asarray(im, dtype=np.int32)
gy, gx = np.gradient(array)
gnorm = | np.sqrt(gx**2 + gy**2) | numpy.sqrt |
import os
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from logger import Logger, Visualizer
import numpy as np
import imageio
from sync_batchnorm import DataParallelWithCallback
import dlib
from cropped import feature_generator
import matplotlib.pyplot as plt
def reconstruction(config, generator, kp_detector, checkpoint, log_dir, dataset):
png_dir = os.path.join(log_dir, 'reconstruction/png')
log_dir = os.path.join(log_dir, 'reconstruction')
if checkpoint is not None:
Logger.load_cpk(checkpoint, generator=generator, kp_detector=kp_detector)
else:
raise AttributeError("Checkpoint should be specified for mode='reconstruction'.")
dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(png_dir):
os.makedirs(png_dir)
loss_list = []
if torch.cuda.is_available():
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
seg_model = r'D:\homework\shape_predictor_68_face_landmarks.dat'
predictor = dlib.shape_predictor(seg_model)
#!!!!!!!!!!!!!!!!!!!!!!!!!
for it, x in tqdm(enumerate(dataloader)):
if config['reconstruction_params']['num_videos'] is not None:
if it > config['reconstruction_params']['num_videos']:
break
with torch.no_grad():
predictions = []
visualizations = []
if torch.cuda.is_available():
x['video'] = x['video'].cuda()
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
img = x['video'][:, :, 0][0]
img = img.transpose(0,1)
img = img.transpose(1,2)
img = np.array(img.cpu())
img = img*255
img = img.astype('uint8')
kpoint = feature_generator(img, predictor)
print('kpoint:', type(kpoint[1]))
# print(kpoint[1])
plt.imshow(img)
plt.show()
kp_source = kp_detector(x['video'][:, :, 0], kpoint[1].unsqueeze(dim=0))
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
for frame_idx in range(x['video'].shape[2]):
source = x['video'][:, :, 0]
driving = x['video'][:, :, frame_idx]
kp_driving = kp_detector(driving, kpoint[1].unsqueeze(dim=0))
out = generator(source, kp_source=kp_source, kp_driving=kp_driving)
out['kp_source'] = kp_source
out['kp_driving'] = kp_driving
del out['sparse_deformed']
predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
visualization = Visualizer(**config['visualizer_params']).visualize(source=source,
driving=driving, out=out)
visualizations.append(visualization)
loss_list.append(torch.abs(out['prediction'] - driving).mean().cpu().numpy())
predictions = | np.concatenate(predictions, axis=1) | numpy.concatenate |
import numpy as np
#Withness is a measure of how well 1D data clusters into two groups
############################################
def withness(x):
x = | np.sort(x) | numpy.sort |
import sys
import numpy as np
from .. import face
from ..utils import utils
from ..utils.recognize import distance
class Face_test:
def __init__(self, model):
self.model = model
self.labels = np.array(model['labels'])
self.encodes = | np.array(model['encodes']) | numpy.array |
"""Module defining functions to run amset."""
from __future__ import annotations
import logging
import subprocess
import numpy as np
from pydash import get
# tensor_average (used in check_converged) is assumed to be provided by amset's utilities
from amset.util import tensor_average
__all__ = ["run_amset", "check_converged"]
logger = logging.getLogger(__name__)
_CONVERGENCE_PROPERTIES = ("mobility.overall", "seebeck")
def run_amset():
"""Run amset in the current directory."""
# Run AMSET using the command line as calling from python can cause issues
# with multiprocessing
with open("std_out.log", "w") as f_std, open("std_err.log", "w") as f_err:
subprocess.call(["amset", "run"], stdout=f_std, stderr=f_err)
def check_converged(
new_transport: dict,
old_transport: dict,
properties: tuple[str, ...] = _CONVERGENCE_PROPERTIES,
tolerance: float = 0.1,
) -> bool:
"""
Check if all transport properties (averaged) are converged within the tol.
Parameters
----------
new_transport : dict
The new transport data.
old_transport : dict
The old transport data.
properties : tuple of str
List of properties for which convergence is assessed. The calculation is only
flagged as converged if all properties pass the convergence checks. Options are:
"conductivity", "seebeck", "mobility.overall", "electronic thermal conductivity.
tolerance : float
Relative convergence tolerance. Default is ``0.1`` (i.e. 10 %).
Returns
-------
bool
Whether the new transport data is converged.
"""
converged = True
for prop in properties:
new_prop = get(new_transport, prop, None)
old_prop = get(old_transport, prop, None)
if new_prop is None or old_prop is None:
logger.info(f"'{prop}' not in new or old transport data, skipping...")
continue
new_avg = tensor_average(new_prop)
old_avg = tensor_average(old_prop)
diff = np.abs((new_avg - old_avg) / new_avg)
diff[~ | np.isfinite(diff) | numpy.isfinite |
from datetime import datetime
from datetime import timedelta
import pytz
import time
import numpy as np
import json
import pandas as pd
from fitness_all import fitnessOfPath
import random
from scipy.stats import truncnorm
from matplotlib import pyplot as plt
def evaluate_fitness_of_all(generation,sessions,travel_time,daysotw,timezones,dictionary):
m = np.size(generation,1)
total_time = []
for i in range(m):
gen_time = 0
path = generation.astype(int)
path = path[:,i]
listpath = str(path.tolist())
if listpath in dictionary:
fitness_path = dictionary[listpath]
else:
fitness_path = fitnessOfPath(path,sessions,travel_time,daysotw,timezones)
dictionary[listpath] = fitness_path
total_time.append(fitness_path)
return total_time
def runExperiment(cross_percent_ordered,cross_percent_swap,mutat_percent,num_gen,gen_size,tourneykeep,dictionary,sessions,travel_time,daysotw,timezones,all_history,all_fitness,all_times,xopts,fopts,all_iterations):
start = time.time()
tourny_size = 2
num_temples = len(timezones)
old_gen = np.zeros((num_temples,gen_size))
parents = np.zeros((2,))
children = np.zeros((num_temples,2))
for i in range(gen_size):
col = np.random.permutation(num_temples)
old_gen[:,i] = np.transpose(col)
initial_gen = old_gen
initial_fit = evaluate_fitness_of_all(old_gen, sessions, travel_time, daysotw, timezones, dictionary)
prev_fit = np.array(initial_fit)
# Generation For Loop
fitness_history = []
best_history = []
prev_fit_one_behind = 20000000000000000
end_timer = 0
for gen in range(num_gen):
# Child Generation For loop
old_fit = prev_fit.tolist()
# Do a tournament
new_gen = np.zeros((num_temples,gen_size*2))
for i in range(int(gen_size)):
# Two tournaments for the two parents
for j in range(2):
# Select Parents (By fitness) (Tournament Style)
tourny_participants = random.sample(list(range(gen_size)), tourny_size)
arg = np.argmin(np.array(old_fit)[tourny_participants])
if(np.random.rand(1)>tourneykeep):
del tourny_participants[arg]
parents[j] = np.copy(tourny_participants[0])
else:
parents[j]= np.copy(tourny_participants[arg])
children[:,0] = np.copy(old_gen[:,np.copy(int(parents[0]))])
children[:,1] = np.copy(old_gen[:,np.copy(int(parents[1]))])
if end_timer > 200:
if np.array_equal(children[:,0],children[:,1]):
children[:,1] = np.random.permutation(num_temples)
#Crossover (Uniform) (With chromosome repair)
for j in range(num_temples): #Iterate through the genes of the children.
# TODO preallocate random numbers in the beginning
if np.random.rand(1) < cross_percent_swap:
#Store the genes
temp1 = np.copy(children[j][0]) #Temporarily store child one's gene
temp2 = np.copy(children[j][1])
#Child one gene swap and chromosome repair
gene_loc_1 = np.argwhere(children[:,0]==temp2).flatten()[0] #Find the location of the gene to be swapped
gene_loc_2 = np.argwhere(children[:,1]==temp1).flatten()[0]
children[gene_loc_1][0] = np.copy(temp1)
children[j][0] = np.copy(temp2)
children[gene_loc_2][1] = np.copy(temp2)
children[j][1] = np.copy(temp1)
#Ordered Crossover
crossover_values = []
for j in range(num_temples): #Iterate through the genes of the children.
if np.random.rand(1) < cross_percent_ordered:
crossover_values.append(j)
# array of the order of the values of the first parent
if len(crossover_values) != 0:
child1 = children[:,0]
child2 = children[:,1]
indices1 = np.sort([np.where(child1==cv)[0][0] for cv in crossover_values])
indices2 = np.sort([np.where(child2==cv)[0][0] for cv in crossover_values])
temp1 = np.copy(child1)
temp2 = np.copy(child2)
child1[indices1] = np.copy(temp2[indices2])
child2[indices2] = np.copy(temp1[indices1])
#Mutation (Uniform)
for chil in range(2):
for j in range(num_temples): #Iterate through the genes of the children.
if np.random.rand(1) < mutat_percent:
# Child gene insertion
mutated_value = np.random.randint(0,num_temples)
if mutated_value == children[j,chil]:
continue
gene_loc_mutate = np.argwhere(children[:,chil]==mutated_value).flatten()[0]
child = children[:,chil]
updated_child = np.insert(child,j,mutated_value)
if j > gene_loc_mutate:
child = np.delete(updated_child,gene_loc_mutate)
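# Toy illustration (not part of the original script) of the "swap with
# chromosome repair" idea used in the uniform crossover above: when a gene
# value is copied between two permutation chromosomes, the duplicate it would
# create is repaired by swapping positions inside each child, so both children
# remain valid permutations.
def swap_gene_with_repair(child_a, child_b, j):
    a, b = child_a.copy(), child_b.copy()
    va, vb = a[j], b[j]
    a[np.where(a == vb)[0][0]] = va   # repair the duplicate created in child a
    a[j] = vb
    b[np.where(b == va)[0][0]] = vb   # repair the duplicate created in child b
    b[j] = va
    return a, b

# Example: both outputs remain permutations of 0..4.
# swap_gene_with_repair(np.array([0, 1, 2, 3, 4]), np.array([4, 3, 2, 1, 0]), 1)
# -> (array([0, 3, 2, 1, 4]), array([4, 1, 2, 3, 0]))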
'''
Utility functions
'''
import argparse
import numpy as np
class ListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
values = [float(x)
for x in values.replace('[', '').replace(']', '').split(',')]
setattr(namespace, self.dest, values)
def print_statistics(results, mins, iterations, max_iterations, function):
'''
Print some statistics related to multiple runtimes
of the implemented algorithms
'''
best_solution = np.argmin(mins)
import sys
import numpy as np
from gym import spaces
import gym
def PAdam(A, b, x_start, length, bound=10, step_size=1.0):
obj_vals = []
def Obj_func(x):
return np.dot(np.dot(A, x), x) + np.dot(x, b)
def Grad_eval(x):
return np.dot(A + A.transpose(), x).flatten() + b
x = x_start
time_since_grad_reset = 0
beta_1 = 0.9
beta_2 = 0.999
grad_mean = 0
grad_var = 0
for i in range(length):
obj_vals.append(Obj_func(x))
epsilon = 1e-8
current_grad = Grad_eval(x)
grad_mean = beta_1 * grad_mean + (1.0 - beta_1) * current_grad
grad_var = beta_2 * grad_var + (1.0 - beta_2) * np.square(current_grad)
time_since_grad_reset += 1
t = time_since_grad_reset # t is really t+1 here
mean_hat = grad_mean / (1 - beta_1 ** t)
var_hat = grad_var / (1 - beta_2 ** t)
step_size = 1.0  # note: this reassignment overrides the step_size argument on every iteration
x_action_delta = step_size * np.divide(mean_hat, np.sqrt(var_hat) + epsilon)
# The clipping operation could cause issues with adam from a motivational stand point
x = np.clip(x - x_action_delta, -bound, bound)
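# Hypothetical usage sketch (not from the original file): minimise the box-
# constrained quadratic x^T A x + b^T x with the projected-Adam routine above.
# The quadratic below is an assumed toy problem; the obj_vals tracked inside
# PAdam should decrease towards the constrained minimum.
if __name__ == "__main__":
    A_demo = np.array([[2.0, 0.0], [0.0, 1.0]])
    b_demo = np.array([-4.0, 2.0])
    PAdam(A_demo, b_demo, x_start=np.array([5.0, -5.0]), length=200)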
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 20:05:57 2019
@author: rulix
"""
import os
import logging
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.basicConfig(
format='%(asctime)-15s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.cluster import KMeans
import sklearn
trn_dir = '../data_small/trn/'
val_dir = '../data_small/val/'
tst_dir = '../data_small/tst/'
def metric(y_true, y_pred):
kc = sklearn.metrics.cohen_kappa_score(y_true, y_pred)
oa = sklearn.metrics.accuracy_score(y_true, y_pred)
return oa, kc
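# Quick illustration (added, not part of the original script): with 4 of 5
# labels matching, overall accuracy is 0.8 and Cohen's kappa is ~0.67 once
# chance agreement is discounted.
if __name__ == '__main__':
    _oa, _kc = metric(np.array([0, 0, 1, 1, 2]), np.array([0, 0, 1, 1, 1]))
    print(_oa, _kc)  # 0.8 0.666...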
def LoadNpy(filename=None):
npy = np.load(file=filename)
image_t1 = npy['image_t1']
image_t1 = image_t1.astype(np.float32)/np.max(image_t1)
import unittest
import numpy as np
import pandas as pd
from minotor.data_managers.data_types import DataType
class TestDataType(unittest.TestCase):
def test_numpy_float2data_type(self):
arr = np.array([[1.0, 1.2]])
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
'''Tests for io.py'''
import pytest
import numpy as np
import os
import warnings
import shutil
import copy
from collections import OrderedDict as odict
import pyuvdata
from pyuvdata import UVCal, UVData, UVFlag
from pyuvdata.utils import parse_polstr, parse_jpolstr
import glob
import sys
from .. import io
from ..io import HERACal, HERAData
from ..datacontainer import DataContainer
from ..utils import polnum2str, polstr2num, jnum2str, jstr2num
from ..data import DATA_PATH
from hera_qm.data import DATA_PATH as QM_DATA_PATH
class Test_HERACal(object):
def setup_method(self):
self.fname_xx = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
self.fname_yy = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits")
self.fname_both = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.HH.uvcA.omni.calfits")
self.fname_t0 = os.path.join(DATA_PATH, 'test_input/zen.2458101.44615.xx.HH.uv.abs.calfits_54x_only')
self.fname_t1 = os.path.join(DATA_PATH, 'test_input/zen.2458101.45361.xx.HH.uv.abs.calfits_54x_only')
self.fname_t2 = os.path.join(DATA_PATH, 'test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only')
def test_init(self):
hc = HERACal(self.fname_xx)
assert hc.filepaths == [self.fname_xx]
hc = HERACal([self.fname_xx, self.fname_yy])
assert hc.filepaths == [self.fname_xx, self.fname_yy]
hc = HERACal((self.fname_xx, self.fname_yy))
assert hc.filepaths == [self.fname_xx, self.fname_yy]
with pytest.raises(TypeError):
hc = HERACal([0, 1])
with pytest.raises(ValueError):
hc = HERACal(None)
def test_read(self):
# test one file with both polarizations and a non-None total quality array
hc = HERACal(self.fname_both)
gains, flags, quals, total_qual = hc.read()
uvc = UVCal()
uvc.read_calfits(self.fname_both)
np.testing.assert_array_equal(uvc.gain_array[0, 0, :, :, 0].T, gains[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.flag_array[0, 0, :, :, 0].T, flags[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.quality_array[0, 0, :, :, 0].T, quals[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.total_quality_array[0, :, :, 0].T, total_qual[parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(np.unique(uvc.freq_array), hc.freqs)
np.testing.assert_array_equal(np.unique(uvc.time_array), hc.times)
assert hc.pols == [parse_jpolstr('jxx', x_orientation=hc.x_orientation), parse_jpolstr('jyy', x_orientation=hc.x_orientation)]
assert set([ant[0] for ant in hc.ants]) == set(uvc.ant_array)
# test list loading
hc = HERACal([self.fname_xx, self.fname_yy])
gains, flags, quals, total_qual = hc.read()
assert len(gains.keys()) == 36
assert len(flags.keys()) == 36
assert len(quals.keys()) == 36
assert hc.freqs.shape == (1024,)
assert hc.times.shape == (3,)
assert sorted(hc.pols) == [parse_jpolstr('jxx', x_orientation=hc.x_orientation), parse_jpolstr('jyy', x_orientation=hc.x_orientation)]
def test_read_select(self):
# test read multiple files and select times
hc = io.HERACal([self.fname_t0, self.fname_t1, self.fname_t2])
g, _, _, _ = hc.read()
g2, _, _, _ = hc.read(times=hc.times[30:90])
np.testing.assert_array_equal(g2[54, 'Jee'], g[54, 'Jee'][30:90, :])
# test read multiple files and select freqs/chans
hc = io.HERACal([self.fname_t0, self.fname_t1, self.fname_t2])
g, _, _, _ = hc.read()
g2, _, _, _ = hc.read(frequencies=hc.freqs[0:100])
g3, _, _, _ = hc.read(freq_chans=np.arange(100))
np.testing.assert_array_equal(g2[54, 'Jee'], g[54, 'Jee'][:, 0:100])
np.testing.assert_array_equal(g3[54, 'Jee'], g[54, 'Jee'][:, 0:100])
# test select on antenna numbers
hc = io.HERACal([self.fname_xx, self.fname_yy])
g, _, _, _ = hc.read(antenna_nums=[9, 10])
hc2 = io.HERACal(self.fname_both)
g2, _, _, _ = hc.read(antenna_nums=[9, 10])
for k in g2:
assert k[0] in [9, 10]
np.testing.assert_array_equal(g[k], g2[k])
# test select on pols
hc = io.HERACal(self.fname_xx)
g, _, _, _ = hc.read()
hc2 = io.HERACal(self.fname_both)
g2, _, _, _ = hc.read(pols=['Jee'])
for k in g2:
np.testing.assert_array_equal(g[k], g2[k])
def test_write(self):
hc = HERACal(self.fname_both)
gains, flags, quals, total_qual = hc.read()
for key in gains.keys():
gains[key] *= 2.0 + 1.0j
flags[key] = np.logical_not(flags[key])
quals[key] *= 2.0
for key in total_qual.keys():
total_qual[key] *= 2
hc.update(gains=gains, flags=flags, quals=quals, total_qual=total_qual)
hc.write_calfits('test.calfits', clobber=True)
gains_in, flags_in, quals_in, total_qual_in = hc.read()
hc2 = HERACal('test.calfits')
gains_out, flags_out, quals_out, total_qual_out = hc2.read()
for key in gains_in.keys():
np.testing.assert_array_equal(gains_in[key] * (2.0 + 1.0j), gains_out[key])
np.testing.assert_array_equal(np.logical_not(flags_in[key]), flags_out[key])
np.testing.assert_array_equal(quals_in[key] * (2.0), quals_out[key])
for key in total_qual.keys():
np.testing.assert_array_equal(total_qual_in[key] * (2.0), total_qual_out[key])
os.remove('test.calfits')
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.filterwarnings("ignore:invalid value encountered in double_scalars")
class Test_HERAData(object):
def setup_method(self):
self.uvh5_1 = os.path.join(DATA_PATH, "zen.2458116.61019.xx.HH.XRS_downselected.uvh5")
self.uvh5_2 = os.path.join(DATA_PATH, "zen.2458116.61765.xx.HH.XRS_downselected.uvh5")
self.miriad_1 = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
self.miriad_2 = os.path.join(DATA_PATH, "zen.2458043.13298.xx.HH.uvORA")
self.uvfits = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvA.vis.uvfits')
self.four_pol = [os.path.join(DATA_PATH, 'zen.2457698.40355.{}.HH.uvcA'.format(pol))
for pol in ['xx', 'yy', 'xy', 'yx']]
def test_init(self):
# single uvh5 file
hd = HERAData(self.uvh5_1)
assert hd.filepaths == [self.uvh5_1]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is not None
assert len(hd.freqs) == 1024
assert len(hd.bls) == 3
assert len(hd.times) == 60
assert len(hd.lsts) == 60
assert hd._writers == {}
# multiple uvh5 files
files = [self.uvh5_1, self.uvh5_2]
hd = HERAData(files)
individual_hds = {files[0]: HERAData(files[0]), files[1]: HERAData(files[1])}
assert hd.filepaths == files
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is not None
for f in files:
assert len(hd.freqs[f]) == 1024
np.testing.assert_array_equal(hd.freqs[f], individual_hds[f].freqs)
assert len(hd.bls[f]) == 3
np.testing.assert_array_equal(hd.bls[f], individual_hds[f].bls)
assert len(hd.times[f]) == 60
np.testing.assert_array_equal(hd.times[f], individual_hds[f].times)
assert len(hd.lsts[f]) == 60
np.testing.assert_array_equal(hd.lsts[f], individual_hds[f].lsts)
assert not hasattr(hd, '_writers')
# miriad
hd = HERAData(self.miriad_1, filetype='miriad')
assert hd.filepaths == [self.miriad_1]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is None
# uvfits
hd = HERAData(self.uvfits, filetype='uvfits')
assert hd.filepaths == [self.uvfits]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is None
# test errors
with pytest.raises(TypeError):
hd = HERAData([1, 2])
with pytest.raises(ValueError):
hd = HERAData(None)
with pytest.raises(NotImplementedError):
hd = HERAData(self.uvh5_1, 'not a real type')
with pytest.raises(IOError):
hd = HERAData('fake path')
def test_add(self):
hd = HERAData(self.uvh5_1)
hd.read()
# test add
hd2 = copy.deepcopy(hd)
hd2.polarization_array[0] = -6
hd3 = hd + hd2
assert len(hd3._polnum_indices) == 2
def test_select(self):
hd = HERAData(self.uvh5_1)
hd.read()
hd2 = copy.deepcopy(hd)
hd2.polarization_array[0] = -6
hd += hd2
# blt select
d1 = hd.get_data(53, 54, 'xx')
hd.select(bls=[(53, 54)])
assert len(hd._blt_slices) == 1
d2 = hd.get_data(53, 54, 'xx')
np.testing.assert_array_almost_equal(d1, d2)
hd.select(times=np.unique(hd.time_array)[-5:])
d3 = hd.get_data(53, 54, 'xx')
np.testing.assert_array_almost_equal(d2[-5:], d3)
# pol select
hd.select(polarizations=['yy'])
assert len(hd._polnum_indices) == 1
def test_reset(self):
hd = HERAData(self.uvh5_1)
hd.read()
hd.reset()
assert hd.data_array is None
assert hd.flag_array is None
assert hd.nsample_array is None
assert hd.filepaths == [self.uvh5_1]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is not None
assert len(hd.freqs) == 1024
assert len(hd.bls) == 3
assert len(hd.times) == 60
assert len(hd.lsts) == 60
assert hd._writers == {}
def test_get_metadata_dict(self):
hd = HERAData(self.uvh5_1)
metas = hd.get_metadata_dict()
for meta in hd.HERAData_metas:
assert meta in metas
assert len(metas['freqs']) == 1024
assert len(metas['bls']) == 3
assert len(metas['times']) == 60
assert len(metas['lsts']) == 60
np.testing.assert_array_equal(metas['times'], np.unique(list(metas['times_by_bl'].values())))
np.testing.assert_array_equal(metas['lsts'], np.unique(list(metas['lsts_by_bl'].values())))
def test_determine_blt_slicing(self):
hd = HERAData(self.uvh5_1)
for s in hd._blt_slices.values():
assert isinstance(s, slice)
for bl, s in hd._blt_slices.items():
np.testing.assert_array_equal(np.arange(180)[np.logical_and(hd.ant_1_array == bl[0],
hd.ant_2_array == bl[1])], np.arange(180)[s])
# test check for non-regular spacing
with pytest.raises(NotImplementedError):
hd.select(blt_inds=[0, 1, 3, 5, 23, 48])
hd._determine_blt_slicing()
def test_determine_pol_indexing(self):
hd = HERAData(self.uvh5_1)
assert hd._polnum_indices == {-5: 0}
hd = HERAData(self.four_pol, filetype='miriad')
hd.read(bls=[(53, 53)], axis='polarization')
assert hd._polnum_indices == {-8: 3, -7: 2, -6: 1, -5: 0}
def test_get_slice(self):
hd = HERAData(self.uvh5_1)
hd.read()
for bl in hd.bls:
np.testing.assert_array_equal(hd._get_slice(hd.data_array, bl), hd.get_data(bl))
np.testing.assert_array_equal(hd._get_slice(hd.data_array, (54, 53, 'EE')),
hd.get_data((54, 53, 'EE')))
np.testing.assert_array_equal(hd._get_slice(hd.data_array, (53, 54))[parse_polstr('XX', x_orientation=hd.x_orientation)],
hd.get_data((53, 54, 'EE')))
np.testing.assert_array_equal(hd._get_slice(hd.data_array, 'EE')[(53, 54)],
hd.get_data((53, 54, 'EE')))
with pytest.raises(KeyError):
hd._get_slice(hd.data_array, (1, 2, 3, 4))
hd = HERAData(self.four_pol, filetype='miriad')
d, f, n = hd.read(bls=[(80, 81)])
for p in d.pols():
np.testing.assert_array_almost_equal(hd._get_slice(hd.data_array, (80, 81, p)),
hd.get_data((80, 81, p)))
np.testing.assert_array_almost_equal(hd._get_slice(hd.data_array, (81, 80, p)),
hd.get_data((81, 80, p)))
def test_set_slice(self):
hd = HERAData(self.uvh5_1)
hd.read()
np.random.seed(21)
for bl in hd.bls:
new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
hd._set_slice(hd.data_array, bl, new_vis)
np.testing.assert_array_almost_equal(new_vis, hd.get_data(bl))
new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
hd._set_slice(hd.data_array, (54, 53, 'xx'), new_vis)
np.testing.assert_array_almost_equal(np.conj(new_vis), hd.get_data((53, 54, 'xx')))
new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
hd._set_slice(hd.data_array, (53, 54), {'xx': new_vis})
np.testing.assert_array_almost_equal(new_vis, hd.get_data((53, 54, 'xx')))
new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
to_set = {(53, 54): new_vis, (54, 54): 2 * new_vis, (53, 53): 3 * new_vis}
hd._set_slice(hd.data_array, 'XX', to_set)
np.testing.assert_array_almost_equal(new_vis, hd.get_data((53, 54, 'xx')))
with pytest.raises(KeyError):
hd._set_slice(hd.data_array, (1, 2, 3, 4), None)
def test_build_datacontainers(self):
hd = HERAData(self.uvh5_1)
d, f, n = hd.read()
for bl in hd.bls:
np.testing.assert_array_almost_equal(d[bl], hd.get_data(bl))
np.testing.assert_array_almost_equal(f[bl], hd.get_flags(bl))
np.testing.assert_array_almost_equal(n[bl], hd.get_nsamples(bl))
for dc in [d, f, n]:
assert isinstance(dc, DataContainer)
for k in dc.antpos.keys():
assert np.all(dc.antpos[k] == hd.antpos[k])
assert len(dc.antpos) == 52
assert len(hd.antpos) == 52
for k in dc.data_antpos.keys():
assert np.all(dc.data_antpos[k] == hd.data_antpos[k])
assert len(dc.data_antpos) == 2
assert len(hd.data_antpos) == 2
assert np.all(dc.freqs == hd.freqs)
assert np.all(dc.times == hd.times)
assert np.all(dc.lsts == hd.lsts)
for k in dc.times_by_bl.keys():
assert np.all(dc.times_by_bl[k] == hd.times_by_bl[k])
assert np.all(dc.times_by_bl[k] == dc.times_by_bl[(k[1], k[0])])
assert np.all(dc.lsts_by_bl[k] == hd.lsts_by_bl[k])
assert np.all(dc.lsts_by_bl[k] == dc.lsts_by_bl[(k[1], k[0])])
def test_write_read_filter_cache_scratch(self):
# most of write_filter_cache_scratch and all of read_filter_cache_scratch are covered in
# test_delay_filter.test_load_dayenu_filter_and_write()
# This test covers a few odds and ends that are not covered.
# The case not covered is writing a filter cache with no skip_keys
# or filter directory.
io.write_filter_cache_scratch(filter_cache={'crazy': 'universe'})
# make sure file (and only one file) was written.
assert len(glob.glob(os.getcwd() + '/*.filter_cache')) == 1
# make sure read works and read cache is identical to written cache.
cache = io.read_filter_cache_scratch(os.getcwd())
assert cache['crazy'] == 'universe'
assert len(cache) == 1
# now cleanup cache files.
cleanup = glob.glob(os.getcwd() + '/*.filter_cache')
for file in cleanup:
os.remove(file)
@pytest.mark.filterwarnings("ignore:miriad does not support partial loading")
def test_read(self):
# uvh5
hd = HERAData(self.uvh5_1)
d, f, n = hd.read(bls=(53, 54, 'xx'), frequencies=hd.freqs[0:100], times=hd.times[0:10])
assert hd.last_read_kwargs['bls'] == (53, 54, parse_polstr('XX'))
assert hd.last_read_kwargs['polarizations'] is None
for dc in [d, f, n]:
assert len(dc) == 1
assert list(dc.keys()) == [(53, 54, parse_polstr('XX', x_orientation=hd.x_orientation))]
assert dc[53, 54, 'ee'].shape == (10, 100)
with pytest.raises(ValueError):
d, f, n = hd.read(polarizations=['xy'])
# assert return data = False
o = hd.read(bls=[(53, 53), (53, 54)], return_data=False)
assert o is None
# assert __getitem__ isn't a read when key exists
o = hd[(53, 53, 'ee')]
assert len(hd._blt_slices) == 2
# assert __getitem__ is a read when key does not exist
o = hd[(54, 54, 'ee')]
assert len(hd._blt_slices) == 1
# test read with extra UVData kwargs
hd = HERAData(self.uvh5_1)
d, f, n = hd.read(bls=hd.bls[:2], frequencies=hd.freqs[:100], multidim_index=True)
k = list(d.keys())[0]
assert len(d) == 2
assert d[k].shape == (hd.Ntimes, 100)
# test read list
hd = HERAData([self.uvh5_1, self.uvh5_2])
d, f, n = hd.read(axis='blt')
for dc in [d, f, n]:
assert len(dc) == 3
assert len(dc.times) == 120
assert len(dc.lsts) == 120
assert len(dc.freqs) == 1024
for i in [53, 54]:
for j in [53, 54]:
assert (i, j, 'ee') in dc
for bl in dc:
assert dc[bl].shape == (120, 1024)
# miriad
hd = HERAData(self.miriad_1, filetype='miriad')
d, f, n = hd.read()
hd = HERAData(self.miriad_1, filetype='miriad')
d, f, n = hd.read(bls=(52, 53), polarizations=['XX'], frequencies=d.freqs[0:30], times=d.times[0:10])
assert hd.last_read_kwargs['polarizations'] == ['XX']
for dc in [d, f, n]:
assert len(dc) == 1
assert list(dc.keys()) == [(52, 53, parse_polstr('XX', x_orientation=hd.x_orientation))]
assert dc[52, 53, 'ee'].shape == (10, 30)
with pytest.raises(NotImplementedError):
d, f, n = hd.read(read_data=False)
# uvfits
hd = HERAData(self.uvfits, filetype='uvfits')
d, f, n = hd.read(bls=(0, 1, 'xx'), freq_chans=list(range(10)))
assert hd.last_read_kwargs['freq_chans'] == list(range(10))
for dc in [d, f, n]:
assert len(dc) == 1
assert list(dc.keys()) == [(0, 1, parse_polstr('XX', x_orientation=hd.x_orientation))]
assert dc[0, 1, 'ee'].shape == (60, 10)
with pytest.raises(NotImplementedError):
d, f, n = hd.read(read_data=False)
def test_getitem(self):
hd = HERAData(self.uvh5_1)
hd.read()
for bl in hd.bls:
np.testing.assert_array_almost_equal(hd[bl], hd.get_data(bl))
def test_update(self):
hd = HERAData(self.uvh5_1)
d, f, n = hd.read()
for bl in hd.bls:
d[bl] *= (2.0 + 1.0j)
f[bl] = np.logical_not(f[bl])
n[bl] += 1
hd.update(data=d, flags=f, nsamples=n)
d2, f2, n2 = hd.build_datacontainers()
for bl in hd.bls:
np.testing.assert_array_almost_equal(d[bl], d2[bl])
np.testing.assert_array_equal(f[bl], f2[bl])
np.testing.assert_array_equal(n[bl], n2[bl])
def test_partial_write(self):
hd = HERAData(self.uvh5_1)
assert hd._writers == {}
d, f, n = hd.read(bls=hd.bls[0])
assert hd.last_read_kwargs['bls'] == (53, 53, parse_polstr('XX', x_orientation=hd.x_orientation))
d[(53, 53, 'EE')] *= (2.0 + 1.0j)
hd.partial_write('out.h5', data=d, clobber=True)
assert 'out.h5' in hd._writers
assert isinstance(hd._writers['out.h5'], HERAData)
for meta in hd.HERAData_metas:
try:
assert np.all(getattr(hd, meta) == getattr(hd._writers['out.h5'], meta))
except BaseException:
for k in getattr(hd, meta).keys():
assert np.all(getattr(hd, meta)[k] == getattr(hd._writers['out.h5'], meta)[k])
d, f, n = hd.read(bls=hd.bls[1])
assert hd.last_read_kwargs['bls'] == (53, 54, parse_polstr('XX', x_orientation=hd.x_orientation))
d[(53, 54, 'EE')] *= (2.0 + 1.0j)
hd.partial_write('out.h5', data=d, clobber=True)
d, f, n = hd.read(bls=hd.bls[2])
assert hd.last_read_kwargs['bls'] == (54, 54, parse_polstr('XX', x_orientation=hd.x_orientation))
d[(54, 54, 'EE')] *= (2.0 + 1.0j)
hd.partial_write('out.h5', data=d, clobber=True, inplace=True)
d_after, _, _ = hd.build_datacontainers()
np.testing.assert_array_almost_equal(d[(54, 54, 'EE')], d_after[(54, 54, 'EE')])
hd = HERAData(self.uvh5_1)
d, f, n = hd.read()
hd2 = HERAData('out.h5')
d2, f2, n2 = hd2.read()
for bl in hd.bls:
np.testing.assert_array_almost_equal(d[bl] * (2.0 + 1.0j), d2[bl])
np.testing.assert_array_equal(f[bl], f2[bl])
np.testing.assert_array_equal(n[bl], n2[bl])
os.remove('out.h5')
# test errors
hd = HERAData(self.miriad_1, filetype='miriad')
with pytest.raises(NotImplementedError):
hd.partial_write('out.uv')
hd = HERAData([self.uvh5_1, self.uvh5_2])
with pytest.raises(NotImplementedError):
hd.partial_write('out.h5')
hd = HERAData(self.uvh5_1)
def test_iterate_over_bls(self):
hd = HERAData(self.uvh5_1)
for (d, f, n) in hd.iterate_over_bls(Nbls=2):
for dc in (d, f, n):
assert len(dc.keys()) == 1 or len(dc.keys()) == 2
assert list(dc.values())[0].shape == (60, 1024)
hd = HERAData([self.uvh5_1, self.uvh5_2])
for (d, f, n) in hd.iterate_over_bls():
for dc in (d, f, n):
assert len(d.keys()) == 1
assert list(d.values())[0].shape == (120, 1024)
# try cover include_autos = False.
hd = HERAData([self.uvh5_1, self.uvh5_2])
for (d, f, n) in hd.iterate_over_bls(include_autos=False):
for dc in (d, f, n):
assert len(d.keys()) == 1
bl = list(d.keys())[0]
# make sure no autos present.
assert bl[0] != bl[1]
assert list(d.values())[0].shape == (120, 1024)
hd = HERAData(self.miriad_1, filetype='miriad')
d, f, n = next(hd.iterate_over_bls(bls=[(52, 53, 'xx')]))
assert list(d.keys()) == [(52, 53, parse_polstr('XX', x_orientation=hd.x_orientation))]
with pytest.raises(NotImplementedError):
next(hd.iterate_over_bls())
hd = HERAData(self.uvh5_1)
for (d, f, n) in hd.iterate_over_bls(chunk_by_redundant_group=True, Nbls=1):
# check that all baselines in chunk are redundant
# this will be the case when Nbls = 1
bl_lens = np.asarray([hd.antpos[bl[0]] - hd.antpos[bl[1]] for bl in d])
assert np.all(np.isclose(bl_lens - bl_lens[0], 0., atol=1.0))
for dc in (d, f, n):
assert list(d.values())[0].shape == (60, 1024)
hd = HERAData([self.uvh5_1, self.uvh5_2])
for (d, f, n) in hd.iterate_over_bls(chunk_by_redundant_group=True):
for dc in (d, f, n):
assert list(d.values())[0].shape == (120, 1024)
with pytest.raises(NotImplementedError):
hd = HERAData(self.miriad_1, filetype='miriad')
d, f, n = next(hd.iterate_over_bls(bls=[(52, 53, 'xx')], chunk_by_redundant_group=True))
def test_iterate_over_freqs(self):
hd = HERAData(self.uvh5_1)
for (d, f, n) in hd.iterate_over_freqs(Nchans=256):
for dc in (d, f, n):
assert len(dc.keys()) == 3
assert list(dc.values())[0].shape == (60, 256)
hd = HERAData([self.uvh5_1, self.uvh5_2])
for (d, f, n) in hd.iterate_over_freqs(Nchans=512):
for dc in (d, f, n):
assert len(dc.keys()) == 3
assert list(dc.values())[0].shape == (120, 512)
hd = HERAData(self.uvfits, filetype='uvfits')
d, f, n = hd.read()
d, f, n = next(hd.iterate_over_freqs(Nchans=2, freqs=d.freqs[0:2]))
for value in d.values():
assert value.shape == (60, 2)
with pytest.raises(NotImplementedError):
next(hd.iterate_over_bls())
def test_iterate_over_times(self):
hd = HERAData(self.uvh5_1)
for (d, f, n) in hd.iterate_over_times(Nints=20):
for dc in (d, f, n):
assert len(dc.keys()) == 3
assert list(dc.values())[0].shape == (20, 1024)
hd.read(frequencies=hd.freqs[0:512])
hd.write_uvh5('out1.h5', clobber=True)
hd.read(frequencies=hd.freqs[512:])
hd.write_uvh5('out2.h5', clobber=True)
hd = HERAData(['out1.h5', 'out2.h5'])
for (d, f, n) in hd.iterate_over_times(Nints=30):
for dc in (d, f, n):
assert len(dc.keys()) == 3
assert list(dc.values())[0].shape == (30, 1024)
os.remove('out1.h5')
os.remove('out2.h5')
hd = HERAData(self.uvfits, filetype='uvfits')
d, f, n = hd.read()
d, f, n = next(hd.iterate_over_times(Nints=2, times=d.times[0:2]))
for value in d.values():
assert value.shape == (2, 64)
with pytest.raises(NotImplementedError):
next(hd.iterate_over_times())
def test_uvflag_compatibility(self):
# Test that UVFlag is able to successfully init from the HERAData object
uv = UVData()
uv.read_uvh5(self.uvh5_1)
uvf1 = UVFlag(uv)
hd = HERAData(self.uvh5_1)
hd.read()
uvf2 = UVFlag(hd)
assert uvf1 == uvf2
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
class Test_Visibility_IO_Legacy(object):
def test_load_vis(self):
# inheretied testing from the old abscal_funcs.UVData2AbsCalDict
# load into pyuvdata object
self.data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
self.uvd = UVData()
self.uvd.read_miriad(self.data_file)
self.freq_array = np.unique(self.uvd.freq_array)
self.antpos, self.ants = self.uvd.get_ENU_antpos(center=True, pick_data_ants=True)
self.antpos = odict(list(zip(self.ants, self.antpos)))
self.time_array = np.unique(self.uvd.time_array)
fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
data, flags = io.load_vis(fname, pop_autos=False)
assert data[(24, 25, 'ee')].shape == (60, 64)
assert flags[(24, 25, 'ee')].shape == (60, 64)
assert (24, 24, parse_polstr('EE', x_orientation=self.uvd.x_orientation)) in data
data, flags = io.load_vis([fname])
assert data[(24, 25, 'ee')].shape == (60, 64)
# test pop autos
data, flags = io.load_vis(fname, pop_autos=True)
assert (24, 24, parse_polstr('EE', x_orientation=self.uvd.x_orientation)) not in data
# test uvd object
uvd = UVData()
uvd.read_miriad(fname)
data, flags = io.load_vis(uvd)
assert data[(24, 25, 'ee')].shape == (60, 64)
data, flags = io.load_vis([uvd])
assert data[(24, 25, 'ee')].shape == (60, 64)
# test multiple
fname2 = os.path.join(DATA_PATH, "zen.2458043.13298.xx.HH.uvORA")
data, flags = io.load_vis([fname, fname2])
assert data[(24, 25, 'ee')].shape == (120, 64)
assert flags[(24, 25, 'ee')].shape == (120, 64)
# test w/ meta
d, f, ap, a, f, t, l, p = io.load_vis([fname, fname2], return_meta=True)
assert len(ap[24]) == 3
assert len(f) == len(self.freq_array)
with pytest.raises(NotImplementedError):
d, f = io.load_vis(fname, filetype='not_a_real_filetype')
with pytest.raises(NotImplementedError):
d, f = io.load_vis(['str1', 'str2'], filetype='not_a_real_filetype')
# test w/ meta pick_data_ants
fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
d, f, ap, a, f, t, l, p = io.load_vis(fname, return_meta=True, pick_data_ants=False)
assert len(ap[24]) == 3
assert len(a) == 47
assert len(f) == len(self.freq_array)
with pytest.raises(TypeError):
d, f = io.load_vis(1.0)
def test_load_vis_nested(self):
# duplicated testing from firstcal.UVData_to_dict
filename1 = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
filename2 = os.path.join(DATA_PATH, 'zen.2458043.13298.xx.HH.uvORA')
uvd1 = UVData()
uvd1.read_miriad(filename1)
uvd2 = UVData()
uvd2.read_miriad(filename2)
if uvd1.phase_type != 'drift':
uvd1.unphase_to_drift()
if uvd2.phase_type != 'drift':
uvd2.unphase_to_drift()
uvd = uvd1 + uvd2
d, f = io.load_vis([uvd1, uvd2], nested_dict=True)
for i, j in d:
for pol in d[i, j]:
uvpol = list(uvd1.polarization_array).index(polstr2num(pol, x_orientation=uvd1.x_orientation))
uvmask = np.all(
np.array(list(zip(uvd.ant_1_array, uvd.ant_2_array))) == [i, j], axis=1)
np.testing.assert_equal(d[i, j][pol], np.resize(
uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape))
np.testing.assert_equal(f[i, j][pol], np.resize(
uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape))
d, f = io.load_vis([filename1, filename2], nested_dict=True)
for i, j in d:
for pol in d[i, j]:
uvpol = list(uvd.polarization_array).index(polstr2num(pol, x_orientation=uvd.x_orientation))
uvmask = np.all(
np.array(list(zip(uvd.ant_1_array, uvd.ant_2_array))) == [i, j], axis=1)
np.testing.assert_equal(d[i, j][pol], np.resize(
uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape))
np.testing.assert_equal(f[i, j][pol], np.resize(
uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape))
uvd = UVData()
uvd.read_miriad(filename1)
assert len(io.load_vis([uvd], nested_dict=True)[0]) == uvd.Nbls
# reorder baseline array
uvd.baseline_array = uvd.baseline_array[np.argsort(uvd.baseline_array)]
assert len(io.load_vis(filename1, nested_dict=True)[0]) == uvd.Nbls
@pytest.mark.filterwarnings("ignore:The expected shape of the ENU array")
@pytest.mark.filterwarnings("ignore:antenna_diameters is not set")
@pytest.mark.filterwarnings("ignore:Unicode equal comparison failed")
def test_write_vis(self):
# get data
uvd = UVData()
uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2458044.41632.xx.HH.XRAA.uvh5"))
data, flgs, ap, a, f, t, l, p = io.load_vis(uvd, return_meta=True)
nsample = copy.deepcopy(data)
for k in nsample.keys():
nsample[k] = np.ones_like(nsample[k], float)
# test basic execution
uvd = io.write_vis("ex.uvh5", data, l, f, ap, start_jd=2458044, return_uvd=True, overwrite=True, verbose=True, x_orientation='east', filetype='uvh5')
hd = HERAData("ex.uvh5")
hd.read()
assert os.path.exists('ex.uvh5')
assert uvd.data_array.shape == (1680, 1, 64, 1)
assert hd.data_array.shape == (1680, 1, 64, 1)
assert np.allclose(data[(24, 25, 'ee')][30, 32], uvd.get_data(24, 25, 'ee')[30, 32])
assert np.allclose(data[(24, 25, 'ee')][30, 32], hd.get_data(24, 25, 'ee')[30, 32])
assert hd.x_orientation.lower() == 'east'
for ant in ap:
np.testing.assert_array_almost_equal(hd.antpos[ant], ap[ant])
os.remove("ex.uvh5")
# test with nsample and flags
uvd = io.write_vis("ex.uv", data, l, f, ap, start_jd=2458044, flags=flgs, nsamples=nsample, x_orientation='east', return_uvd=True, overwrite=True, verbose=True)
assert uvd.nsample_array.shape == (1680, 1, 64, 1)
assert uvd.flag_array.shape == (1680, 1, 64, 1)
assert np.allclose(nsample[(24, 25, 'ee')][30, 32], uvd.get_nsamples(24, 25, 'ee')[30, 32])
assert np.allclose(flgs[(24, 25, 'ee')][30, 32], uvd.get_flags(24, 25, 'ee')[30, 32])
assert uvd.x_orientation.lower() == 'east'
# test exceptions
pytest.raises(AttributeError, io.write_vis, "ex.uv", data, l, f, ap)
pytest.raises(AttributeError, io.write_vis, "ex.uv", data, l, f, ap, start_jd=2458044, filetype='foo')
if os.path.exists('ex.uv'):
shutil.rmtree('ex.uv')
def test_update_vis(self):
# load in cal
fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
outname = os.path.join(DATA_PATH, "test_output/zen.2458043.12552.xx.HH.modified.uvORA")
uvd = UVData()
uvd.read_miriad(fname)
data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(fname, return_meta=True)
# make some modifications
new_data = {key: (2. + 1.j) * val for key, val in data.items()}
new_flags = {key: np.logical_not(val) for key, val in flags.items()}
io.update_vis(fname, outname, data=new_data, flags=new_flags,
add_to_history='hello world', clobber=True, telescope_name='PAPER')
# test modifications
data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(outname, return_meta=True)
for k in data.keys():
assert np.all(new_data[k] == data[k])
assert np.all(new_flags[k] == flags[k])
uvd2 = UVData()
uvd2.read_miriad(outname)
assert pyuvdata.utils._check_histories(uvd2.history, uvd.history + 'hello world')
assert uvd2.telescope_name == 'PAPER'
shutil.rmtree(outname)
# Coverage for errors
with pytest.raises(TypeError):
io.update_vis(uvd, outname, data=new_data, flags=new_flags, filetype_out='not_a_real_filetype',
add_to_history='hello world', clobber=True, telescope_name='PAPER')
with pytest.raises(NotImplementedError):
io.update_vis(fname, outname, data=new_data, flags=new_flags, filetype_in='not_a_real_filetype',
add_to_history='hello world', clobber=True, telescope_name='PAPER')
# #now try the same thing but with a UVData object instead of path
io.update_vis(uvd, outname, data=new_data, flags=new_flags,
add_to_history='hello world', clobber=True, telescope_name='PAPER')
data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(outname, return_meta=True)
for k in data.keys():
assert np.all(new_data[k] == data[k])
assert np.all(new_flags[k] == flags[k])
uvd2 = UVData()
uvd2.read_miriad(outname)
assert pyuvdata.utils._check_histories(uvd2.history, uvd.history + 'hello world')
assert uvd2.telescope_name == 'PAPER'
shutil.rmtree(outname)
class Test_Calibration_IO_Legacy(object):
def test_load_cal(self):
with pytest.raises(TypeError):
io.load_cal(1.0)
fname = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
gains, flags = io.load_cal(fname)
assert len(gains.keys()) == 18
assert len(flags.keys()) == 18
cal = UVCal()
cal.read_calfits(fname)
gains, flags = io.load_cal(cal)
assert len(gains.keys()) == 18
assert len(flags.keys()) == 18
with pytest.raises(TypeError):
io.load_cal([fname, cal])
fname_xx = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
fname_yy = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits")
gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal([fname_xx, fname_yy], return_meta=True)
assert len(gains.keys()) == 36
assert len(flags.keys()) == 36
assert len(quals.keys()) == 36
assert freqs.shape == (1024,)
assert times.shape == (3,)
assert sorted(pols) == [parse_jpolstr('jxx', x_orientation=cal.x_orientation), parse_jpolstr('jyy', x_orientation=cal.x_orientation)]
cal_xx, cal_yy = UVCal(), UVCal()
cal_xx.read_calfits(fname_xx)
cal_yy.read_calfits(fname_yy)
gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal([cal_xx, cal_yy], return_meta=True)
assert len(gains.keys()) == 36
assert len(flags.keys()) == 36
assert len(quals.keys()) == 36
assert freqs.shape == (1024,)
assert times.shape == (3,)
assert sorted(pols) == [parse_jpolstr('jxx', x_orientation=cal_xx.x_orientation), parse_jpolstr('jyy', x_orientation=cal_yy.x_orientation)]
def test_write_cal(self):
# create fake data
ants = np.arange(10)
pols = np.array(['Jnn'])
freqs = np.linspace(100e6, 200e6, 64, endpoint=False)
Nfreqs = len(freqs)
times = np.linspace(2458043.1, 2458043.6, 100)
Ntimes = len(times)
gains = {}
quality = {}
flags = {}
total_qual = {}
for i, p in enumerate(pols):
total_qual[p] = np.ones((Ntimes, Nfreqs), float)
for j, a in enumerate(ants):
gains[(a, p)] = np.ones((Ntimes, Nfreqs), complex)
quality[(a, p)] = np.ones((Ntimes, Nfreqs), float) * 2
flags[(a, p)] = np.zeros((Ntimes, Nfreqs), bool)
# set some terms to zero
gains[(5, 'Jnn')][20:30] *= 0
# test basic execution
uvc = io.write_cal("ex.calfits", gains, freqs, times, flags=flags, quality=quality,
total_qual=total_qual, overwrite=True, return_uvc=True, write_file=True)
assert os.path.exists("ex.calfits")
assert uvc.gain_array.shape == (10, 1, 64, 100, 1)
assert np.allclose(uvc.gain_array[5].min(), 1.0)
assert np.allclose(uvc.gain_array[0, 0, 0, 0, 0], (1 + 0j))
assert np.allclose(np.sum(uvc.gain_array), (64000 + 0j))
assert not np.any(uvc.flag_array[0, 0, 0, 0, 0])
assert np.sum(uvc.flag_array) == 640
assert np.allclose(uvc.quality_array[0, 0, 0, 0, 0], 2)
assert np.allclose(np.sum(uvc.quality_array), 128000.0)
assert len(uvc.antenna_numbers) == 10
assert uvc.total_quality_array is not None
if os.path.exists('ex.calfits'):
os.remove('ex.calfits')
# test execution with different parameters
uvc = io.write_cal("ex.calfits", gains, freqs, times, overwrite=True)
if os.path.exists('ex.calfits'):
os.remove('ex.calfits')
# test single integration write
gains2 = odict(list(map(lambda k: (k, gains[k][:1]), gains.keys())))
uvc = io.write_cal("ex.calfits", gains2, freqs, times[:1], return_uvc=True, outdir='./')
assert np.allclose(uvc.integration_time, 0.0)
isoIn = False
clIn = False
class clusterObj:
def __init__(self,name='genericCluster',basedir='clusters/',brightThreshold=15):
#Declare instance variables
self.name = name
self.basedir = basedir
self.dataPath = self.basedir + f"{name}/data/"
self.imgPath = self.basedir + f"{name}/plots/"
self.unfilteredWide = []
self.unfilteredNarrow = []
self.filtered = []
self.mag = []
self.iso = []
self.condensed = []
self.unfilteredBright = []
self.filteredBright = []
self.brightmag = []
self.distFiltered = []
self.brightThreshold = brightThreshold
self.mean_par = 0
self.stdev_par = 0
self.mean_ra = 0
self.mean_dec = 0
self.stdev_ra = 0
self.stdev_dec = 0
self.mean_pmra = 0
self.stdev_pmra = 0
self.mean_pmdec = 0
self.stdev_pmdec = 0
self.mean_a_g = 0
self.stdev_a_g = 0
self.mean_e_bp_rp = 0
self.stdev_e_bp_rp = 0
self.mean_par_over_ra = 0
self.stdev_par_over_ra = 0
self.dist_mod = 0
self.turnPoint = 0
self.reddening = 0
#Check directory locations
import os
if not os.path.isdir(self.dataPath):
os.mkdir(self.dataPath)
if not os.path.isdir(self.imgPath):
os.mkdir(self.imgPath)
class starObj:
def __init__(self,name,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_match_obs,astro_sigma5d,match_obs,g_mag,b_mag,r_mag,b_r,b_g,g_r,radvel,radvel_err,variable,teff,a_g,e_bp_rp,lum):
#Declare instance variables
self.name = name
self.ra = float(ra)
self.ra_err = float(ra_err)
self.dec = float(dec)
self.dec_err = float(dec_err)
self.par = float(par)
self.par_err = float(par_err)
self.par_over_err = float(par_over_err)
self.pmra = float(pmra)
self.pmra_err = float(pmra_err)
self.pmdec = float(pmdec)
self.pmdec_err = float(pmdec_err)
self.ra_dec_corr = float(ra_dec_corr)
self.ra_par_corr = float(ra_par_corr)
self.ra_pmra_corr = float(ra_pmra_corr)
self.ra_pmdec_corr = float(ra_pmdec_corr)
self.dec_par_corr = float(dec_par_corr)
self.dec_pmra_corr = float(dec_pmra_corr)
self.dec_pmdec_corr = float(dec_pmdec_corr)
self.par_pmra_corr = float(par_pmra_corr)
self.par_pmdec_corr = float(par_pmdec_corr)
self.pmra_pmdec_corr = float(pmra_pmdec_corr)
self.astro_n_obs = float(astro_n_obs)
self.astro_n_good_obs = float(astro_n_good_obs)
self.astro_n_bad_obs = float(astro_n_bad_obs)
self.astro_gof = float(astro_gof)
self.astro_chi2 = float(astro_chi2)
self.astro_noise = float(astro_noise)
self.astro_noise_sig = float(astro_noise_sig)
self.astro_match_obs = float(astro_match_obs)
self.astro_sigma5d = float(astro_sigma5d)
self.match_obs = float(match_obs)
self.g_mag = float(g_mag)
self.b_mag = float(b_mag)
self.r_mag = float(r_mag)
self.b_r = float(b_r)
self.b_g = float(b_g)
self.g_r = float(g_r)
self.radvel = float(radvel)
self.radvel_err = float(radvel_err)
self.variable = variable
self.teff = float(teff)
self.a_g = float(a_g)
self.e_bp_rp = float(e_bp_rp)
self.lum = float(lum)
self.member = 0
self.par_over_ra = float(par)/float(ra)
self.par_over_dec = float(par)/float(dec)
self.par_over_pmra = float(par)/float(pmra)
self.par_over_pmdec = float(par)/float(pmdec)
class isochroneObj:
def __init__(self,age=404,feh=404,afe=404,name='genericIsochrone',basedir='isochrones/',subdir='processed',isodir=''):
#Declare instance variables
self.name = name
self.basedir = basedir
self.subdir = subdir
self.isodir = isodir
self.starList = []
self.age = age
self.feh = feh
self.afe = afe
self.distance = 0
self.coeff = []
self.g = []
self.br = []
class fakeStarObj:
def __init__(self,g_mag,b_mag,r_mag):
#Declare instance variables
self.g_mag = g_mag
self.b_mag = b_mag
self.r_mag = r_mag
self.b_r = self.b_mag-self.r_mag
self.b_g = self.b_mag-self.g_mag
self.g_r = self.g_mag-self.r_mag
self.score = 0
def readClusters(clusters=["M67"],basedir="clusters/",smRad=0.35):
#Imports
import numpy as np
import pandas as pd
global clusterList
global stars
global clIn
clusterList=[]
#Loop through clusters
for clname in clusters:
#Create cluster objects
cluster = clusterObj(name=clname,basedir=basedir)
"""
#Generate wide-field star list
starlist = np.genfromtxt(cluster.dataPath+"narrow.csv", delimiter=",", skip_header=1, usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17))
starlist = preFilter(starlist)
for s in starlist:
star = starObj(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],s[8],s[9],s[10],s[11],s[12],s[13],s[14],s[15],s[16],s[17])
cluster.unfilteredNarrow.append(star)
"""
#Generate narrow-field star list
starlist = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
stars = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
starlist = starlist.to_numpy(dtype=str)
#starlist = np.genfromtxt(cluster.dataPath+"wide.csv", delimiter=",", skip_header=1)
print(len(starlist))
starlist = preFilter(starlist)
ramean = np.mean([float(x) for x in starlist[:,1]])
decmean = np.mean([float(x) for x in starlist[:,3]])
for s in starlist:
star = starObj(*s)
cluster.unfilteredWide.append(star)
if np.less_equal(star.g_mag,cluster.brightThreshold):
cluster.unfilteredBright.append(star)
if np.less_equal(np.sqrt(((star.ra-ramean)*np.cos(np.pi/180*star.dec))**2+(star.dec-decmean)**2),smRad):
cluster.unfilteredNarrow.append(star)
clusterList.append(cluster)
calcStats(cluster)
rmOutliers()
clIn = True
toDict()
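def angularSep(ra1,dec1,ra2,dec2):
    #Small helper added for illustration (not in the original module): the same
    #flat-sky approximation used for the narrow-field cut in readClusters,
    #sep ~ sqrt((dRA*cos(dec))^2 + dDec^2), returned in degrees. It is accurate
    #for the sub-degree search radii used here.
    #Imports
    import numpy as np
    return np.sqrt(((ra1-ra2)*np.cos(np.pi/180*dec1))**2+(dec1-dec2)**2)
    #e.g. angularSep(132.95, 11.85, 132.75, 11.75) -> ~0.22 deg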
def pad(string, pads):
spl = string.split(',')
return '\n'.join([','.join(spl[i:i+pads]) for i in range(0,len(spl),pads)])
def processIso(basedir='isochrones/',subdir='raw/'):
#Imports
import os
import re
path = basedir + subdir
for fn in os.listdir(path):
main = open(path+fn).read()
part = main.split('\n\n\n')
part[0] = part[0].split('#----------------------------------------------------')[3].split('\n',1)[1]
for a in range(len(part)):
temp = part[a].split('#AGE=')[1].split(' EEPS=')[0]
age = temp.strip()
out = part[a].split('\n',2)[2]
out = re.sub("\s+", ",", out.strip())
out = pad(out,8)
filename = f"{basedir}processed/"+fn.split('.')[0]+'/'+age+".csv"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename,"w") as f:
f.write(out)
def testLoad():
#Imports
import numpy as np
import pandas as pd
global stars
stars = pd.read_csv("clusters/M67/data/wide.csv",sep=',',dtype=str)
stars = stars.to_numpy(dtype=str)
#print(stars)
def readIsochrones(basedir='isochrones/',subdir='processed/'):
#Imports
import os
import numpy as np
global isoList
global isoIn
isoList=[]
for folder in os.listdir(basedir+subdir):
for fn in os.listdir(basedir+subdir+folder):
#Get the age and metallicities of the isochrones
ageStr = fn.split('.csv')[0]
fehStr = folder.split('feh')[1].split('afe')[0]
afeStr = folder.split('afe')[1].split('y')[0]
feh = float(fehStr[1]+fehStr[2])/10
afe = float(afeStr[1])/10
age = float(ageStr)
if fehStr[0] == 'm':
feh = feh*-1
if afeStr[0] == 'm':
afe = afe*-1
#Debug
#print(f"folder:{folder} fn:{fn} fehStr:{fehStr} feh:{feh} afeStr:{afeStr} afe:{afe} ageStr:{ageStr} age:{age}")
#Create isochone object
iso = isochroneObj(age=age,feh=feh,afe=afe,name=f"feh_{feh}_afe_{afe}_age_{age}",basedir=basedir,subdir=subdir,isodir=folder+'/')
isoArr = np.genfromtxt(basedir+subdir+folder+"/"+fn, delimiter=",")
for s in isoArr:
star = fakeStarObj(s[5],s[6],s[7])
iso.starList.append(star)
iso.br.append(s[6]-s[7])
iso.g.append(s[5])
isoList.append(iso)
isoIn = True
toDict()
def preFilter(starList):
#Imports
import numpy as np
final = []
cols = list(range(1,12))+list(range(32,38))
#Filters out NaN values except for the last two columns
for n,s in enumerate(starList):
dump = False
for c in cols:
if np.isnan(float(s[c])):
dump = True
if not dump:
final.append(starList[n])
#Reshapes array
final = np.array(final)
return final
def rmOutliers():
#Imports
global clusterList
import numpy as np
for cluster in clusterList:
#Variables
pmthreshold = 1*np.sqrt(cluster.stdev_pmra**2+cluster.stdev_pmdec**2)
pmpthreshold = 50
parthreshold = 3
toRemove=[]
#print(cluster.mean_pmra,cluster.mean_pmdec,cluster.stdev_pmra,cluster.stdev_pmdec)
#print(len(cluster.unfilteredWide))
#Classifies outliers
for star in cluster.unfilteredWide:
#print(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),star.pmra,star.pmdec)
if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt((star.pmra/star.par)**2+(star.pmdec/star.par)**2),pmpthreshold) or np.greater(abs(star.par),parthreshold):
#if np.greater(np.sqrt((star.pmra-cluster.mean_pmra)**2+(star.pmdec-cluster.mean_pmdec)**2),threshold):
toRemove.append(star)
#Removes the outliers from the array
for rm in toRemove:
cluster.unfilteredWide.remove(rm)
try:
cluster.unfilteredNarrow.remove(rm)
except ValueError:
pass
#print(len(cluster.unfilteredWide))
def calcStats(cluster,bright=False):
#Imports
import numpy as np
#Reads in all the values for a cluster
par=[]
ra=[]
dec=[]
pmra=[]
pmdec=[]
a_g=[]
e_bp_rp=[]
loopList=[]
if bright:
loopList = cluster.filteredBright
else:
loopList = cluster.unfilteredNarrow
for star in loopList:
par.append(star.par)
pmra.append(star.pmra)
pmdec.append(star.pmdec)
ra.append(star.ra)
dec.append(star.dec)
if not np.isnan(star.a_g) and not star.a_g == 0:
a_g.append(star.a_g)
if not np.isnan(star.e_bp_rp) and not star.e_bp_rp == 0:
e_bp_rp.append(star.e_bp_rp)
#Calculate the statistics
cluster.mean_par = np.mean(par[:])
cluster.mean_ra = np.mean(ra[:])
cluster.mean_dec = np.mean(dec[:])
cluster.stdev_ra = np.std(ra[:])
cluster.stdev_dec = np.std(dec[:])
cluster.stdev_par = np.std(par[:])
cluster.mean_pmra = np.mean(pmra[:])
cluster.stdev_pmra = np.std(pmra[:])
cluster.mean_pmdec = np.mean(pmdec[:])
cluster.stdev_pmdec = np.std(pmdec[:])
cluster.mean_a_g = np.mean(a_g[:])
cluster.stdev_a_g = np.std(a_g[:])
cluster.mean_e_bp_rp = np.mean(e_bp_rp[:])
cluster.stdev_e_bp_rp = np.std(e_bp_rp[:])
cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])
cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])
cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5
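#Worked example of the distance modulus computed on the last line above (added
#for clarity, not in the original module): with the mean parallax in mas,
#d [pc] = 1000/parallax and m - M = 5*log10(d) - 5.
#e.g. parallax = 1.15 mas -> d ~ 870 pc -> 5*np.log10(1000/1.15) - 5 ~ 9.70 mag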
def saveClusters():
#Imports
import pickle
global clusterList
#Creates a pickle file with all of the saved instances
for cluster in clusterList:
with open(f"{cluster.dataPath}filtered.pk1", 'wb') as output:
pickle.dump(cluster, output, pickle.HIGHEST_PROTOCOL)
def saveIsochrones():
#Imports
import pickle
global clusterList
#Creates a pickle file with all of the saved instances
for iso in isoList:
with open(f"{iso.basedir}pickled/{iso.name}.pk1", 'wb') as output:
pickle.dump(iso, output, pickle.HIGHEST_PROTOCOL)
def loadClusters(clusterNames=["M67"],basedir='clusters/'):
#Imports
import pickle
global clusterList
global clIn
clusterList=[]
for clusterName in clusterNames:
#Reads in instances from the saved pickle file
with open(f"{basedir}{clusterName}/data/filtered.pk1",'rb') as input:
cluster = pickle.load(input)
clusterList.append(cluster)
clIn = True
toDict()
def loadIsochrones(basedir='isochrones/'):
#Imports
import pickle
import os
global isoList
global isoIn
isoList=[]
for fn in os.listdir(basedir+"pickled/"):
#Reads in instances from the saved pickle file
with open(f"{basedir}pickled/{fn}",'rb') as input:
iso = pickle.load(input)
isoList.append(iso)
isoIn = True
toDict()
"""
def turboFilter():
#Imports
import numpy as np
global clusterList
iter = 10
for cluster in clusterList:
starList = np.empty((0,4))
for star in cluster.unfilteredWide:
starList.r_[starList,[[star,star.par,star.pmra,star.pmdec]]]
for count in range(iter):
starList = clumpFilter(cluster,starList)
starList = distFilter(cluster,starList)
"""
"""
def distFilter():
#Imports
import numpy as np
import matplotlib.pyplot as plt
global clusterList
for cluster in clusterList:
parray = np.empty((0,2))
for star in cluster.unfilteredWide:
parray = np.r_[parray,[[star,star.par]]]
parray[parray[:,1].argsort()]
par = list(parray[:,1])
pmin = parray[0,1]
pmax = parray[-1,1]
div = 500
seg = (pmax-pmin)/div
slices = []
for i in range(div):
sliced = parray[par.index(find_nearest(par,pmin+i*seg)):par.index(find_nearest(par,pmin+(i+1)*seg))]
slices.append(sliced)
plt.hist([len(x) for x in slices])
"""
def turboFilter():
#Imports
import numpy as np
global clusterList
for cluster in clusterList:
cluster.filtered,cluster.mag = pmFilter(cluster.unfilteredWide,cluster.name)
setFlag()
def pmFilter(starList,name):
#Imports
import numpy as np
if name == 'M67':
#Thresholds
pmra_center=-11
pmdec_center=-3
pm_radius=1.25
if name == 'M35':
#Thresholds
pmra_center = 2.35
pmdec_center = -2.9
pm_radius = 0.4
filtered = []
mag = np.empty((0,2))
for star in starList:
if np.less_equal(np.sqrt((star.pmra-pmra_center)**2+(star.pmdec-pmdec_center)**2),pm_radius):
filtered.append(star)
mag = np.r_[mag,[[star.b_r,star.g_mag]]]
return filtered,mag
def distFilter(cluster):
#Imports
import numpy as np
#Set back to 1.5 after done with capstone paper
threshold = 100
for star in cluster.unfilteredWide:
if not np.greater(np.abs(star.par-cluster.mean_par),threshold*cluster.stdev_par):
cluster.distFiltered.append(star)
def turboFit():
#Imports
global clusterList
fitCount = 10
conversion = 2.1
condense()
for cluster in clusterList:
reddening = 0.5
difference = 0.5
for fit in range(fitCount):
shapeFit(cluster,reddening + difference)
upScore = cluster.iso[0][1]
shapeFit(cluster,reddening - difference)
downScore = cluster.iso[0][1]
if upScore < downScore:
reddening = reddening + difference
else:
reddening = reddening - difference
difference = difference/2
cluster.reddening = reddening
print(f"Reddening: {reddening}")
cluster.mag[:,0] -= reddening
cluster.mag[:,1] -= reddening*conversion
condense()
for cluster in clusterList:
shapeFit(cluster,0)
def shapeFit(cluster,reddening):
#Imports
import numpy as np
import shapely.geometry as geom
global isoList
conversion = 2.1
cluster.iso = np.empty((0,2))
for iso in isoList:
isoLine = geom.LineString(tuple(zip([x+conversion*reddening for x in iso.br],[x+cluster.dist_mod+reddening for x in iso.g])))
dist = []
for star in cluster.condensed:
starPt = geom.Point(star[0],star[1])
#print(starPt.distance(isoLine))
dist.append(np.abs(starPt.distance(isoLine)))
isoScore = np.mean(dist[:])
#print(list(geom.shape(isoLine).coords))
cluster.iso = np.r_[cluster.iso,[[iso,isoScore]]]
#print(isoScore)
cluster.iso = sorted(cluster.iso,key=lambda x: x[1])
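def shapeFitToyExample():
    #Toy illustration added for clarity (not in the original module): the score
    #used in shapeFit is the mean perpendicular distance from the condensed CMD
    #points to the isochrone polyline; a smaller score means a better fit.
    #Imports
    import numpy as np
    import shapely.geometry as geom
    isoLine = geom.LineString([(0.5, 14.0), (1.0, 12.0), (1.5, 10.0)])
    points = [geom.Point(0.8, 13.0), geom.Point(1.25, 11.2)]
    dist = [p.distance(isoLine) for p in points]
    return np.mean(dist)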
"""
def getReddening(iso="isochrones/processed/fehm05afem2/10.000.csv"):
#Imports
import numpy as np
import matplotlib.pyplot as plt
global isoLine
global fullIso
fullIso = np.genfromtxt(iso,delimiter=",")
plt.figure("red")
plt.gca().invert_yaxis()
plt.scatter(fullIso[:,6]-fullIso[:,7],fullIso[:,5],color='olive')
gmin = 5
gmax = 8.5
isoX = []
isoY = []
for s in fullIso:
if s[5] >= gmin and s[5] <= gmax:
isoX.append(s[6]-s[7])
isoY.append(s[5])
isoLine = np.polyfit(isoX[:],isoY[:],1)
x=np.linspace(isoX[0],isoX[-1],50)
y=isoLine[0]*x+isoLine[1]
plt.plot(x,y,color='midnightblue')
plt.savefig("reddening.pdf")
"""
def clumpFind(data,count):
#Imports
import numpy as np
from sklearn.neighbors import NearestNeighbors,KDTree
tree = KDTree(data)
return tree.query(data,k=count,return_distance=False)
def find_nearest(array, value):
#Imports
import numpy as np
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def condense():
#Imports
import numpy as np
global clusterList
global isoList
global mag
for cluster in clusterList:
mag = cluster.mag[:,:]
mag = mag[mag[:,1].argsort()]
gmag = list(mag[:,1])
gmin = mag[0,1]
gmax = mag[-1,1]
div = 50
seg = (gmax-gmin)/div
minpoints = 1
condensed = np.empty((0,3))
turnPoints = []
for i in range(div):
sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
#print(np.array(sliced).shape)
#Skip forseen problems with empty arrays
if len(sliced) < minpoints:
continue
condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
condensed = condensed[::-1]
#Find Turning Point
for i,point1 in enumerate(condensed):
count = 0
total = 0
for j,point2 in enumerate(condensed):
if j > i:
total += 1
if point2[0] > point1[0]:
count += 1
threshold = 0.5
if not (total == 0) and not (count == 0):
if count/total >= threshold:
turnPoints.append([point1[0],point1[1]])
if len(turnPoints) == 0:
print("No turning point identified")
return
else:
turnPoints = sorted(turnPoints,key=lambda x: x[0])
cluster.turnPoint = turnPoints[0]
print(f"Turning point: {cluster.turnPoint}")
#Recalc with the turnPoint limit enforced - Ignore blue stragglers
condensed = np.empty((0,3))
for i in range(div):
rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
sliced = np.empty((0,2))
for point in rawSliced:
#print(point)
if point[0] >= cluster.turnPoint[0]:
sliced = np.r_[sliced,[[point[0],point[1]]]]
#Skip forseen problems with empty arrays
if len(sliced) == 0:
continue
#print(sliced)
condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
condensed = condensed[::-1]
distances=[]
#Find average distance to nearby points
for i,point1 in enumerate(condensed):
for j,point2 in enumerate(condensed):
if np.abs(i-j) == 1:
distances.append(np.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2))
medDist = np.median(distances)
import sys
import os
from utils.io import IO
import numpy as np
import torch
from utils.shapenet_pre_handle import pre_handle
from extensions.chamfer_dist import ChamferDistance
from utils.file_parsing import file_parsing
chamfer_dist = ChamferDistance()
def MMD_Cal(shapenet_list):
shapenet_MMD = []
for folders in shapenet_list:
folder_MMD = []
for couples in folders:
forward_cloud = IO.get(couples[0]).astype(np.float32)
forward_cloud = np.expand_dims(forward_cloud, axis=0)
forward_cloud = torch.Tensor(forward_cloud)
forward_cloud = forward_cloud.cuda()
next_cloud = IO.get(couples[1]).astype(np.float32)
next_cloud = np.expand_dims(next_cloud, axis=0)
next_cloud = torch.Tensor(next_cloud)
next_cloud = next_cloud.cuda()
score_MMD = chamfer_dist(forward_cloud, next_cloud).item()
folder_MMD.append(score_MMD)
shapenet_MMD.append(folder_MMD)
avg_list = []
for folders in shapenet_MMD:
avg = np.mean(folders)
avg_list.append(avg)  # assumed continuation: collect the per-folder mean Chamfer distance
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_logn_mean_regression [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_logn_mean_regression&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression).
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from arpym.tools.logo import add_logo
from arpym.statistics import simulate_normal
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-parameters)
# +
mu_xz = [0, 0] # location parameter
# dispersion parameters
rho_xz = -0.5
sig_x = 0.92
sig_z = 0.85
j_ = 10**4 # number of simulations
def chi_arb(var):
return 1/var
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step01): Generate samples
# +
sig2_xz = np.array([[sig_x**2, rho_xz*sig_x*sig_z],
[rho_xz*sig_x*sig_z, sig_z**2]])
# jointly lognormal samples
x, z = np.exp(simulate_normal(mu_xz, sig2_xz, j_).T)
no_points_grid = 500
x_grid = np.linspace(10**-6, 2*max(np.percentile(x, 95), np.percentile(z, 95)),
no_points_grid)
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step02): Compute prediction, residuals and E{X|z}
# +
def chi(var):
return np.exp(mu_xz[0]+rho_xz*sig_x/sig_z*(np.log(var)-mu_xz[1]) +
0.5*(1-rho_xz**2)*sig_x**2)  # conditional-variance term; the square matches the definition of a below
xhat = chi(z)
u = x-xhat
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step03): Expectation and covariance of (Xhat, U)
# +
# expectation and covariance of (lnXhat, lnX)
a = mu_xz[0]-mu_xz[1]*rho_xz*sig_x/sig_z+0.5*(1-rho_xz**2)*sig_x**2
b = rho_xz*sig_x/sig_z
mu_logxhat_logx = np.array([a+b*mu_xz[1], mu_xz[0]])
sig2_logxhat_logx = [[b**2*sig_z**2, b*rho_xz*sig_x*sig_z],
[b*rho_xz*sig_x*sig_z, sig_x**2]]
sig2_logxhat_logx = np.array(sig2_logxhat_logx)
# expectation and covariance of (Xhat,X)
mu_xhat_x = np.exp(mu_logxhat_logx+0.5*np.diag(sig2_logxhat_logx))
sig2_xhat_x = np.diag(mu_xhat_x)@\
(np.exp(sig2_logxhat_logx) - np.ones((2, 2)))@\
np.diag(mu_xhat_x)
# expectation and covariance of (Xhat,U)
d = np.array([[1, 0], [-1, 1]]) # (Xhat, U)=d*(Xhat, X)
mu_xhat_u = d@mu_xhat_x
sig2_xhat_u = d@[email protected]
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_logn_mean_regression-implementation-step04): expectation-covariance ellipsoid computations
# +
# matrix decomposition
lambda2, e = np.linalg.eig(sig2_xhat_u)
lambda2, order = np.sort(lambda2), np.argsort(lambda2)
e = e[:, order]
diag_lambda = np.diagflat(np.sqrt(lambda2))
# expectation-covariance ellipsoid computations
theta = np.linspace(0, 2*np.pi, no_points_grid) # angle
y = [np.cos(theta), np.sin(theta)]  # points on the unit circle; the sin term is an assumed completion of the truncated line
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pandas.util.testing import assert_frame_equal
import cudf
import dask_cudf
import dask_cudf as dgd
def test_from_cudf():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=10000), "y": np.random.normal(size=10000)}
)
gdf = cudf.DataFrame.from_pandas(df)
# Test simple around to/from dask
ingested = dgd.from_cudf(gdf, npartitions=2)
assert_frame_equal(ingested.compute().to_pandas(), df)
# Test conversion to dask.dataframe
ddf = ingested.to_dask_dataframe()
assert_frame_equal(ddf.compute(), df)
def _fragmented_gdf(df, nsplit):
n = len(df)
# Split dataframe in *nsplit*
subdivsize = n // nsplit
starts = [i * subdivsize for i in range(nsplit)]
ends = starts[1:] + [None]
frags = [df[s:e] for s, e in zip(starts, ends)]
return frags
def test_concat():
np.random.seed(0)
n = 1000
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=n), "y": np.random.normal(size=n)}
)
gdf = cudf.DataFrame.from_pandas(df)
frags = _fragmented_gdf(gdf, nsplit=13)
# Combine with concat
concated = dgd.concat(frags)
assert_frame_equal(df, concated.compute().to_pandas())
def test_append():
np.random.seed(0)
n = 1000
df = pd.DataFrame(
{"x": | np.random.randint(0, 5, size=n) | numpy.random.randint |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for reading/manipulating/writing VASP output files.
"""
import datetime
import glob
import itertools
import json
import logging
import math
import os
import re
import warnings
import xml.etree.ElementTree as ET
from collections import defaultdict
from io import StringIO
from pathlib import Path
from typing import DefaultDict, List, Optional, Tuple, Union
import numpy as np
from monty.dev import deprecated
from monty.io import reverse_readfile, zopen
from monty.json import MSONable, jsanitize
from monty.os.path import zpath
from monty.re import regrep
from scipy.interpolate import RegularGridInterpolator
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.units import unitized
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
get_reconstructed_band_structure,
)
from pymatgen.electronic_structure.core import Magmom, Orbital, OrbitalType, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, Potcar
from pymatgen.io.wannier90 import Unk
from pymatgen.util.io_utils import clean_lines, micro_pyawk
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
logger = logging.getLogger(__name__)
def _parse_parameters(val_type, val):
"""
Helper function to convert a Vasprun parameter into the proper type.
Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
"""
if val_type == "logical":
return val == "T"
if val_type == "int":
return int(val)
if val_type == "string":
return val.strip()
return float(val)
def _parse_v_parameters(val_type, val, filename, param_name):
r"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains *** for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise OSError("Error in parsing vasprun.xml")
return val
def _parse_varray(elem):
if elem.get("type", None) == "logical":
m = [[i == "T" for i in v.text.split()] for v in elem]
else:
m = [[_vasprun_float(i) for i in v.text.split()] for v in elem]
return m
def _parse_from_incar(filename, key):
"""
Helper function to parse a parameter from the INCAR.
"""
dirname = os.path.dirname(filename)
for f in os.listdir(dirname):
if re.search(r"INCAR", f):
warnings.warn("INCAR found. Using " + key + " from INCAR.")
incar = Incar.from_file(os.path.join(dirname, f))
if key in incar:
return incar[key]
return None
return None
def _vasprun_float(f):
"""
Large numbers are often represented as ********* in the vasprun.
This function parses these values as np.nan
"""
try:
return float(f)
except ValueError as e:
f = f.strip()
if f == "*" * len(f):
warnings.warn("Float overflow (*******) encountered in vasprun")
return np.nan
raise e
class Vasprun(MSONable):
"""
Vastly improved cElementTree-based parser for vasprun.xml files. Uses
iterparse to support incremental parsing of large files.
Speedup over DOM is at least 2x for smallish files (~1Mb) to orders of
magnitude for larger files (~10Mb).
**Vasp results**
.. attribute:: ionic_steps
All ionic steps in the run as a list of
{"structure": structure at end of run,
"electronic_steps": {All electronic step data in vasprun file},
"stresses": stress matrix}
.. attribute:: tdos
Total dos calculated at the end of run.
.. attribute:: idos
Integrated dos calculated at the end of run.
.. attribute:: pdos
List of list of PDos objects. Access as pdos[atomindex][orbitalindex]
.. attribute:: efermi
Fermi energy
.. attribute:: eigenvalues
Available only if parse_eigen=True. Final eigenvalues as a dict of
{(spin, kpoint index):[[eigenvalue, occu]]}.
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint index is 0-based (unlike the 1-based indexing in VASP).
.. attribute:: projected_eigenvalues
Final projected eigenvalues as a dict of {spin: nd-array}. To access
a particular value, you need to do
Vasprun.projected_eigenvalues[spin][kpoint index][band index][atom index][orbital_index]
This representation is based on actual ordering in VASP and is meant as
an intermediate representation to be converted into proper objects. The
kpoint, band and atom indices are 0-based (unlike the 1-based indexing
in VASP).
.. attribute:: projected_magnetisation
Final projected magnetisation as a numpy array with the shape (nkpoints, nbands,
natoms, norbitals, 3). Where the last axis is the contribution in the 3
cartesian directions. This attribute is only set if spin-orbit coupling
(LSORBIT = True) or non-collinear magnetism (LNONCOLLINEAR = True) is turned
on in the INCAR.
.. attribute:: other_dielectric
Dictionary, with the tag comment as key, containing other variants of
the real and imaginary part of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
.. attribute:: nionic_steps
The total number of ionic steps. This number is always equal
to the total number of steps in the actual run even if
ionic_step_skip is used.
.. attribute:: force_constants
Force constants computed in phonon DFPT run(IBRION = 8).
The data is a 4D numpy array of shape (natoms, natoms, 3, 3).
.. attribute:: normalmode_eigenvals
Normal mode frequencies.
1D numpy array of size 3*natoms.
.. attribute:: normalmode_eigenvecs
Normal mode eigen vectors.
3D numpy array of shape (3*natoms, natoms, 3).
**Vasp inputs**
.. attribute:: incar
Incar object for parameters specified in INCAR file.
.. attribute:: parameters
Incar object with parameters that vasp actually used, including all
defaults.
.. attribute:: kpoints
Kpoints object for KPOINTS specified in run.
.. attribute:: actual_kpoints
List of actual kpoints, e.g.,
[[0.25, 0.125, 0.08333333], [-0.25, 0.125, 0.08333333],
[0.25, 0.375, 0.08333333], ....]
.. attribute:: actual_kpoints_weights
List of kpoint weights, E.g.,
[0.04166667, 0.04166667, 0.04166667, 0.04166667, 0.04166667, ....]
.. attribute:: atomic_symbols
List of atomic symbols, e.g., ["Li", "Fe", "Fe", "P", "P", "P"]
.. attribute:: potcar_symbols
List of POTCAR symbols. e.g.,
["PAW_PBE Li 17Jan2003", "PAW_PBE Fe 06Sep2000", ..]
Author: <NAME>
"""
def __init__(
self,
filename,
ionic_step_skip=None,
ionic_step_offset=0,
parse_dos=True,
parse_eigen=True,
parse_projected_eigen=False,
parse_potcar_file=True,
occu_tol=1e-8,
separate_spins=False,
exception_on_bad_xml=True,
):
"""
Args:
filename (str): Filename to parse
ionic_step_skip (int): If ionic_step_skip is a number > 1,
only every ionic_step_skip ionic steps will be read for
structure and energies. This is very useful if you are parsing
very large vasprun.xml files and you are not interested in every
single ionic step. Note that the final energies may not be the
actual final energy in the vasprun.
ionic_step_offset (int): Used together with ionic_step_skip. If set,
the first ionic step read will be offset by the amount of
ionic_step_offset. For example, if you want to start reading
every 10th structure but only from the 3rd structure onwards,
set ionic_step_skip to 10 and ionic_step_offset to 3. Main use
case is when doing statistical structure analysis with
extremely long time scale multiple VASP calculations of
varying numbers of steps.
parse_dos (bool): Whether to parse the dos. Defaults to True. Set
to False to shave off significant time from the parsing if you
are not interested in getting those data.
parse_eigen (bool): Whether to parse the eigenvalues. Defaults to
True. Set to False to shave off significant time from the
parsing if you are not interested in getting those data.
parse_projected_eigen (bool): Whether to parse the projected
eigenvalues and magnetisation. Defaults to False. Set to True to obtain
projected eigenvalues and magnetisation. **Note that this can take an
extreme amount of time and memory.** So use this wisely.
parse_potcar_file (bool/str): Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By Default, looks in
the same directory as the vasprun.xml, with same extensions as
Vasprun.xml. If a string is provided, looks at that filepath.
occu_tol (float): Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
separate_spins (bool): Whether the band gap, CBM, and VBM should be
reported for each individual spin channel. Defaults to False,
which computes the eigenvalue band properties independent of
the spin orientation. If True, the calculation must be spin-polarized.
exception_on_bad_xml (bool): Whether to throw a ParseException if a
malformed XML is detected. Default to True, which ensures only
proper vasprun.xml are parsed. You can set to False if you want
partial results (e.g., if you are monitoring a calculation during a
run), but use the results with care. A warning is issued.
"""
self.filename = filename
self.ionic_step_skip = ionic_step_skip
self.ionic_step_offset = ionic_step_offset
self.occu_tol = occu_tol
self.separate_spins = separate_spins
self.exception_on_bad_xml = exception_on_bad_xml
with zopen(filename, "rt") as f:
if ionic_step_skip or ionic_step_offset:
# remove parts of the xml file and parse the string
run = f.read()
steps = run.split("<calculation>")
# The text before the first <calculation> is the preamble!
preamble = steps.pop(0)
self.nionic_steps = len(steps)
new_steps = steps[ionic_step_offset :: int(ionic_step_skip)]
# add the tailing information in the last step from the run
to_parse = "<calculation>".join(new_steps)
if steps[-1] != new_steps[-1]:
to_parse = "{}<calculation>{}{}".format(preamble, to_parse, steps[-1].split("</calculation>")[-1])
else:
to_parse = f"{preamble}<calculation>{to_parse}"
self._parse(
StringIO(to_parse),
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
else:
self._parse(
f,
parse_dos=parse_dos,
parse_eigen=parse_eigen,
parse_projected_eigen=parse_projected_eigen,
)
self.nionic_steps = len(self.ionic_steps)
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
self.update_charge_from_potcar(parse_potcar_file)
if self.incar.get("ALGO", "") not in ["CHI", "BSE"] and (not self.converged):
msg = "%s is an unconverged VASP run.\n" % filename
msg += "Electronic convergence reached: %s.\n" % self.converged_electronic
msg += "Ionic convergence reached: %s." % self.converged_ionic
warnings.warn(msg, UnconvergedVASPWarning)
def _parse(self, stream, parse_dos, parse_eigen, parse_projected_eigen):
self.efermi = None
self.eigenvalues = None
self.projected_eigenvalues = None
self.projected_magnetisation = None
self.dielectric_data = {}
self.other_dielectric = {}
ionic_steps = []
parsed_header = False
try:
for event, elem in ET.iterparse(stream):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
if not hasattr(self, "kpoints"):
(
self.kpoints,
self.actual_kpoints,
self.actual_kpoints_weights,
) = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "structure" and elem.attrib.get("name") == "initialpos":
self.initial_structure = self._parse_structure(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
if tag == "calculation":
parsed_header = True
if not self.parameters.get("LCHIMAG", False):
ionic_steps.append(self._parse_calculation(elem))
else:
ionic_steps.extend(self._parse_chemical_shielding_calculation(elem))
elif parse_dos and tag == "dos":
try:
self.tdos, self.idos, self.pdos = self._parse_dos(elem)
self.efermi = self.tdos.efermi
self.dos_has_errors = False
except Exception:
self.dos_has_errors = True
elif parse_eigen and tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
(
self.projected_eigenvalues,
self.projected_magnetisation,
) = self._parse_projected_eigen(elem)
elif tag == "dielectricfunction":
if (
"comment" not in elem.attrib
or elem.attrib["comment"] == "INVERSE MACROSCOPIC DIELECTRIC TENSOR (including "
"local field effects in RPA (Hartree))"
):
if "density" not in self.dielectric_data:
self.dielectric_data["density"] = self._parse_diel(elem)
elif "velocity" not in self.dielectric_data:
# "velocity-velocity" is also named
# "current-current" in OUTCAR
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
raise NotImplementedError("This vasprun.xml has >2 unlabelled dielectric functions")
else:
comment = elem.attrib["comment"]
# VASP 6+ has labels for the density and current
# derived dielectric constants
if comment == "density-density":
self.dielectric_data["density"] = self._parse_diel(elem)
elif comment == "current-current":
self.dielectric_data["velocity"] = self._parse_diel(elem)
else:
self.other_dielectric[comment] = self._parse_diel(elem)
elif tag == "varray" and elem.attrib.get("name") == "opticaltransitions":
self.optical_transition = np.array(_parse_varray(elem))
elif tag == "structure" and elem.attrib.get("name") == "finalpos":
self.final_structure = self._parse_structure(elem)
elif tag == "dynmat":
hessian, eigenvalues, eigenvectors = self._parse_dynmat(elem)
natoms = len(self.atomic_symbols)
hessian = np.array(hessian)
self.force_constants = np.zeros((natoms, natoms, 3, 3), dtype="double")
for i in range(natoms):
for j in range(natoms):
self.force_constants[i, j] = hessian[i * 3 : (i + 1) * 3, j * 3 : (j + 1) * 3]
phonon_eigenvectors = []
for ev in eigenvectors:
phonon_eigenvectors.append(np.array(ev).reshape(natoms, 3))
self.normalmode_eigenvals = np.array(eigenvalues)
self.normalmode_eigenvecs = np.array(phonon_eigenvectors)
except ET.ParseError as ex:
if self.exception_on_bad_xml:
raise ex
warnings.warn(
"XML is malformed. Parsing has stopped but partial data is available.",
UserWarning,
)
self.ionic_steps = ionic_steps
self.vasp_version = self.generator["version"]
@property
def structures(self):
"""
Returns:
List of Structure objects for the structure at each ionic step.
"""
return [step["structure"] for step in self.ionic_steps]
@property
def epsilon_static(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant. Present when it's a DFPT run
(LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon", [])
@property
def epsilon_static_wolfe(self):
"""
Property only available for DFPT calculations.
Returns:
The static part of the dielectric constant without any local field
effects. Present when it's a DFPT run (LEPSILON=TRUE)
"""
return self.ionic_steps[-1].get("epsilon_rpa", [])
@property
def epsilon_ionic(self):
"""
Property only available for DFPT calculations and when IBRION=5, 6, 7 or 8.
Returns:
The ionic part of the static dielectric constant. Present when it's a
DFPT run (LEPSILON=TRUE) and IBRION=5, 6, 7 or 8
"""
return self.ionic_steps[-1].get("epsilon_ion", [])
@property
def dielectric(self):
"""
Returns:
The real and imaginary part of the dielectric constant (e.g., computed
by RPA) as a function of the energy (frequency). Optical properties (e.g.
absorption coefficient) can be obtained through this.
The data is given as a tuple of 3 values containing each of them
the energy, the real part tensor, and the imaginary part tensor
([energies],[[real_partxx,real_partyy,real_partzz,real_partxy,
real_partyz,real_partxz]],[[imag_partxx,imag_partyy,imag_partzz,
imag_partxy, imag_partyz, imag_partxz]])
"""
return self.dielectric_data["density"]
@property
def optical_absorption_coeff(self):
"""
Calculate the optical absorption coefficient
from the dielectric constants. Note that this method is only
implemented for optical properties calculated with GGA and BSE.
Returns:
optical absorption coefficient in list
"""
if self.dielectric_data["density"]:
real_avg = [
sum(self.dielectric_data["density"][1][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))
]
imag_avg = [
sum(self.dielectric_data["density"][2][i][0:3]) / 3
for i in range(len(self.dielectric_data["density"][0]))
]
def f(freq, real, imag):
"""
The optical absorption coefficient calculated in terms of
equation
"""
hbar = 6.582119514e-16  # reduced Planck constant in eV*s
coeff = np.sqrt(np.sqrt(real ** 2 + imag ** 2) - real) * np.sqrt(2) / hbar * freq
return coeff
absorption_coeff = [
f(freq, real, imag) for freq, real, imag in zip(self.dielectric_data["density"][0], real_avg, imag_avg)
]
return absorption_coeff
@property
def converged_electronic(self):
"""
Returns:
True if electronic step convergence has been reached in the final
ionic step
"""
final_esteps = self.ionic_steps[-1]["electronic_steps"]
if "LEPSILON" in self.incar and self.incar["LEPSILON"]:
i = 1
to_check = {"e_wo_entrp", "e_fr_energy", "e_0_energy"}
while set(final_esteps[i].keys()) == to_check:
i += 1
return i + 1 != self.parameters["NELM"]
return len(final_esteps) < self.parameters["NELM"]
@property
def converged_ionic(self):
"""
Returns:
True if ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw
@property
def converged(self):
"""
Returns:
True if a relaxation run is converged both ionically and
electronically.
"""
return self.converged_electronic and self.converged_ionic
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from the vasp run.
"""
try:
final_istep = self.ionic_steps[-1]
if final_istep["e_wo_entrp"] != final_istep["electronic_steps"][-1]["e_0_energy"]:
warnings.warn(
"Final e_wo_entrp differs from the final "
"electronic step. VASP may have included some "
"corrections, e.g., vdw. Vasprun will return "
"the final e_wo_entrp, i.e., including "
"corrections in such instances."
)
return final_istep["e_wo_entrp"]
return final_istep["electronic_steps"][-1]["e_0_energy"]
except (IndexError, KeyError):
warnings.warn(
"Calculation does not have a total energy. "
"Possibly a GW or similar kind of run. A value of "
"infinity is returned."
)
return float("inf")
@property
def complete_dos(self):
"""
A complete dos object which incorporates the total dos and all
projected dos.
"""
final_struct = self.final_structure
pdoss = {final_struct[i]: pdos for i, pdos in enumerate(self.pdos)}
return CompleteDos(self.final_structure, self.tdos, pdoss)
@property
def hubbards(self):
"""
Hubbard U values used if a vasprun is a GGA+U run. {} otherwise.
"""
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
if not self.incar.get("LDAU", False):
return {}
us = self.incar.get("LDAUU", self.parameters.get("LDAUU"))
js = self.incar.get("LDAUJ", self.parameters.get("LDAUJ"))
if len(js) != len(us):
js = [0] * len(us)
if len(us) == len(symbols):
return {symbols[i]: us[i] - js[i] for i in range(len(symbols))}
if sum(us) == 0 and sum(js) == 0:
return {}
raise VaspParserError("Length of U value parameters and atomic symbols are mismatched")
@property
def run_type(self):
"""
Returns the run type. Currently detects GGA, metaGGA, HF, HSE, B3LYP,
and hybrid functionals based on relevant INCAR tags. LDA is assigned if
PAW POTCARs are used and no other functional is detected.
Hubbard U terms and vdW corrections are detected automatically as well.
"""
GGA_TYPES = {
"RE": "revPBE",
"PE": "PBE",
"PS": "PBEsol",
"RP": "revPBE+Padé",
"AM": "AM05",
"OR": "optPBE",
"BO": "optB88",
"MK": "optB86b",
"--": "GGA",
}
METAGGA_TYPES = {
"TPSS": "TPSS",
"RTPSS": "revTPSS",
"M06L": "M06-L",
"MBJ": "modified Becke-Johnson",
"SCAN": "SCAN",
"R2SCAN": "R2SCAN",
"RSCAN": "RSCAN",
"MS0": "MadeSimple0",
"MS1": "MadeSimple1",
"MS2": "MadeSimple2",
}
IVDW_TYPES = {
1: "DFT-D2",
10: "DFT-D2",
11: "DFT-D3",
12: "DFT-D3-BJ",
2: "TS",
20: "TS",
21: "TS-H",
202: "MBD",
4: "dDsC",
}
if self.parameters.get("AEXX", 1.00) == 1.00:
rt = "HF"
elif self.parameters.get("HFSCREEN", 0.30) == 0.30:
rt = "HSE03"
elif self.parameters.get("HFSCREEN", 0.20) == 0.20:
rt = "HSE06"
elif self.parameters.get("AEXX", 0.20) == 0.20:
rt = "B3LYP"
elif self.parameters.get("LHFCALC", True):
rt = "PBEO or other Hybrid Functional"
elif self.incar.get("METAGGA") and self.incar.get("METAGGA") not in [
"--",
"None",
]:
incar_tag = self.incar.get("METAGGA", "").strip().upper()
rt = METAGGA_TYPES.get(incar_tag, incar_tag)
elif self.parameters.get("GGA"):
incar_tag = self.parameters.get("GGA", "").strip().upper()
rt = GGA_TYPES.get(incar_tag, incar_tag)
elif self.potcar_symbols[0].split()[0] == "PAW":
rt = "LDA"
else:
rt = "unknown"
warnings.warn("Unknown run type!")
if self.is_hubbard or self.parameters.get("LDAU", True):
rt += "+U"
if self.parameters.get("LUSE_VDW", False):
rt += "+rVV10"
elif self.incar.get("IVDW") in IVDW_TYPES:
rt += "+vdW-" + IVDW_TYPES[self.incar.get("IVDW")]
elif self.incar.get("IVDW"):
rt += "+vdW-unknown"
return rt
@property
def is_hubbard(self):
"""
True if run is a DFT+U run.
"""
if len(self.hubbards) == 0:
return False
return sum(self.hubbards.values()) > 1e-8
@property
def is_spin(self):
"""
True if run is spin-polarized.
"""
return self.parameters.get("ISPIN", 1) == 2
def get_computed_entry(
self, inc_structure=True, parameters=None, data=None, entry_id=f"vasprun-{datetime.datetime.now()}"
):
"""
Returns a ComputedEntry or ComputedStructureEntry from the vasprun.
Args:
inc_structure (bool): Set to True if you want
ComputedStructureEntries to be returned instead of
ComputedEntries.
parameters (list): Input parameters to include. It has to be one of
the properties supported by the Vasprun object. If
parameters is None, a default set of parameters that are
necessary for typical post-processing will be set.
data (list): Output data to include. Has to be one of the properties
supported by the Vasprun object.
entry_id (object): Specify an entry id for the ComputedEntry. Defaults to
"vasprun-{current datetime}"
Returns:
ComputedStructureEntry/ComputedEntry
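Example (illustrative; assumes ``vr`` is an already-parsed Vasprun)::

    entry = vr.get_computed_entry(inc_structure=True)
    print(entry.energy, entry.composition.reduced_formula)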
"""
param_names = {
"is_hubbard",
"hubbards",
"potcar_symbols",
"potcar_spec",
"run_type",
}
if parameters:
param_names.update(parameters)
params = {p: getattr(self, p) for p in param_names}
data = {p: getattr(self, p) for p in data} if data is not None else {}
if inc_structure:
return ComputedStructureEntry(
self.final_structure, self.final_energy, parameters=params, data=data, entry_id=entry_id
)
return ComputedEntry(
self.final_structure.composition, self.final_energy, parameters=params, data=data, entry_id=entry_id
)
def get_band_structure(
self,
kpoints_filename: Optional[str] = None,
efermi: Optional[Union[float, str]] = None,
line_mode: bool = False,
force_hybrid_mode: bool = False,
):
"""
Returns the band structure as a BandStructure object
Args:
kpoints_filename: Full path of the KPOINTS file from which
the band structure is generated.
If none is provided, the code will try to intelligently
determine the appropriate KPOINTS file by substituting the
filename of the vasprun.xml with KPOINTS.
The latter is the default behavior.
efermi: The Fermi energy associated with the bandstructure, in eV. By
default (None), uses the value reported by VASP in vasprun.xml. To
manually set the Fermi energy, pass a float. Pass 'smart' to use the
`calculate_efermi()` method, which calculates the Fermi level by first
checking whether it lies within a small tolerance (by default 0.001 eV)
of a band edge. If it does, the Fermi level is placed in the center of
the bandgap. Otherwise, the value is identical to the value reported by
VASP.
line_mode: Force the band structure to be considered as
a run along symmetry lines. (Default: False)
force_hybrid_mode: Makes it possible to read in self-consistent band
structure calculations for every type of functional. (Default: False)
Returns:
a BandStructure object (or more specifically a
BandStructureSymmLine object if the run is detected to be a run
along symmetry lines)
Two types of runs along symmetry lines are accepted: non-sc with
Line-Mode in the KPOINT file or hybrid, self-consistent with a
uniform grid+a few kpoints along symmetry lines (explicit KPOINTS
file) (it's not possible to run a non-sc band structure with hybrid
functionals). The explicit KPOINTS file needs to have data on the
kpoint label as commentary.
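Example (illustrative; assumes a line-mode KPOINTS file sits next to the
vasprun.xml, which is the default lookup behaviour described above)::

    bs = vr.get_band_structure(line_mode=True)
    print(bs.get_band_gap())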
"""
if not kpoints_filename:
kpoints_filename = zpath(os.path.join(os.path.dirname(self.filename), "KPOINTS"))
if kpoints_filename:
if not os.path.exists(kpoints_filename) and line_mode is True:
raise VaspParserError("KPOINTS needed to obtain band structure along symmetry lines.")
if efermi == "smart":
efermi = self.calculate_efermi()
elif efermi is None:
efermi = self.efermi
kpoint_file = None
if kpoints_filename and os.path.exists(kpoints_filename):
kpoint_file = Kpoints.from_file(kpoints_filename)
lattice_new = Lattice(self.final_structure.lattice.reciprocal_lattice.matrix)
kpoints = [np.array(self.actual_kpoints[i]) for i in range(len(self.actual_kpoints))]
p_eigenvals: DefaultDict[Spin, list] = defaultdict(list)
eigenvals: DefaultDict[Spin, list] = defaultdict(list)
nkpts = len(kpoints)
for spin, v in self.eigenvalues.items():
v = np.swapaxes(v, 0, 1)
eigenvals[spin] = v[:, :, 0]
if self.projected_eigenvalues:
peigen = self.projected_eigenvalues[spin]
# Original axes for self.projected_eigenvalues are kpoints,
# band, ion, orb.
# For BS input, we need band, kpoints, orb, ion.
peigen = np.swapaxes(peigen, 0, 1) # Swap kpoint and band axes
peigen = np.swapaxes(peigen, 2, 3) # Swap ion and orb axes
p_eigenvals[spin] = peigen
# for b in range(min_eigenvalues):
# p_eigenvals[spin].append(
# [{Orbital(orb): v for orb, v in enumerate(peigen[b, k])}
# for k in range(nkpts)])
# check if we have an hybrid band structure computation
# for this we look at the presence of the LHFCALC tag
hybrid_band = False
if self.parameters.get("LHFCALC", False) or 0.0 in self.actual_kpoints_weights:
hybrid_band = True
if kpoint_file is not None:
if kpoint_file.style == Kpoints.supported_modes.Line_mode:
line_mode = True
if line_mode:
labels_dict = {}
if hybrid_band or force_hybrid_mode:
start_bs_index = 0
for i in range(len(self.actual_kpoints)):
if self.actual_kpoints_weights[i] == 0.0:
start_bs_index = i
break
for i in range(start_bs_index, len(kpoint_file.kpts)):
if kpoint_file.labels[i] is not None:
labels_dict[kpoint_file.labels[i]] = kpoint_file.kpts[i]
# remake the data only considering line band structure k-points
# (weight = 0.0 kpoints)
nbands = len(eigenvals[Spin.up])
kpoints = kpoints[start_bs_index:nkpts]
up_eigen = [eigenvals[Spin.up][i][start_bs_index:nkpts] for i in range(nbands)]
if self.projected_eigenvalues:
p_eigenvals[Spin.up] = [p_eigenvals[Spin.up][i][start_bs_index:nkpts] for i in range(nbands)]
if self.is_spin:
down_eigen = [eigenvals[Spin.down][i][start_bs_index:nkpts] for i in range(nbands)]
eigenvals[Spin.up] = up_eigen
eigenvals[Spin.down] = down_eigen
if self.projected_eigenvalues:
p_eigenvals[Spin.down] = [
p_eigenvals[Spin.down][i][start_bs_index:nkpts] for i in range(nbands)
]
else:
eigenvals[Spin.up] = up_eigen
else:
if "" in kpoint_file.labels:
raise Exception(
"A band structure along symmetry lines "
"requires a label for each kpoint. "
"Check your KPOINTS file"
)
labels_dict = dict(zip(kpoint_file.labels, kpoint_file.kpts))
labels_dict.pop(None, None)
return BandStructureSymmLine(
kpoints,
eigenvals,
lattice_new,
efermi,
labels_dict,
structure=self.final_structure,
projections=p_eigenvals,
)
return BandStructure(
kpoints,
eigenvals,
lattice_new,
efermi,
structure=self.final_structure,
projections=p_eigenvals,
)
@property
def eigenvalue_band_properties(self):
"""
Band properties from the eigenvalues as a tuple,
(band gap, cbm, vbm, is_band_gap_direct). In the case of separate_spins=True,
the band gap, cbm, vbm, and is_band_gap_direct are each lists of length 2,
with index 0 representing the spin-up channel and index 1 representing
the spin-down channel.
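Example (illustrative; with separate_spins=False all four values are scalars)::

    gap, cbm, vbm, is_direct = vr.eigenvalue_band_properties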
"""
vbm = -float("inf")
vbm_kpoint = None
cbm = float("inf")
cbm_kpoint = None
vbm_spins = []
vbm_spins_kpoints = []
cbm_spins = []
cbm_spins_kpoints = []
if self.separate_spins and len(self.eigenvalues.keys()) != 2:
raise ValueError("The separate_spins flag can only be True if ISPIN = 2")
for spin, d in self.eigenvalues.items():
if self.separate_spins:
vbm = -float("inf")
cbm = float("inf")
for k, val in enumerate(d):
for (eigenval, occu) in val:
if occu > self.occu_tol and eigenval > vbm:
vbm = eigenval
vbm_kpoint = k
elif occu <= self.occu_tol and eigenval < cbm:
cbm = eigenval
cbm_kpoint = k
if self.separate_spins:
vbm_spins.append(vbm)
vbm_spins_kpoints.append(vbm_kpoint)
cbm_spins.append(cbm)
cbm_spins_kpoints.append(cbm_kpoint)
if self.separate_spins:
return (
[max(cbm_spins[0] - vbm_spins[0], 0), max(cbm_spins[1] - vbm_spins[1], 0)],
[cbm_spins[0], cbm_spins[1]],
[vbm_spins[0], vbm_spins[1]],
[vbm_spins_kpoints[0] == cbm_spins_kpoints[0], vbm_spins_kpoints[1] == cbm_spins_kpoints[1]],
)
return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
def calculate_efermi(self, tol=0.001):
"""
Calculate the Fermi level using a robust algorithm.
Sometimes VASP can put the Fermi level just inside of a band due to issues in
the way band occupancies are handled. This algorithm tries to detect and correct
for this bug.
Slightly more details are provided here: https://www.vasp.at/forum/viewtopic.php?f=4&t=17981
"""
# drop weights and set shape nbands, nkpoints
all_eigs = np.concatenate([eigs[:, :, 0].transpose(1, 0) for eigs in self.eigenvalues.values()])
def crosses_band(fermi):
eigs_below = np.any(all_eigs < fermi, axis=1)
eigs_above = np.any(all_eigs > fermi, axis=1)
return np.any(eigs_above & eigs_below)
def get_vbm_cbm(fermi):
return np.max(all_eigs[all_eigs < fermi]), np.min(all_eigs[all_eigs > fermi])
if not crosses_band(self.efermi):
# Fermi doesn't cross a band; safe to use VASP fermi level
return self.efermi
# if the Fermi level crosses a band, check if we are very close to band gap;
# if so, then likely this is a VASP tetrahedron bug
if not crosses_band(self.efermi + tol):
# efermi placed slightly in the valence band
# set Fermi level half way between valence and conduction bands
vbm, cbm = get_vbm_cbm(self.efermi + tol)
return (cbm + vbm) / 2
if not crosses_band(self.efermi - tol):
# efermi placed slightly in the conduction band
# set Fermi level half way between valence and conduction bands
vbm, cbm = get_vbm_cbm(self.efermi - tol)
return (cbm + vbm) / 2
# it is actually a metal
return self.efermi
def get_potcars(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar from path.
"""
def get_potcar_in_path(p):
for fn in os.listdir(os.path.abspath(p)):
if fn.startswith("POTCAR") and ".spec" not in fn:
pc = Potcar.from_file(os.path.join(p, fn))
if {d.header for d in pc} == set(self.potcar_symbols):
return pc
warnings.warn("No POTCAR file with matching TITEL fields" " was found in {}".format(os.path.abspath(p)))
return None
if isinstance(path, (str, Path)):
path = str(path)
if "POTCAR" in path:
potcar = Potcar.from_file(path)
if {d.TITEL for d in potcar} != set(self.potcar_symbols):
raise ValueError("Potcar TITELs do not match Vasprun")
else:
potcar = get_potcar_in_path(path)
elif isinstance(path, bool) and path:
potcar = get_potcar_in_path(os.path.split(self.filename)[0])
else:
potcar = None
return potcar
def get_trajectory(self):
"""
This method returns a Trajectory object, which is an alternative
representation of self.structures into a single object. Forces are
added to the Trajectory as site properties.
Returns: a Trajectory
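Example (illustrative; assumes ``vr`` is a Vasprun parsed from a relaxation run)::

    traj = vr.get_trajectory()   # one frame per ionic step
    print(len(traj))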
"""
# required due to circular imports
# TODO: fix pymatgen.core.trajectory so it does not load from io.vasp(!)
from pymatgen.core.trajectory import Trajectory
structs = []
for step in self.ionic_steps:
struct = step["structure"].copy()
struct.add_site_property("forces", step["forces"])
structs.append(struct)
return Trajectory.from_structures(structs, constant_lattice=False)
def update_potcar_spec(self, path):
"""
:param path: Path to search for POTCARs
:return: Potcar spec from path.
"""
potcar = self.get_potcars(path)
if potcar:
self.potcar_spec = [
{"titel": sym, "hash": ps.get_potcar_hash()}
for sym in self.potcar_symbols
for ps in potcar
if ps.symbol == sym.split()[1]
]
def update_charge_from_potcar(self, path):
"""
Sets the charge of a structure based on the POTCARs found.
:param path: Path to search for POTCARs
"""
potcar = self.get_potcars(path)
if potcar and self.incar.get("ALGO", "") not in ["GW0", "G0W0", "GW", "BSE"]:
nelect = self.parameters["NELECT"]
if len(potcar) == len(self.initial_structure.composition.element_composition):
potcar_nelect = sum(
self.initial_structure.composition.element_composition[ps.element] * ps.ZVAL for ps in potcar
)
else:
nums = [len(list(g)) for _, g in itertools.groupby(self.atomic_symbols)]
potcar_nelect = sum(ps.ZVAL * num for ps, num in zip(potcar, nums))
charge = nelect - potcar_nelect
if charge:
for s in self.structures:
s._charge = charge
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {
"vasp_version": self.vasp_version,
"has_vasp_completed": self.converged,
"nsites": len(self.final_structure),
}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {
"incar": dict(self.incar.items()),
"crystal": self.initial_structure.as_dict(),
"kpoints": self.kpoints.as_dict(),
}
actual_kpts = [
{
"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i],
}
for i in range(len(self.actual_kpoints))
]
vin["kpoints"]["actual_points"] = actual_kpts
vin["nkpoints"] = len(actual_kpts)
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = dict(self.parameters.items())
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
nsites = len(self.final_structure)
try:
vout = {
"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi,
}
except (ArithmeticError, TypeError):
vout = {
"ionic_steps": self.ionic_steps,
"final_energy": self.final_energy,
"final_energy_per_atom": None,
"crystal": self.final_structure.as_dict(),
"efermi": self.efermi,
}
if self.eigenvalues:
eigen = {str(spin): v.tolist() for spin, v in self.eigenvalues.items()}
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm, is_gap_direct=is_direct))
if self.projected_eigenvalues:
vout["projected_eigenvalues"] = {
str(spin): v.tolist() for spin, v in self.projected_eigenvalues.items()
}
if self.projected_magnetisation is not None:
vout["projected_magnetisation"] = self.projected_magnetisation.tolist()
vout["epsilon_static"] = self.epsilon_static
vout["epsilon_static_wolfe"] = self.epsilon_static_wolfe
vout["epsilon_ionic"] = self.epsilon_ionic
d["output"] = vout
return jsanitize(d, strict=True)
def _parse_params(self, elem):
params = {}
for c in elem:
name = c.attrib.get("name")
if c.tag not in ("i", "v"):
p = self._parse_params(c)
if name == "response functions":
# Delete duplicate fields from "response functions",
# which overrides the values in the root params.
p = {k: v for k, v in p.items() if k not in params}
params.update(p)
else:
ptype = c.attrib.get("type")
val = c.text.strip() if c.text else ""
if c.tag == "i":
params[name] = _parse_parameters(ptype, val)
else:
params[name] = _parse_v_parameters(ptype, val, self.filename, name)
elem.clear()
return Incar(params)
@staticmethod
def _parse_atominfo(elem):
for a in elem.findall("array"):
if a.attrib["name"] == "atoms":
atomic_symbols = [rc.find("c").text.strip() for rc in a.find("set")]
elif a.attrib["name"] == "atomtypes":
potcar_symbols = [rc.findall("c")[4].text.strip() for rc in a.find("set")]
# ensure atomic symbols are valid elements
def parse_atomic_symbol(symbol):
try:
return str(Element(symbol))
# vasprun.xml uses X instead of Xe for xenon
except ValueError as e:
if symbol == "X":
return "Xe"
if symbol == "r":
return "Zr"
raise e
elem.clear()
return [parse_atomic_symbol(sym) for sym in atomic_symbols], potcar_symbols
@staticmethod
def _parse_kpoints(elem):
e = elem
if elem.find("generation"):
e = elem.find("generation")
k = Kpoints("Kpoints from vasprun.xml")
k.style = Kpoints.supported_modes.from_string(e.attrib["param"] if "param" in e.attrib else "Reciprocal")
for v in e.findall("v"):
name = v.attrib.get("name")
toks = v.text.split()
if name == "divisions":
k.kpts = [[int(i) for i in toks]]
elif name == "usershift":
k.kpts_shift = [float(i) for i in toks]
elif name in {"genvec1", "genvec2", "genvec3", "shift"}:
setattr(k, name, [float(i) for i in toks])
for va in elem.findall("varray"):
name = va.attrib["name"]
if name == "kpointlist":
actual_kpoints = _parse_varray(va)
elif name == "weights":
weights = [i[0] for i in _parse_varray(va)]
elem.clear()
if k.style == Kpoints.supported_modes.Reciprocal:
k = Kpoints(
comment="Kpoints from vasprun.xml",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(k.kpts),
kpts=actual_kpoints,
kpts_weights=weights,
)
return k, actual_kpoints, weights
def _parse_structure(self, elem):
latt = _parse_varray(elem.find("crystal").find("varray"))
pos = _parse_varray(elem.find("varray"))
struct = Structure(latt, self.atomic_symbols, pos)
sdyn = elem.find("varray/[@name='selective']")
if sdyn:
struct.add_site_property("selective_dynamics", _parse_varray(sdyn))
return struct
@staticmethod
def _parse_diel(elem):
imag = [
[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("imag").find("array").find("set").findall("r")
]
real = [
[_vasprun_float(l) for l in r.text.split()]
for r in elem.find("real").find("array").find("set").findall("r")
]
elem.clear()
return [e[0] for e in imag], [e[1:] for e in real], [e[1:] for e in imag]
@staticmethod
def _parse_optical_transition(elem):
for va in elem.findall("varray"):
if va.attrib.get("name") == "opticaltransitions":
# opticaltransitions array contains oscillator strength and probability of transition
oscillator_strength = np.array(_parse_varray(va))[
0:,
]
probability_transition = np.array(_parse_varray(va))[0:, 1]
return oscillator_strength, probability_transition
def _parse_chemical_shielding_calculation(self, elem):
calculation = []
istep = {}
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
pass
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["structure"] = s
istep["electronic_steps"] = []
calculation.append(istep)
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text) for i in scstep.find("energy").findall("i")}
cur_ene = d["e_fr_energy"]
min_steps = 1 if len(calculation) >= 1 else self.parameters.get("NELMIN", 5)
if len(calculation[-1]["electronic_steps"]) <= min_steps:
calculation[-1]["electronic_steps"].append(d)
else:
last_ene = calculation[-1]["electronic_steps"][-1]["e_fr_energy"]
if abs(cur_ene - last_ene) < 1.0:
calculation[-1]["electronic_steps"].append(d)
else:
calculation.append({"electronic_steps": [d]})
except AttributeError: # not all calculations have an energy
pass
calculation[-1].update(calculation[-1]["electronic_steps"][-1])
return calculation
def _parse_calculation(self, elem):
try:
istep = {i.attrib["name"]: float(i.text) for i in elem.find("energy").findall("i")}
except AttributeError: # not all calculations have an energy
istep = {}
pass
esteps = []
for scstep in elem.findall("scstep"):
try:
d = {i.attrib["name"]: _vasprun_float(i.text) for i in scstep.find("energy").findall("i")}
esteps.append(d)
except AttributeError: # not all calculations have an energy
pass
try:
s = self._parse_structure(elem.find("structure"))
except AttributeError: # not all calculations have a structure
s = None
pass
for va in elem.findall("varray"):
istep[va.attrib["name"]] = _parse_varray(va)
istep["electronic_steps"] = esteps
istep["structure"] = s
elem.clear()
return istep
@staticmethod
def _parse_dos(elem):
efermi = float(elem.find("i").text)
energies = None
tdensities = {}
idensities = {}
for s in elem.find("total").find("array").find("set").findall("set"):
data = np.array(_parse_varray(s))
energies = data[:, 0]
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
tdensities[spin] = data[:, 1]
idensities[spin] = data[:, 2]
pdoss = []
partial = elem.find("partial")
if partial is not None:
orbs = [ss.text for ss in partial.find("array").findall("field")]
orbs.pop(0)
lm = any("x" in s for s in orbs)
for s in partial.find("array").find("set").findall("set"):
pdos = defaultdict(dict)
for ss in s.findall("set"):
spin = Spin.up if ss.attrib["comment"] == "spin 1" else Spin.down
data = np.array(_parse_varray(ss))
nrow, ncol = data.shape
for j in range(1, ncol):
if lm:
orb = Orbital(j - 1)
else:
orb = OrbitalType(j - 1)
pdos[orb][spin] = data[:, j]
pdoss.append(pdos)
elem.clear()
return (
Dos(efermi, energies, tdensities),
Dos(efermi, energies, idensities),
pdoss,
)
@staticmethod
def _parse_eigen(elem):
eigenvalues = defaultdict(list)
for s in elem.find("array").find("set").findall("set"):
spin = Spin.up if s.attrib["comment"] == "spin 1" else Spin.down
for ss in s.findall("set"):
eigenvalues[spin].append(_parse_varray(ss))
eigenvalues = {spin: np.array(v) for spin, v in eigenvalues.items()}
elem.clear()
return eigenvalues
@staticmethod
def _parse_projected_eigen(elem):
root = elem.find("array").find("set")
proj_eigen = defaultdict(list)
for s in root.findall("set"):
spin = int(re.match(r"spin(\d+)", s.attrib["comment"]).group(1))
# Force spin to be +1 or -1
for kpt, ss in enumerate(s.findall("set")):
dk = []
for band, sss in enumerate(ss.findall("set")):
db = _parse_varray(sss)
dk.append(db)
proj_eigen[spin].append(dk)
proj_eigen = {spin: np.array(v) for spin, v in proj_eigen.items()}
if len(proj_eigen) > 2:
# non-collinear magnetism (also spin-orbit coupling) enabled, last three
# "spin channels" are the projected magnetisation of the orbitals in the
# x, y, and z cartesian coordinates
proj_mag = np.stack([proj_eigen.pop(i) for i in range(2, 5)], axis=-1)
proj_eigen = {Spin.up: proj_eigen[1]}
else:
proj_eigen = {Spin.up if k == 1 else Spin.down: v for k, v in proj_eigen.items()}
proj_mag = None
elem.clear()
return proj_eigen, proj_mag
@staticmethod
def _parse_dynmat(elem):
hessian = []
eigenvalues = []
eigenvectors = []
for v in elem.findall("v"):
if v.attrib["name"] == "eigenvalues":
eigenvalues = [float(i) for i in v.text.split()]
for va in elem.findall("varray"):
if va.attrib["name"] == "hessian":
for v in va.findall("v"):
hessian.append([float(i) for i in v.text.split()])
elif va.attrib["name"] == "eigenvectors":
for v in va.findall("v"):
eigenvectors.append([float(i) for i in v.text.split()])
return hessian, eigenvalues, eigenvectors
class BSVasprun(Vasprun):
"""
A highly optimized version of Vasprun that parses only eigenvalues for
bandstructures. All other properties like structures, parameters,
etc. are ignored.
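Example (illustrative sketch; file names are placeholders)::

    bsv = BSVasprun("vasprun.xml", parse_projected_eigen=True)
    bs = bsv.get_band_structure(kpoints_filename="KPOINTS", line_mode=True)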
"""
def __init__(
self,
filename: str,
parse_projected_eigen: Union[bool, str] = False,
parse_potcar_file: Union[bool, str] = False,
occu_tol: float = 1e-8,
separate_spins: bool = False,
):
"""
Args:
filename: Filename to parse
parse_projected_eigen: Whether to parse the projected
eigenvalues. Defaults to False. Set to True to obtain projected
eigenvalues. **Note that this can take an extreme amount of time
and memory.** So use this wisely.
parse_potcar_file: Whether to parse the potcar file to read
the potcar hashes for the potcar_spec attribute. Defaults to True,
where no hashes will be determined and the potcar_spec dictionaries
will read {"symbol": ElSymbol, "hash": None}. By Default, looks in
the same directory as the vasprun.xml, with same extensions as
Vasprun.xml. If a string is provided, looks at that filepath.
occu_tol: Sets the minimum tol for the determination of the
vbm and cbm. Usually the default of 1e-8 works well enough,
but there may be pathological cases.
separate_spins (bool): Whether the band gap, CBM, and VBM should be
reported for each individual spin channel. Defaults to False,
which computes the eigenvalue band properties independent of
the spin orientation. If True, the calculation must be spin-polarized.
"""
self.filename = filename
self.occu_tol = occu_tol
self.separate_spins = separate_spins
with zopen(filename, "rt") as f:
self.efermi = None
parsed_header = False
self.eigenvalues = None
self.projected_eigenvalues = None
for event, elem in ET.iterparse(f):
tag = elem.tag
if not parsed_header:
if tag == "generator":
self.generator = self._parse_params(elem)
elif tag == "incar":
self.incar = self._parse_params(elem)
elif tag == "kpoints":
(
self.kpoints,
self.actual_kpoints,
self.actual_kpoints_weights,
) = self._parse_kpoints(elem)
elif tag == "parameters":
self.parameters = self._parse_params(elem)
elif tag == "atominfo":
self.atomic_symbols, self.potcar_symbols = self._parse_atominfo(elem)
self.potcar_spec = [{"titel": p, "hash": None} for p in self.potcar_symbols]
parsed_header = True
elif tag == "i" and elem.attrib.get("name") == "efermi":
self.efermi = float(elem.text)
elif tag == "eigenvalues":
self.eigenvalues = self._parse_eigen(elem)
elif parse_projected_eigen and tag == "projected":
(
self.projected_eigenvalues,
self.projected_magnetisation,
) = self._parse_projected_eigen(elem)
elif tag == "structure" and elem.attrib.get("name") == "finalpos":
self.final_structure = self._parse_structure(elem)
self.vasp_version = self.generator["version"]
if parse_potcar_file:
self.update_potcar_spec(parse_potcar_file)
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {
"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure),
}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {
"incar": dict(self.incar),
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict(),
}
actual_kpts = [
{
"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i],
}
for i in range(len(self.actual_kpoints))
]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = dict(self.parameters)
vin["lattice_rec"] = self.final_structure.lattice.reciprocal_lattice.as_dict()
d["input"] = vin
vout = {"crystal": self.final_structure.as_dict(), "efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm, is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.append({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout["projected_eigenvalues"] = peigen
d["output"] = vout
return jsanitize(d, strict=True)
class Outcar:
"""
Parser for data in OUTCAR that is not available in Vasprun.xml
Note, this class works a bit differently than most of the other
VaspObjects, since the OUTCAR can be very different depending on which
"type of run" performed.
Creating the OUTCAR class with a filename reads "regular parameters" that
are always present.
.. attribute:: magnetization
Magnetization on each ion as a tuple of dict, e.g.,
({"d": 0.0, "p": 0.003, "s": 0.002, "tot": 0.005}, ... )
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: chemical_shielding
chemical shielding on each ion as a dictionary with core and valence contributions
.. attribute:: unsym_cs_tensor
Unsymmetrized chemical shielding tensor matrices on each ion as a list.
e.g.,
[[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]],
...
[[sigma11, sigma12, sigma13],
[sigma21, sigma22, sigma23],
[sigma31, sigma32, sigma33]]]
.. attribute:: cs_g0_contribution
G=0 contribution to chemical shielding. 2D rank 3 matrix
.. attribute:: cs_core_contribution
Core contribution to chemical shielding. dict. e.g.,
{'Mg': -412.8, 'C': -200.5, 'O': -271.1}
.. attribute:: efg
Electric Field Gradient (EFG) tensor on each ion as a tuple of dict, e.g.,
({"cq": 0.1, "eta", 0.2, "nuclear_quadrupole_moment": 0.3},
{"cq": 0.7, "eta", 0.8, "nuclear_quadrupole_moment": 0.9},
...)
.. attribute:: charge
Charge on each ion as a tuple of dict, e.g.,
({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
Note that this data is not always present. LORBIT must be set to some
other value than the default.
.. attribute:: is_stopped
True if OUTCAR is from a stopped run (using STOPCAR, see Vasp Manual).
.. attribute:: run_stats
Various useful run stats as a dict including "System time (sec)",
"Total CPU time used (sec)", "Elapsed time (sec)",
"Maximum memory used (kb)", "Average memory used (kb)",
"User time (sec)".
.. attribute:: elastic_tensor
Total elastic moduli (Kbar) is given in a 6x6 array matrix.
.. attribute:: drift
Total drift for each step in eV/Atom
.. attribute:: ngf
Dimensions for the Augmentation grid
.. attribute:: sampling_radii
Size of the sampling radii in VASP for the test charges for
the electrostatic potential at each atom. Total array size is the number
of elements present in the calculation
.. attribute:: electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
.. attribute:: final_energy_contribs
Individual contributions to the total final energy as a dictionary.
Includes contributions from keys, e.g.:
{'DENC': -505778.5184347, 'EATOM': 15561.06492564, 'EBANDS': -804.53201231,
'EENTRO': -0.08932659, 'EXHF': 0.0, 'Ediel_sol': 0.0,
'PAW double counting': 664.6726974100002, 'PSCENC': 742.48691646,
'TEWEN': 489742.86847338, 'XCENC': -169.64189814}
.. attribute:: efermi
Fermi energy
.. attribute:: filename
Filename
.. attribute:: final_energy
Final (total) energy
.. attribute:: has_onsite_density_matrices
Boolean for if onsite density matrices have been set
.. attribute:: lcalcpol
If LCALCPOL has been set
.. attribute:: lepsilon
If LEPSILON has been set
.. attribute:: nelect
Returns the number of electrons in the calculation
.. attribute:: spin
If spin-polarization was enabled via ISPIN
.. attribute:: total_mag
Total magnetization (in terms of the number of unpaired electrons)
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon(),
read_lcalcpol(), read_core_state_eigen() and read_avg_core_poten().
See the documentation of those methods for more details.
Authors: <NAME>, <NAME>
"""
def __init__(self, filename):
"""
Args:
filename (str): OUTCAR filename to parse.
"""
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+(" r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
try:
# try-catch because VASP 6.2.0 may print
# Average memory used (kb): N/A
# which cannot be parsed as float
run_stats[tok[0].strip()] = float(tok[1].strip())
except ValueError:
run_stats[tok[0].strip()] = None
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None, run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i) for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith("tot"):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
elif re.search("electrostatic", clean):
read_charge, read_mag_x, read_mag_y, read_mag_z = (
False,
False,
False,
False,
)
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append(
{key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]]) for key in mag_x[0].keys()}
)
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats["cores"] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats["cores"] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read "total number of plane waves", NPLWV:
self.read_pattern(
{"nplwv": r"total plane-waves NPLWV =\s+(\*{6}|\d+)"},
terminate_on_match=True,
)
try:
self.data["nplwv"] = [[int(self.data["nplwv"][0][0])]]
except ValueError:
self.data["nplwv"] = [[None]]
nplwvs_at_kpoints = [
n
for [n] in self.read_table_pattern(
r"\n{3}-{104}\n{3}",
r".+plane waves:\s+(\*{6,}|\d+)",
r"maximum and minimum number of plane-waves",
)
]
self.data["nplwvs_at_kpoints"] = [None for n in nplwvs_at_kpoints]
for (n, nplwv) in enumerate(nplwvs_at_kpoints):
try:
self.data["nplwvs_at_kpoints"][n] = int(nplwv)
except ValueError:
pass
# Read the drift:
self.read_pattern(
{"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float,
)
self.drift = self.data.get("drift", [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({"spin": "ISPIN = 2"})
if self.data.get("spin", []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({"noncollinear": "LNONCOLLINEAR = T"})
if self.data.get("noncollinear", []):
self.noncollinear = True  # flag follows the LNONCOLLINEAR = T match above
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern(
{"ibrion": r"IBRION =\s+([\-\d]+)"},
terminate_on_match=True,
postprocess=int,
)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({"epsilon": "LEPSILON= T"})
if self.data.get("epsilon", []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({"calcpol": "LCALCPOL = T"})
if self.data.get("calcpol", []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.electrostatic_potential = None
self.ngf = None
self.sampling_radii = None
self.read_pattern({"electrostatic": r"average \(electrostatic\) potential at core"})
if self.data.get("electrostatic", []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
self.has_onsite_density_matrices = False
self.read_pattern(
{"has_onsite_density_matrices": r"onsite density matrix"},
terminate_on_match=True,
)
if "has_onsite_density_matrices" in self.data:
self.has_onsite_density_matrices = True
self.read_onsite_density_matrices()
# Store the individual contributions to the final total energy
final_energy_contribs = {}
for k in [
"PSCENC",
"TEWEN",
"DENC",
"EXHF",
"XCENC",
"PAW double counting",
"EENTRO",
"EBANDS",
"EATOM",
"Ediel_sol",
]:
if k == "PAW double counting":
self.read_pattern({k: r"%s\s+=\s+([\.\-\d]+)\s+([\.\-\d]+)" % (k)})
else:
self.read_pattern({k: r"%s\s+=\s+([\d\-\.]+)" % (k)})
if not self.data[k]:
continue
final_energy_contribs[k] = sum(float(f) for f in self.data[k][-1])
self.final_energy_contribs = final_energy_contribs
def read_pattern(self, patterns, reverse=False, terminate_on_match=False, postprocess=str):
r"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(
self.filename,
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess,
)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
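# Illustrative sketch of read_pattern usage; the pattern below is only an
# example of a line that commonly appears in OUTCAR files.
#   outcar.read_pattern({"energy": r"energy\(sigma->0\)\s+=\s+([\d\-.]+)"},
#                       postprocess=float)
#   matches = outcar.data["energy"]  # list of lists, one inner list per matched line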
def read_table_pattern(
self,
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
attribute_name=None,
last_one_only=True,
):
r"""
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present, the parsed data
will be attached to self.data under this key, e.g. self.data["efg"] = [...]
last_one_only (bool): All the tables will be parsed, if this option
is set to True, only the last table will be returned. The
enclosing list will be removed. i.e. Only a single table will
be returned. Default to be True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row is either a list of
attribute values (when the capturing groups in row_pattern are unnamed) or a
dict (when named capturing groups are defined in row_pattern).
"""
with zopen(self.filename, "rt") as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
# skip empty lines
if not ml:
continue
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
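# Illustrative sketch of read_table_pattern; these patterns mirror the ones
# used by read_elastic_tensor further below and are repeated here only as an
# example of the header/row/footer triple.
#   header = r"TOTAL ELASTIC MODULI \(kBar\)\s+Direction\s+([X-Z][X-Z]\s+)+\-+"
#   row = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
#   table = outcar.read_table_pattern(header, row, r"\-+", postprocess=float)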
def read_electrostatic_potential(self):
"""
Parses the electrostatic potential for the last ionic step
"""
pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
self.read_pattern(pattern, postprocess=int)
self.ngf = self.data.get("ngf", [[]])[0]
pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
footer_pattern = r"\s+E-fermi :"
pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
pots = "".join(itertools.chain.from_iterable(pots))
pots = re.findall(r"\s+\d+\s*([\.\-\d]+)+", pots)
self.electrostatic_potential = [float(f) for f in pots]
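# Illustrative sketch: after parsing, the average electrostatic potentials
# are available per atom (in POSCAR order), together with the grid and radii.
#   pots = outcar.electrostatic_potential  # list of floats, one per atom
#   ngf, radii = outcar.ngf, outcar.sampling_radii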
@staticmethod
def _parse_sci_notation(line):
"""
Method to parse lines with values in scientific notation and potentially
without spaces in between the values. This assumes that the scientific
notation always lists two digits for the exponent, e.g. 3.535E-02
Args:
line: line to parse
Returns: an array of numbers if found, or empty array if not
"""
m = re.findall(r"[\.\-\d]+E[\+\-]\d{2}", line)
if m:
return [float(t) for t in m]
return []
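# Example for the helper above (hypothetical input line): values packed
# without separating spaces are still split on the fixed two-digit exponent.
#   Outcar._parse_sci_notation("0.123E-020.456E+01")  # -> [0.00123, 4.56]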
def read_freq_dielectric(self):
"""
Parses the frequency dependent dielectric function (obtained with
LOPTICS). Frequencies (in eV) are in self.frequencies, and dielectric
tensor function is given as self.dielectric_tensor_function.
"""
plasma_pattern = r"plasma frequency squared.*"
dielectric_pattern = (
r"frequency dependent\s+IMAGINARY "
r"DIELECTRIC FUNCTION \(independent particle, "
r"no local field effects\)(\sdensity-density)*$"
)
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 3)
plasma_frequencies = defaultdict(list)
read_plasma = False
read_dielectric = False
energies = []
data = {"REAL": [], "IMAGINARY": []}
count = 0
component = "IMAGINARY"
with zopen(self.filename, "rt") as f:
for l in f:
l = l.strip()
if re.match(plasma_pattern, l):
read_plasma = "intraband" if "intraband" in l else "interband"
elif re.match(dielectric_pattern, l):
read_plasma = False
read_dielectric = True
row_pattern = r"\s+".join([r"([\.\-\d]+)"] * 7)
if read_plasma and re.match(row_pattern, l):
plasma_frequencies[read_plasma].append([float(t) for t in l.strip().split()])
elif read_plasma and Outcar._parse_sci_notation(l):
plasma_frequencies[read_plasma].append(Outcar._parse_sci_notation(l))
elif read_dielectric:
toks = None
if re.match(row_pattern, l.strip()):
toks = l.strip().split()
elif Outcar._parse_sci_notation(l.strip()):
toks = Outcar._parse_sci_notation(l.strip())
elif re.match(r"\s*-+\s*", l):
count += 1
if toks:
if component == "IMAGINARY":
energies.append(float(toks[0]))
xx, yy, zz, xy, yz, xz = (float(t) for t in toks[1:])
matrix = [[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]]
data[component].append(matrix)
if count == 2:
component = "REAL"
elif count == 3:
break
self.plasma_frequencies = {k: np.array(v[:3]) for k, v in plasma_frequencies.items()}
self.dielectric_energies = np.array(energies)
self.dielectric_tensor_function = np.array(data["REAL"]) + 1j * np.array(data["IMAGINARY"])
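# Illustrative sketch (assumes the OUTCAR comes from an LOPTICS run):
#   outcar.read_freq_dielectric()
#   energies = outcar.dielectric_energies      # 1D array of energies in eV
#   eps = outcar.dielectric_tensor_function    # complex array of shape (n_energies, 3, 3)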
@property # type: ignore
@deprecated(message="frequencies has been renamed to dielectric_energies.")
def frequencies(self):
"""
Renamed to dielectric energies.
"""
return self.dielectric_energies
def read_chemical_shielding(self):
"""
Parse the NMR chemical shieldings data. Only the second part ("absolute, valence and core")
is parsed, and only the three rightmost fields (ISO_SHIFT, SPAN, SKEW) are retrieved.
Returns:
List of chemical shieldings in the order of atoms from the OUTCAR. Maryland notation is adopted.
"""
header_pattern = (
r"\s+CSA tensor \(J\. Mason, Solid State Nucl\. Magn\. Reson\. 2, "
r"285 \(1993\)\)\s+"
r"\s+-{50,}\s+"
r"\s+EXCLUDING G=0 CONTRIBUTION\s+INCLUDING G=0 CONTRIBUTION\s+"
r"\s+-{20,}\s+-{20,}\s+"
r"\s+ATOM\s+ISO_SHIFT\s+SPAN\s+SKEW\s+ISO_SHIFT\s+SPAN\s+SKEW\s+"
r"-{50,}\s*$"
)
first_part_pattern = r"\s+\(absolute, valence only\)\s+$"
swallon_valence_body_pattern = r".+?\(absolute, valence and core\)\s+$"
row_pattern = r"\d+(?:\s+[-]?\d+\.\d+){3}\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"-{50,}\s*$"
h1 = header_pattern + first_part_pattern
cs_valence_only = self.read_table_pattern(
h1, row_pattern, footer_pattern, postprocess=float, last_one_only=True
)
h2 = header_pattern + swallon_valence_body_pattern
cs_valence_and_core = self.read_table_pattern(
h2, row_pattern, footer_pattern, postprocess=float, last_one_only=True
)
all_cs = {}
for name, cs_table in [
["valence_only", cs_valence_only],
["valence_and_core", cs_valence_and_core],
]:
all_cs[name] = cs_table
self.data["chemical_shielding"] = all_cs
def read_cs_g0_contribution(self):
"""
Parse the G0 contribution of NMR chemical shielding.
Returns:
G0 contribution matrix as a list of lists.
"""
header_pattern = (
r"^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n"
r"^\s+-{50,}$\n"
r"^\s+BDIR\s+X\s+Y\s+Z\s*$\n"
r"^\s+-{50,}\s*$\n"
)
row_pattern = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
footer_pattern = r"\s+-{50,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=float,
last_one_only=True,
attribute_name="cs_g0_contribution",
)
def read_cs_core_contribution(self):
"""
Parse the core contribution of NMR chemical shielding.
Returns:
Core contribution as a dict of {element: core shift in ppm}.
"""
header_pattern = (
r"^\s+Core NMR properties\s*$\n" r"\n" r"^\s+typ\s+El\s+Core shift \(ppm\)\s*$\n" r"^\s+-{20,}$\n"
)
row_pattern = r"\d+\s+(?P<element>[A-Z][a-z]?\w?)\s+(?P<shift>[-]?\d+\.\d+)"
footer_pattern = r"\s+-{20,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
last_one_only=True,
attribute_name="cs_core_contribution",
)
core_contrib = {d["element"]: float(d["shift"]) for d in self.data["cs_core_contribution"]}
self.data["cs_core_contribution"] = core_contrib
def read_cs_raw_symmetrized_tensors(self):
"""
Parse the matrix form of the NMR tensors before they are corrected and tabulated.
Returns:
Unsymmetrized tensors as a list in the order of atoms.
"""
header_pattern = r"\s+-{50,}\s+" r"\s+Absolute Chemical Shift tensors\s+" r"\s+-{50,}$"
first_part_pattern = r"\s+UNSYMMETRIZED TENSORS\s+$"
row_pattern = r"\s+".join([r"([-]?\d+\.\d+)"] * 3)
unsym_footer_pattern = r"^\s+SYMMETRIZED TENSORS\s+$"
with zopen(self.filename, "rt") as f:
text = f.read()
unsym_table_pattern_text = header_pattern + first_part_pattern + r"(?P<table_body>.+)" + unsym_footer_pattern
table_pattern = re.compile(unsym_table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
m = table_pattern.search(text)
if m:
table_text = m.group("table_body")
micro_header_pattern = r"ion\s+\d+"
micro_table_pattern_text = micro_header_pattern + r"\s*^(?P<table_body>(?:\s*" + row_pattern + r")+)\s+"
micro_table_pattern = re.compile(micro_table_pattern_text, re.MULTILINE | re.DOTALL)
unsym_tensors = []
for mt in micro_table_pattern.finditer(table_text):
table_body_text = mt.group("table_body")
tensor_matrix = []
for line in table_body_text.rstrip().split("\n"):
ml = rp.search(line)
processed_line = [float(v) for v in ml.groups()]
tensor_matrix.append(processed_line)
unsym_tensors.append(tensor_matrix)
self.data["unsym_cs_tensor"] = unsym_tensors
else:
raise ValueError("NMR UNSYMMETRIZED TENSORS is not found")
def read_nmr_efg_tensor(self):
"""
Parses the NMR Electric Field Gradient Raw Tensors
Returns:
A list of Electric Field Gradient Tensors in the order of Atoms from OUTCAR
"""
header_pattern = (
r"Electric field gradients \(V/A\^2\)\n" r"-*\n" r" ion\s+V_xx\s+V_yy\s+V_zz\s+V_xy\s+V_xz\s+V_yz\n" r"-*\n"
)
row_pattern = r"\d+\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)\s+([-\d\.]+)"
footer_pattern = r"-*\n"
data = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
tensors = [make_symmetric_matrix_from_upper_tri(d) for d in data]
self.data["unsym_efg_tensor"] = tensors
return tensors
def read_nmr_efg(self):
"""
Parse the NMR Electric Field Gradient interpreted values.
Returns:
Electric Field Gradient tensors as a list of dict in the order of atoms from OUTCAR.
Each dict key/value pair corresponds to a component of the tensors.
"""
header_pattern = (
r"^\s+NMR quadrupolar parameters\s+$\n"
r"^\s+Cq : quadrupolar parameter\s+Cq=e[*]Q[*]V_zz/h$\n"
r"^\s+eta: asymmetry parameters\s+\(V_yy - V_xx\)/ V_zz$\n"
r"^\s+Q : nuclear electric quadrupole moment in mb \(millibarn\)$\n"
r"^-{50,}$\n"
r"^\s+ion\s+Cq\(MHz\)\s+eta\s+Q \(mb\)\s+$\n"
r"^-{50,}\s*$\n"
)
row_pattern = (
r"\d+\s+(?P<cq>[-]?\d+\.\d+)\s+(?P<eta>[-]?\d+\.\d+)\s+" r"(?P<nuclear_quadrupole_moment>[-]?\d+\.\d+)"
)
footer_pattern = r"-{50,}\s*$"
self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=float,
last_one_only=True,
attribute_name="efg",
)
def read_elastic_tensor(self):
"""
Parse the elastic tensor data.
Returns:
6x6 array corresponding to the elastic tensor from the OUTCAR.
"""
header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" r"Direction\s+([X-Z][X-Z]\s+)+" r"\-+"
row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"\-+"
et_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
self.data["elastic_tensor"] = et_table
def read_piezo_tensor(self):
"""
Parse the piezo tensor data
"""
header_pattern = r"PIEZOELECTRIC TENSOR for field in x, y, " r"z\s+\(C/m\^2\)\s+([X-Z][X-Z]\s+)+\-+"
row_pattern = r"[x-z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6)
footer_pattern = r"BORN EFFECTIVE"
pt_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float)
self.data["piezo_tensor"] = pt_table
def read_onsite_density_matrices(self):
"""
Parse the onsite density matrices, returns list with index corresponding
to atom index in Structure.
"""
# matrix size will vary depending on if d or f orbitals are present
# therefore regex assumes f, but filter out None values if d
header_pattern = r"spin component 1\n"
row_pattern = r"[^\S\r\n]*(?:(-?[\d.]+))" + r"(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?" * 6 + r".*?"
footer_pattern = r"\nspin component 2"
spin1_component = self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=lambda x: float(x) if x else None,
last_one_only=False,
)
# filter out None values
spin1_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin1_component]
# and repeat for Spin.down
header_pattern = r"spin component 2\n"
row_pattern = r"[^\S\r\n]*(?:([\d.-]+))" + r"(?:[^\S\r\n]*(-?[\d.]+)[^\S\r\n]*)?" * 6 + r".*?"
footer_pattern = r"\n occupancies and eigenvectors"
spin2_component = self.read_table_pattern(
header_pattern,
row_pattern,
footer_pattern,
postprocess=lambda x: float(x) if x else None,
last_one_only=False,
)
spin2_component = [[[e for e in row if e is not None] for row in matrix] for matrix in spin2_component]
self.data["onsite_density_matrices"] = [
{Spin.up: spin1_component[idx], Spin.down: spin2_component[idx]} for idx in range(len(spin1_component))
]
def read_corrections(self, reverse=True, terminate_on_match=True):
"""
Reads the dipole-quadrupole corrections into
Outcar.data["dipol_quadrupol_correction"].
:param reverse: Whether to start from end of OUTCAR.
:param terminate_on_match: Whether to terminate once match is found.
"""
patterns = {"dipol_quadrupol_correction": r"dipol\+quadrupol energy " r"correction\s+([\d\-\.]+)"}
self.read_pattern(
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=float,
)
self.data["dipol_quadrupol_correction"] = self.data["dipol_quadrupol_correction"][0][0]
def read_neb(self, reverse=True, terminate_on_match=True):
"""
Reads NEB data. This works with OUTCARs from both normal
VASP NEB calculations and the CI-NEB method implemented by
Henkelman et al.
Args:
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match. Defaults to True here since we usually
want only the final value.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern. Defaults to True here
since we usually want only the final value.
Renders accessible:
tangent_force - Final tangent force.
energy - Final energy.
These can be accessed under Outcar.data[key]
"""
patterns = {
"energy": r"energy\(sigma->0\)\s+=\s+([\d\-\.]+)",
"tangent_force": r"(NEB: projections on to tangent \(spring, REAL\)\s+\S+|tangential force \(eV/A\))\s+"
r"([\d\-\.]+)",
}
self.read_pattern(
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=str,
)
self.data["energy"] = float(self.data["energy"][0][0])
if self.data.get("tangent_force"):
self.data["tangent_force"] = float(self.data["tangent_force"][0][1])
def read_igpar(self):
"""
Renders accessible:
er_ev = e<r>_ev (dictionary with Spin.up/Spin.down as keys)
er_bp = e<r>_bp (dictionary with Spin.up/Spin.down as keys)
er_ev_tot = spin up + spin down summed
er_bp_tot = spin up + spin down summed
p_elc = spin up + spin down summed
p_ion = spin up + spin down summed
(See VASP section "LBERRY, IGPAR, NPPSTR, DIPOL tags" for info on
what these are).
"""
# variables to be filled
self.er_ev = {} # will be dict (Spin.up/down) of array(3*float)
self.er_bp = {} # will be dict (Spin.up/down) of array(3*float)
self.er_ev_tot = None # will be array(3*float)
self.er_bp_tot = None # will be array(3*float)
self.p_elec = None
self.p_ion = None
try:
search = []
# Nonspin cases
def er_ev(results, match):
results.er_ev[Spin.up] = np.array(map(float, match.groups()[1:4])) / 2
results.er_ev[Spin.down] = results.er_ev[Spin.up]
results.context = 2
search.append(
[
r"^ *e<r>_ev=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
er_ev,
]
)
def er_bp(results, match):
results.er_bp[Spin.up] = np.array([float(match.group(i)) for i in range(1, 4)]) / 2
results.er_bp[Spin.down] = results.er_bp[Spin.up]
search.append(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == 2,
er_bp,
]
)
# Spin cases
def er_ev_up(results, match):
results.er_ev[Spin.up] = np.array([float(match.group(i)) for i in range(1, 4)])
results.context = Spin.up
search.append(
[
r"^.*Spin component 1 *e<r>_ev=\( *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
er_ev_up,
]
)
def er_bp_up(results, match):
results.er_bp[Spin.up] = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.append(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.up,
er_bp_up,
]
)
def er_ev_dn(results, match):
results.er_ev[Spin.down] = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
results.context = Spin.down
search.append(
[
r"^.*Spin component 2 *e<r>_ev=\( *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
er_ev_dn,
]
)
def er_bp_dn(results, match):
results.er_bp[Spin.down] = np.array([float(match.group(i)) for i in range(1, 4)])
search.append(
[
r"^ *e<r>_bp=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
lambda results, line: results.context == Spin.down,
er_bp_dn,
]
)
# Always present spin/non-spin
def p_elc(results, match):
results.p_elc = np.array([float(match.group(i)) for i in range(1, 4)])
search.append(
[
r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None,
p_elc,
]
)
def p_ion(results, match):
results.p_ion = np.array([float(match.group(i)) for i in range(1, 4)])
search.append(
[
r"^.*ionic dipole moment: " r"*p\[ion\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_ion,
]
)
self.context = None
self.er_ev = {Spin.up: None, Spin.down: None}
self.er_bp = {Spin.up: None, Spin.down: None}
micro_pyawk(self.filename, search, self)
if self.er_ev[Spin.up] is not None and self.er_ev[Spin.down] is not None:
self.er_ev_tot = self.er_ev[Spin.up] + self.er_ev[Spin.down]
if self.er_bp[Spin.up] is not None and self.er_bp[Spin.down] is not None:
self.er_bp_tot = self.er_bp[Spin.up] + self.er_bp[Spin.down]
except Exception:
self.er_ev_tot = None
self.er_bp_tot = None
raise Exception("IGPAR OUTCAR could not be parsed.")
def read_internal_strain_tensor(self):
"""
Reads the internal strain tensor and populates self.internal_strain_tensor with an array of voigt notation
tensors for each site.
"""
search = []
def internal_strain_start(results, match):
results.internal_strain_ion = int(match.group(1)) - 1
results.internal_strain_tensor.append(np.zeros((3, 6)))
search.append(
[
r"INTERNAL STRAIN TENSOR FOR ION\s+(\d+)\s+for displacements in x,y,z \(eV/Angst\):",
None,
internal_strain_start,
]
)
def internal_strain_data(results, match):
if match.group(1).lower() == "x":
index = 0
elif match.group(1).lower() == "y":
index = 1
elif match.group(1).lower() == "z":
index = 2
else:
raise Exception(f"Couldn't parse row index from symbol for internal strain tensor: {match.group(1)}")
results.internal_strain_tensor[results.internal_strain_ion][index] = np.array(
[float(match.group(i)) for i in range(2, 8)]
)
if index == 2:
results.internal_strain_ion = None
search.append(
[
r"^\s+([x,y,z])\s+" + r"([-]?\d+\.\d+)\s+" * 6,
lambda results, line: results.internal_strain_ion is not None,
internal_strain_data,
]
)
self.internal_strain_ion = None
self.internal_strain_tensor = []
micro_pyawk(self.filename, search, self)
def read_lepsilon(self):
"""
Reads an LEPSILON run.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_index = -1
search.append(
[
r"MACROSCOPIC STATIC DIELECTRIC TENSOR \(",
None,
dielectric_section_start,
]
)
def dielectric_section_start2(results, match):
results.dielectric_index = 0
search.append(
[
r"-------------------------------------",
lambda results, line: results.dielectric_index == -1,
dielectric_section_start2,
]
)
def dielectric_data(results, match):
results.dielectric_tensor[results.dielectric_index, :] = np.array(
[float(match.group(i)) for i in range(1, 4)]
)
results.dielectric_index += 1
search.append(
[
r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_index >= 0
if results.dielectric_index is not None
else None,
dielectric_data,
]
)
def dielectric_section_stop(results, match):
results.dielectric_index = None
search.append(
[
r"-------------------------------------",
lambda results, line: results.dielectric_index >= 1
if results.dielectric_index is not None
else None,
dielectric_section_stop,
]
)
self.dielectric_index = None
self.dielectric_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_index = 0
search.append(
[
r"PIEZOELECTRIC TENSOR for field in x, y, z " r"\(C/m\^2\)",
None,
piezo_section_start,
]
)
def piezo_data(results, match):
results.piezo_tensor[results.piezo_index, :] = np.array([float(match.group(i)) for i in range(1, 7)])
results.piezo_index += 1
search.append(
[
r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_index >= 0 if results.piezo_index is not None else None,
piezo_data,
]
)
def piezo_section_stop(results, match):
results.piezo_index = None
search.append(
[
r"-------------------------------------",
lambda results, line: results.piezo_index >= 1 if results.piezo_index is not None else None,
piezo_section_stop,
]
)
self.piezo_index = None
self.piezo_tensor = np.zeros((3, 6))
def born_section_start(results, match):
results.born_ion = -1
search.append([r"BORN EFFECTIVE CHARGES ", None, born_section_start])
def born_ion(results, match):
results.born_ion = int(match.group(1)) - 1
results.born.append(np.zeros((3, 3)))
search.append(
[
r"ion +([0-9]+)",
lambda results, line: results.born_ion is not None,
born_ion,
]
)
def born_data(results, match):
results.born[results.born_ion][int(match.group(1)) - 1, :] = np.array(
[float(match.group(i)) for i in range(2, 5)]
)
search.append(
[
r"^ *([1-3]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+)$",
lambda results, line: results.born_ion >= 0 if results.born_ion is not None else results.born_ion,
born_data,
]
)
def born_section_stop(results, match):
results.born_ion = None
search.append(
[
r"-------------------------------------",
lambda results, line: results.born_ion >= 1 if results.born_ion is not None else results.born_ion,
born_section_stop,
]
)
self.born_ion = None
self.born = []
micro_pyawk(self.filename, search, self)
self.born = np.array(self.born)
self.dielectric_tensor = self.dielectric_tensor.tolist()
self.piezo_tensor = self.piezo_tensor.tolist()
except Exception:
raise Exception("LEPSILON OUTCAR could not be parsed.")
def read_lepsilon_ionic(self):
"""
Reads an LEPSILON run, the ionic component.
# TODO: Document the actual variables.
"""
try:
search = []
def dielectric_section_start(results, match):
results.dielectric_ionic_index = -1
search.append(
[
r"MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC",
None,
dielectric_section_start,
]
)
def dielectric_section_start2(results, match):
results.dielectric_ionic_index = 0
search.append(
[
r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index == -1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_start2,
]
)
def dielectric_data(results, match):
results.dielectric_ionic_tensor[results.dielectric_ionic_index, :] = np.array(
[float(match.group(i)) for i in range(1, 4)]
)
results.dielectric_ionic_index += 1
search.append(
[
r"^ *([-0-9.Ee+]+) +([-0-9.Ee+]+) +([-0-9.Ee+]+) *$",
lambda results, line: results.dielectric_ionic_index >= 0
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_data,
]
)
def dielectric_section_stop(results, match):
results.dielectric_ionic_index = None
search.append(
[
r"-------------------------------------",
lambda results, line: results.dielectric_ionic_index >= 1
if results.dielectric_ionic_index is not None
else results.dielectric_ionic_index,
dielectric_section_stop,
]
)
self.dielectric_ionic_index = None
self.dielectric_ionic_tensor = np.zeros((3, 3))
def piezo_section_start(results, match):
results.piezo_ionic_index = 0
search.append(
[
r"PIEZOELECTRIC TENSOR IONIC CONTR for field in " r"x, y, z ",
None,
piezo_section_start,
]
)
def piezo_data(results, match):
results.piezo_ionic_tensor[results.piezo_ionic_index, :] = np.array(
[float(match.group(i)) for i in range(1, 7)]
)
results.piezo_ionic_index += 1
search.append(
[
r"^ *[xyz] +([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+) *([-0-9.Ee+]+) +([-0-9.Ee+]+)"
+ r" +([-0-9.Ee+]+)*$",
lambda results, line: results.piezo_ionic_index >= 0
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_data,
]
)
def piezo_section_stop(results, match):
results.piezo_ionic_index = None
search.append(
[
"-------------------------------------",
lambda results, line: results.piezo_ionic_index >= 1
if results.piezo_ionic_index is not None
else results.piezo_ionic_index,
piezo_section_stop,
]
)
self.piezo_ionic_index = None
self.piezo_ionic_tensor = np.zeros((3, 6))
micro_pyawk(self.filename, search, self)
self.dielectric_ionic_tensor = self.dielectric_ionic_tensor.tolist()
self.piezo_ionic_tensor = self.piezo_ionic_tensor.tolist()
except Exception:
raise Exception("ionic part of LEPSILON OUTCAR could not be parsed.")
def read_lcalcpol(self):
"""
Reads the LCALCPOL data.
# TODO: Document the actual variables.
"""
self.p_elec = None
self.p_sp1 = None
self.p_sp2 = None
self.p_ion = None
try:
search = []
# Always present spin/non-spin
def p_elec(results, match):
results.p_elec = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.append(
[
r"^.*Total electronic dipole moment: "
r"*p\[elc\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) "
r"*([-0-9.Ee+]*) *\)",
None,
p_elec,
]
)
# If spin-polarized (and not noncollinear)
# save spin-polarized electronic values
if self.spin and not self.noncollinear:
def p_sp1(results, match):
results.p_sp1 = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.append(
[
r"^.*p\[sp1\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_sp1,
]
)
def p_sp2(results, match):
results.p_sp2 = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.append(
[
r"^.*p\[sp2\]=\( *([-0-9.Ee+]*) *([-0-9.Ee+]*) " r"*([-0-9.Ee+]*) *\)",
None,
p_sp2,
]
)
def p_ion(results, match):
results.p_ion = np.array(
[
float(match.group(1)),
float(match.group(2)),
float(match.group(3)),
]
)
search.append(
[
r"^.*Ionic dipole moment: *p\[ion\]=" r"\( *([-0-9.Ee+]*)" r" *([-0-9.Ee+]*) *([-0-9.Ee+]*) *\)",
None,
p_ion,
]
)
micro_pyawk(self.filename, search, self)
except Exception:
raise Exception("LCALCPOL OUTCAR could not be parsed.")
def read_pseudo_zval(self):
"""
Create pseudopotential ZVAL dictionary.
"""
# pylint: disable=E1101
try:
def atom_symbols(results, match):
element_symbol = match.group(1)
if not hasattr(results, "atom_symbols"):
results.atom_symbols = []
results.atom_symbols.append(element_symbol.strip())
def zvals(results, match):
zvals = match.group(1)
results.zvals = map(float, re.findall(r"-?\d+\.\d*", zvals))
search = []
search.append([r"(?<=VRHFIN =)(.*)(?=:)", None, atom_symbols])
search.append([r"^\s+ZVAL.*=(.*)", None, zvals])
micro_pyawk(self.filename, search, self)
zval_dict = {}
for x, y in zip(self.atom_symbols, self.zvals):
zval_dict.update({x: y})
self.zval_dict = zval_dict
# Clean-up
del self.atom_symbols
del self.zvals
except Exception:
raise Exception("ZVAL dict could not be parsed.")
def read_core_state_eigen(self):
"""
Read the core state eigenenergies at each ionic step.
Returns:
A list of dicts over the atoms such as [{"AO": [core state eig]}].
The core state eigenenergy list for each AO spans all ionic
steps.
Example:
The core state eigenenergy of the 2s AO of the 6th atom of the
structure at the last ionic step is [5]["2s"][-1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
while line != "":
line = foutcar.readline()
if "NIONS =" in line:
natom = int(line.split("NIONS =")[1])
cl = [defaultdict(list) for i in range(natom)]
if "the core state eigen" in line:
iat = -1
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
break
data = line.split()
# data will contain odd number of elements if it is
# the start of a new entry, or even number of elements
# if it continues the previous entry
if len(data) % 2 == 1:
iat += 1 # started parsing a new ion
data = data[1:] # remove element with ion number
for i in range(0, len(data), 2):
cl[iat][data[i]].append(float(data[i + 1]))
return cl
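# Illustrative sketch (assumes the OUTCAR actually contains core state
# eigenenergies, i.e. they were requested in the INCAR):
#   cl = outcar.read_core_state_eigen()
#   eig_2s_atom6_last_step = cl[5]["2s"][-1]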
def read_avg_core_poten(self):
"""
Read the core potential at each ionic step.
Returns:
A list for each ionic step containing a list of the average core
potentials for each atom: [[avg core pot]].
Example:
The average core potential of the 2nd atom of the structure at the
last ionic step is: [-1][1]
"""
with zopen(self.filename, "rt") as foutcar:
line = foutcar.readline()
aps = []
while line != "":
line = foutcar.readline()
if "the norm of the test charge is" in line:
ap = []
while line != "":
line = foutcar.readline()
# don't know number of lines to parse without knowing
# specific species, so stop parsing when we reach
# "E-fermi" instead
if "E-fermi" in line:
aps.append(ap)
break
# the average core potentials of up to 5 elements are
# given per line; the potentials are separated by several
# spaces and numbered from 1 to natoms; the potentials are
# parsed in a fixed width format
npots = int((len(line) - 1) / 17)
for i in range(npots):
start = i * 17
ap.append(float(line[start + 8 : start + 17]))
return aps
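# Illustrative sketch:
#   aps = outcar.read_avg_core_poten()
#   last_step_pot_atom2 = aps[-1][1]  # matches the docstring example above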
def as_dict(self):
"""
:return: MSONable dict.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"run_stats": self.run_stats,
"magnetization": self.magnetization,
"charge": self.charge,
"total_magnetization": self.total_mag,
"nelect": self.nelect,
"is_stopped": self.is_stopped,
"drift": self.drift,
"ngf": self.ngf,
"sampling_radii": self.sampling_radii,
"electrostatic_potential": self.electrostatic_potential,
}
if self.lepsilon:
d.update(
{
"piezo_tensor": self.piezo_tensor,
"dielectric_tensor": self.dielectric_tensor,
"born": self.born,
}
)
if self.dfpt:
d.update({"internal_strain_tensor": self.internal_strain_tensor})
if self.dfpt and self.lepsilon:
d.update(
{
"piezo_ionic_tensor": self.piezo_ionic_tensor,
"dielectric_ionic_tensor": self.dielectric_ionic_tensor,
}
)
if self.lcalcpol:
d.update({"p_elec": self.p_elec, "p_ion": self.p_ion})
if self.spin and not self.noncollinear:
d.update({"p_sp1": self.p_sp1, "p_sp2": self.p_sp2})
d.update({"zval_dict": self.zval_dict})
if self.nmr_cs:
d.update(
{
"nmr_cs": {
"valence and core": self.data["chemical_shielding"]["valence_and_core"],
"valence_only": self.data["chemical_shielding"]["valence_only"],
"g0": self.data["cs_g0_contribution"],
"core": self.data["cs_core_contribution"],
"raw": self.data["unsym_cs_tensor"],
}
}
)
if self.nmr_efg:
d.update(
{
"nmr_efg": {
"raw": self.data["unsym_efg_tensor"],
"parameters": self.data["efg"],
}
}
)
if self.has_onsite_density_matrices:
# cast Spin to str for consistency with electronic_structure
# TODO: improve handling of Enum (de)serialization in monty
onsite_density_matrices = [{str(k): v for k, v in d.items()} for d in self.data["onsite_density_matrices"]]
d.update({"onsite_density_matrices": onsite_density_matrices})
return d
def read_fermi_contact_shift(self):
"""
output example:
Fermi contact (isotropic) hyperfine coupling parameter (MHz)
-------------------------------------------------------------
ion A_pw A_1PS A_1AE A_1c A_tot
-------------------------------------------------------------
1 -0.002 -0.002 -0.051 0.000 -0.052
2 -0.002 -0.002 -0.051 0.000 -0.052
3 0.056 0.056 0.321 -0.048 0.321
-------------------------------------------------------------
, which corresponds to
[[-0.002, -0.002, -0.051, 0.0, -0.052],
[-0.002, -0.002, -0.051, 0.0, -0.052],
[0.056, 0.056, 0.321, -0.048, 0.321]] from 'fch' data
"""
# Fermi contact (isotropic) hyperfine coupling parameter (MHz)
header_pattern1 = (
r"\s*Fermi contact \(isotropic\) hyperfine coupling parameter \(MHz\)\s+"
r"\s*\-+"
r"\s*ion\s+A_pw\s+A_1PS\s+A_1AE\s+A_1c\s+A_tot\s+"
r"\s*\-+"
)
row_pattern1 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 5)
footer_pattern = r"\-+"
fch_table = self.read_table_pattern(
header_pattern1,
row_pattern1,
footer_pattern,
postprocess=float,
last_one_only=True,
)
# Dipolar hyperfine coupling parameters (MHz)
header_pattern2 = (
r"\s*Dipolar hyperfine coupling parameters \(MHz\)\s+"
r"\s*\-+"
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+A_xy\s+A_xz\s+A_yz\s+"
r"\s*\-+"
)
row_pattern2 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 6)
dh_table = self.read_table_pattern(
header_pattern2,
row_pattern2,
footer_pattern,
postprocess=float,
last_one_only=True,
)
# Total hyperfine coupling parameters after diagonalization (MHz)
header_pattern3 = (
r"\s*Total hyperfine coupling parameters after diagonalization \(MHz\)\s+"
r"\s*\(convention: \|A_zz\| > \|A_xx\| > \|A_yy\|\)\s+"
r"\s*\-+"
r"\s*ion\s+A_xx\s+A_yy\s+A_zz\s+asymmetry \(A_yy - A_xx\)/ A_zz\s+"
r"\s*\-+"
)
row_pattern3 = r"(?:\d+)\s+" + r"\s+".join([r"([-]?\d+\.\d+)"] * 4)
th_table = self.read_table_pattern(
header_pattern3,
row_pattern3,
footer_pattern,
postprocess=float,
last_one_only=True,
)
fc_shift_table = {"fch": fch_table, "dh": dh_table, "th": th_table}
self.data["fermi_contact_shift"] = fc_shift_table
class VolumetricData(MSONable):
"""
Simple volumetric object for reading LOCPOT and CHGCAR type files.
.. attribute:: structure
Structure associated with the Volumetric Data object
.. attribute:: is_spin_polarized
True if run is spin polarized
.. attribute:: dim
Tuple of dimensions of the volumetric grid in each direction (nx, ny, nz).
.. attribute:: data
Actual data as a dict of {string: np.array}. The strings are "total"
and "diff", in accordance with the output format of vasp LOCPOT and
CHGCAR files where the total spin density is written first, followed
by the difference spin density.
.. attribute:: ngridpts
Total number of grid points in volumetric data.
"""
def __init__(self, structure, data, distance_matrix=None, data_aug=None):
"""
Typically, this constructor is not used directly and the static
from_file constructor is used. This constructor is designed to allow
summation and other operations between VolumetricData objects.
Args:
structure: Structure associated with the volumetric data
data: Actual volumetric data.
data_aug: Any extra information associated with volumetric data
(typically augmentation charges)
distance_matrix: A pre-computed distance matrix if available.
Useful to pass distance_matrices between sums,
short-circuiting an otherwise expensive operation.
"""
self.structure = structure
self.is_spin_polarized = len(data) >= 2
self.is_soc = len(data) >= 4
self.dim = data["total"].shape
self.data = data
self.data_aug = data_aug if data_aug else {}
self.ngridpts = self.dim[0] * self.dim[1] * self.dim[2]
# lazy init the spin data since this is not always needed.
self._spin_data = {}
self._distance_matrix = {} if not distance_matrix else distance_matrix
self.xpoints = np.linspace(0.0, 1.0, num=self.dim[0])
self.ypoints = np.linspace(0.0, 1.0, num=self.dim[1])
self.zpoints = np.linspace(0.0, 1.0, num=self.dim[2])
self.interpolator = RegularGridInterpolator(
(self.xpoints, self.ypoints, self.zpoints),
self.data["total"],
bounds_error=True,
)
self.name = "VolumetricData"
@property
def spin_data(self):
"""
The data decomposed into actual spin data as {spin: data}.
Essentially, this provides the actual Spin.up and Spin.down data
instead of the total and diff. Note that by definition, a
non-spin-polarized run would have Spin.up data == Spin.down data.
"""
if not self._spin_data:
spin_data = {}
spin_data[Spin.up] = 0.5 * (self.data["total"] + self.data.get("diff", 0))
spin_data[Spin.down] = 0.5 * (self.data["total"] - self.data.get("diff", 0))
self._spin_data = spin_data
return self._spin_data
def get_axis_grid(self, ind):
"""
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.dim
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def __add__(self, other):
return self.linear_add(other, 1.0)
def __sub__(self, other):
return self.linear_add(other, -1.0)
def copy(self):
"""
:return: Copy of Volumetric object
"""
return VolumetricData(
self.structure,
{k: v.copy() for k, v in self.data.items()},
distance_matrix=self._distance_matrix,
data_aug=self.data_aug,
)
def linear_add(self, other, scale_factor=1.0):
"""
Method to do a linear sum of volumetric objects. Used by + and -
operators as well. Returns a VolumetricData object containing the
linear sum.
Args:
other (VolumetricData): Another VolumetricData object
scale_factor (float): Factor to scale the other data by.
Returns:
VolumetricData corresponding to self + scale_factor * other.
"""
if self.structure != other.structure:
warnings.warn("Structures are different. Make sure you know what you are doing...")
if self.data.keys() != other.data.keys():
raise ValueError("Data have different keys! Maybe one is spin-" "polarized and the other is not?")
# To add checks
data = {}
for k in self.data.keys():
data[k] = self.data[k] + scale_factor * other.data[k]
return VolumetricData(self.structure, data, self._distance_matrix)
@staticmethod
def parse_file(filename):
"""
Convenience method to parse a generic volumetric data file in the vasp
like format. Used by subclasses for parsing file.
Args:
filename (str): Path of file to parse
Returns:
(poscar, data)
"""
# pylint: disable=E1136,E1126
poscar_read = False
poscar_string = []
dataset = []
all_dataset = []
# for holding any strings in input that are not Poscar
# or VolumetricData (typically augmentation charges)
all_dataset_aug = {}
dim = None
dimline = None
read_dataset = False
ngrid_pts = 0
data_count = 0
poscar = None
with zopen(filename, "rt") as f:
for line in f:
original_line = line
line = line.strip()
if read_dataset:
for tok in line.split():
if data_count < ngrid_pts:
# This complicated procedure is necessary because
# vasp outputs x as the fastest index, followed by y
# then z.
no_x = data_count // dim[0]
dataset[data_count % dim[0], no_x % dim[1], no_x // dim[1]] = float(tok)
data_count += 1
if data_count >= ngrid_pts:
read_dataset = False
data_count = 0
all_dataset.append(dataset)
elif not poscar_read:
if line != "" or len(poscar_string) == 0:
poscar_string.append(line)
elif line == "":
poscar = Poscar.from_string("\n".join(poscar_string))
poscar_read = True
elif not dim:
dim = [int(i) for i in line.split()]
ngrid_pts = dim[0] * dim[1] * dim[2]
dimline = line
read_dataset = True
dataset = np.zeros(dim)
elif line == dimline:
# when line == dimline, expect volumetric data to follow
# so set read_dataset to True
read_dataset = True
dataset = np.zeros(dim)
else:
# store any extra lines that were not part of the
# volumetric data so we know which set of data the extra
# lines are associated with
key = len(all_dataset) - 1
if key not in all_dataset_aug:
all_dataset_aug[key] = []
all_dataset_aug[key].append(original_line)
if len(all_dataset) == 4:
data = {
"total": all_dataset[0],
"diff_x": all_dataset[1],
"diff_y": all_dataset[2],
"diff_z": all_dataset[3],
}
data_aug = {
"total": all_dataset_aug.get(0, None),
"diff_x": all_dataset_aug.get(1, None),
"diff_y": all_dataset_aug.get(2, None),
"diff_z": all_dataset_aug.get(3, None),
}
# construct a "diff" dict for scalar-like magnetization density,
# referenced to an arbitrary direction (using same method as
# pymatgen.electronic_structure.core.Magmom, see
# Magmom documentation for justification for this)
# TODO: re-examine this, and also similar behavior in
# Magmom - @mkhorton
# TODO: does CHGCAR change with different SAXIS?
diff_xyz = np.array([data["diff_x"], data["diff_y"], data["diff_z"]])
diff_xyz = diff_xyz.reshape((3, dim[0] * dim[1] * dim[2]))
ref_direction = np.array([1.01, 1.02, 1.03])
ref_sign = np.sign(np.dot(ref_direction, diff_xyz))
diff = np.multiply(np.linalg.norm(diff_xyz, axis=0), ref_sign)
data["diff"] = diff.reshape((dim[0], dim[1], dim[2]))
elif len(all_dataset) == 2:
data = {"total": all_dataset[0], "diff": all_dataset[1]}
data_aug = {
"total": all_dataset_aug.get(0, None),
"diff": all_dataset_aug.get(1, None),
}
else:
data = {"total": all_dataset[0]}
data_aug = {"total": all_dataset_aug.get(0, None)}
return poscar, data, data_aug
def write_file(self, file_name, vasp4_compatible=False):
"""
Write the VolumetricData object to a vasp compatible file.
Args:
file_name (str): Path to a file
vasp4_compatible (bool): True if the format is vasp4 compatible
"""
def _print_fortran_float(f):
"""
Fortran codes print floats with a leading zero in scientific
notation. When writing CHGCAR files, we adopt this convention
to ensure written CHGCAR files are byte-to-byte identical to
their input files as far as possible.
:param f: float
:return: str
"""
s = f"{f:.10E}"
if f >= 0:
return "0." + s[0] + s[2:12] + "E" + f"{int(s[13:]) + 1:+03}"
return "-." + s[1] + s[3:13] + "E" + f"{int(s[14:]) + 1:+03}"
with zopen(file_name, "wt") as f:
p = Poscar(self.structure)
# use original name if it's been set (e.g. from Chgcar)
comment = getattr(self, "name", p.comment)
lines = comment + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
if not vasp4_compatible:
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f.write(lines)
a = self.dim
def write_spin(data_type):
lines = []
count = 0
f.write(f" {a[0]} {a[1]} {a[2]}\n")
for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))):
lines.append(_print_fortran_float(self.data[data_type][i, j, k]))
count += 1
if count % 5 == 0:
f.write(" " + "".join(lines) + "\n")
lines = []
else:
lines.append(" ")
if count % 5 != 0:
f.write(" " + "".join(lines) + " \n")
f.write("".join(self.data_aug.get(data_type, [])))
write_spin("total")
if self.is_spin_polarized and self.is_soc:
write_spin("diff_x")
write_spin("diff_y")
write_spin("diff_z")
elif self.is_spin_polarized:
write_spin("diff")
def value_at(self, x, y, z):
"""
Get a data value from self.data at a given point (x, y, z) in terms
of fractional lattice parameters. Will be interpolated using a
RegularGridInterpolator on self.data if (x, y, z) is not in the original
set of data points.
Args:
x (float): Fraction of lattice vector a.
y (float): Fraction of lattice vector b.
z (float): Fraction of lattice vector c.
Returns:
Value from self.data (potentially interpolated) corresponding to
the point (x, y, z).
"""
return self.interpolator([x, y, z])[0]
def linear_slice(self, p1, p2, n=100):
"""
Get a linear slice of the volumetric data with n data points from
point p1 to point p2, in the form of a list.
Args:
p1 (list): 3-element list containing fractional coordinates of the first point.
p2 (list): 3-element list containing fractional coordinates of the second point.
n (int): Number of data points to collect, defaults to 100.
Returns:
List of n data points (mostly interpolated) representing a linear slice of the
data from point p1 to point p2.
"""
assert type(p1) in [list, np.ndarray] and type(p2) in [list, np.ndarray]
assert len(p1) == 3 and len(p2) == 3
xpts = np.linspace(p1[0], p2[0], num=n)
ypts = np.linspace(p1[1], p2[1], num=n)
zpts = np.linspace(p1[2], p2[2], num=n)
return [self.value_at(xpts[i], ypts[i], zpts[i]) for i in range(n)]
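# Illustrative sketch: point evaluation and a line profile through the cell,
# e.g. for a LOCPOT-derived object (the variable name vd is an assumption).
#   v = vd.value_at(0.25, 0.5, 0.5)                         # interpolated value at fractional coords
#   profile = vd.linear_slice([0, 0, 0], [0, 0, 1], n=200)  # 200 values along the c direction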
def get_integrated_diff(self, ind, radius, nbins=1):
"""
Get integrated difference of atom index ind up to radius. This can be
an extremely computationally intensive process, depending on how many
grid points are in the VolumetricData.
Args:
ind (int): Index of atom.
radius (float): Radius of integration.
nbins (int): Number of bins. Defaults to 1. This allows one to
obtain the charge integration up to a list of the cumulative
charge integration values for radii for [radius/nbins,
2 * radius/nbins, ....].
Returns:
Differential integrated charge as a np array of [[radius, value],
...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],
data[:,1])
"""
# For non-spin-polarized runs, this is zero by definition.
if not self.is_spin_polarized:
radii = [radius / nbins * (i + 1) for i in range(nbins)]
data = np.zeros((nbins, 2))
data[:, 0] = radii
return data
struct = self.structure
a = self.dim
if ind not in self._distance_matrix or self._distance_matrix[ind]["max_radius"] < radius:
coords = []
for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):
coords.append([x / a[0], y / a[1], z / a[2]])
sites_dist = struct.lattice.get_points_in_sphere(coords, struct[ind].coords, radius)
self._distance_matrix[ind] = {
"max_radius": radius,
"data": np.array(sites_dist),
}
data = self._distance_matrix[ind]["data"]
# Use boolean indexing to find all charges within the desired distance.
inds = data[:, 1] <= radius
dists = data[inds, 1]
data_inds = np.rint(np.mod(list(data[inds, 0]), 1) * np.tile(a, (len(dists), 1))).astype(int)
vals = [self.data["diff"][x, y, z] for x, y, z in data_inds]
hist, edges = np.histogram(dists, bins=nbins, range=[0, radius], weights=vals)
data = np.zeros((nbins, 2))
data[:, 0] = edges[1:]
data[:, 1] = [sum(hist[0 : i + 1]) / self.ngridpts for i in range(nbins)]
return data
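# Illustrative sketch: cumulative integrated spin density difference around
# atom 0, binned out to a 1.5 Angstrom radius (numbers are placeholders).
#   data = chgcar.get_integrated_diff(0, 1.5, nbins=10)
#   # data[:, 0] are radii, data[:, 1] are cumulative integrals, suitable for
#   # plt.plot(data[:, 0], data[:, 1]) as suggested in the docstring.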
def get_average_along_axis(self, ind):
"""
Get the averaged total of the volumetric data along a certain axis direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
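# Illustrative sketch: planar-averaged potential along the c axis from a
# LOCPOT (the filename is an assumption).
#   locpot = Locpot.from_file("LOCPOT")
#   z_grid = locpot.get_axis_grid(2)
#   avg_v = locpot.get_average_along_axis(2)  # one value per grid plane along c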
def to_hdf5(self, filename):
"""
Writes the VolumetricData to a HDF5 format, which is a highly optimized
format for reading and storing large data. The mapping of the VolumetricData
to this file format is as follows:
VolumetricData.data -> f["vdata"]
VolumetricData.structure ->
f["Z"]: Sequence of atomic numbers
f["fcoords"]: Fractional coords
f["lattice"]: Lattice in the pymatgen.core.lattice.Lattice matrix
format
f.attrs["structure_json"]: String of json representation
Args:
filename (str): Filename to output to.
"""
import h5py
with h5py.File(filename, "w") as f:
ds = f.create_dataset("lattice", (3, 3), dtype="float")
ds[...] = self.structure.lattice.matrix
ds = f.create_dataset("Z", (len(self.structure.species),), dtype="i")
ds[...] = np.array([sp.Z for sp in self.structure.species])
ds = f.create_dataset("fcoords", self.structure.frac_coords.shape, dtype="float")
ds[...] = self.structure.frac_coords
dt = h5py.special_dtype(vlen=str)
ds = f.create_dataset("species", (len(self.structure.species),), dtype=dt)
ds[...] = [str(sp) for sp in self.structure.species]
grp = f.create_group("vdata")
for k, v in self.data.items():
ds = grp.create_dataset(k, self.data[k].shape, dtype="float")
ds[...] = self.data[k]
f.attrs["name"] = self.name
f.attrs["structure_json"] = json.dumps(self.structure.as_dict())
@classmethod
def from_hdf5(cls, filename, **kwargs):
"""
Reads VolumetricData from HDF5 file.
:param filename: Filename
:return: VolumetricData
"""
import h5py
with h5py.File(filename, "r") as f:
data = {k: np.array(v) for k, v in f["vdata"].items()}
data_aug = None
if "vdata_aug" in f:
data_aug = {k: np.array(v) for k, v in f["vdata_aug"].items()}
structure = Structure.from_dict(json.loads(f.attrs["structure_json"]))
return cls(structure, data=data, data_aug=data_aug, **kwargs)
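# Illustrative usage sketch (not part of the original module): a round trip
# through the HDF5 serialization described in to_hdf5/from_hdf5, plus a linear
# slice of the interpolated data. The "CHGCAR" path is a hypothetical placeholder.
def _example_volumetric_roundtrip():
    chg = Chgcar.from_file("CHGCAR")
    chg.to_hdf5("chgcar.h5")
    chg2 = VolumetricData.from_hdf5("chgcar.h5")
    # 100 interpolated values of the total density along the body diagonal
    return chg2.linear_slice([0, 0, 0], [1, 1, 1], n=100)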
class Locpot(VolumetricData):
"""
Simple object for reading a LOCPOT file.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar): Poscar object containing structure.
data: Actual data.
"""
super().__init__(poscar.structure, data)
self.name = poscar.comment
@classmethod
def from_file(cls, filename, **kwargs):
"""
Reads a LOCPOT file.
:param filename: Filename
:return: Locpot
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data, **kwargs)
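# Illustrative usage sketch: the planar-averaged Hartree potential along the
# c-axis, as suggested in the get_average_along_axis docstring. "LOCPOT" is a
# hypothetical path.
def _example_locpot_planar_average():
    locpot = Locpot.from_file("LOCPOT")
    return locpot.get_average_along_axis(2)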
class Chgcar(VolumetricData):
"""
Simple object for reading a CHGCAR file.
"""
def __init__(self, poscar, data, data_aug=None):
"""
Args:
poscar (Poscar or Structure): Object containing structure.
data: Actual data.
data_aug: Augmentation charge data
"""
# allow for poscar or structure files to be passed
if isinstance(poscar, Poscar):
tmp_struct = poscar.structure
self.poscar = poscar
self.name = poscar.comment
elif isinstance(poscar, Structure):
tmp_struct = poscar
self.poscar = Poscar(poscar)
self.name = None
super().__init__(tmp_struct, data, data_aug=data_aug)
self._distance_matrix = {}
@staticmethod
def from_file(filename):
"""
Reads a CHGCAR file.
:param filename: Filename
:return: Chgcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return Chgcar(poscar, data, data_aug=data_aug)
@property
def net_magnetization(self):
"""
:return: Net magnetization from Chgcar
"""
if self.is_spin_polarized:
return np.sum(self.data["diff"])
return None
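# Illustrative usage sketch: cumulative spin-density integration around an atom
# and the net magnetization, using the methods above. The path, atom index and
# radius are hypothetical placeholders; the plot call follows the
# get_integrated_diff docstring.
def _example_chgcar_spin_analysis():
    import matplotlib.pyplot as plt
    chg = Chgcar.from_file("CHGCAR")
    data = chg.get_integrated_diff(ind=0, radius=2.0, nbins=20)
    plt.plot(data[:, 0], data[:, 1])  # integrated moment vs. radius
    plt.xlabel("radius")
    plt.ylabel("integrated spin density")
    return chg.net_magnetization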
class Elfcar(VolumetricData):
"""
Read an ELFCAR file which contains the Electron Localization Function (ELF)
as calculated by VASP.
For ELF, "total" key refers to Spin.up, and "diff" refers to Spin.down.
This also contains information on the kinetic energy density.
"""
def __init__(self, poscar, data):
"""
Args:
poscar (Poscar or Structure): Object containing structure.
data: Actual data.
"""
# allow for poscar or structure files to be passed
if isinstance(poscar, Poscar):
tmp_struct = poscar.structure
self.poscar = poscar
elif isinstance(poscar, Structure):
tmp_struct = poscar
self.poscar = Poscar(poscar)
super().__init__(tmp_struct, data)
# TODO: modify VolumetricData so that the correct keys can be used.
# for ELF, instead of "total" and "diff" keys we have
# "Spin.up" and "Spin.down" keys
# I believe this is correct, but there's not much documentation -mkhorton
self.data = data
@classmethod
def from_file(cls, filename):
"""
Reads a ELFCAR file.
:param filename: Filename
:return: Elfcar
"""
(poscar, data, data_aug) = VolumetricData.parse_file(filename)
return cls(poscar, data)
def get_alpha(self):
"""
Get the parameter alpha where ELF = 1/(1+alpha^2).
"""
alpha_data = {}
for k, v in self.data.items():
alpha = 1 / v
alpha = alpha - 1
alpha = np.sqrt(alpha)
alpha_data[k] = alpha
return VolumetricData(self.structure, alpha_data)
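# Illustrative usage sketch: converting an ELFCAR to the alpha parameter defined
# in get_alpha (ELF = 1/(1 + alpha^2)) and slicing it along a line. "ELFCAR" is
# a hypothetical path.
def _example_elfcar_alpha_profile():
    elf = Elfcar.from_file("ELFCAR")
    alpha = elf.get_alpha()
    return alpha.linear_slice([0, 0, 0], [0.5, 0.5, 0.5])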
class Procar:
"""
Object for reading a PROCAR file.
.. attribute:: data
The PROCAR data of the form below. Note that VASP uses 1-based indexing,
but all indices are converted to 0-based here.::
{
spin: nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: weights
The weights associated with each k-point as an nd.array of length
nkpoints.
.. attribute:: phase_factors
Phase factors, where present (e.g. LORBIT = 12). A dict of the form:
{
spin: complex nd.array accessed with (k-point index, band index,
ion index, orbital index)
}
.. attribute:: nbands
Number of bands
.. attribute:: nkpoints
Number of k-points
.. attribute:: nions
Number of ions
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing PROCAR.
"""
headers = None
with zopen(filename, "rt") as f:
preambleexpr = re.compile(r"# of k-points:\s*(\d+)\s+# of bands:\s*(\d+)\s+# of " r"ions:\s*(\d+)")
kpointexpr = re.compile(r"^k-point\s+(\d+).*weight = ([0-9\.]+)")
bandexpr = re.compile(r"^band\s+(\d+)")
ionexpr = re.compile(r"^ion.*")
expr = re.compile(r"^([0-9]+)\s+")
current_kpoint = 0
current_band = 0
done = False
spin = Spin.down
weights = None
# pylint: disable=E1137
for l in f:
l = l.strip()
if bandexpr.match(l):
m = bandexpr.match(l)
current_band = int(m.group(1)) - 1
done = False
elif kpointexpr.match(l):
m = kpointexpr.match(l)
current_kpoint = int(m.group(1)) - 1
weights[current_kpoint] = float(m.group(2))
if current_kpoint == 0:
spin = Spin.up if spin == Spin.down else Spin.down
done = False
elif headers is None and ionexpr.match(l):
headers = l.split()
headers.pop(0)
headers.pop(-1)
def ff():
return np.zeros((nkpoints, nbands, nions, len(headers)))
data = defaultdict(ff)
def f2():
return np.full(
(nkpoints, nbands, nions, len(headers)),
np.NaN,
dtype=np.complex128,
)
phase_factors = defaultdict(f2)
elif expr.match(l):
toks = l.split()
index = int(toks.pop(0)) - 1
num_data = np.array([float(t) for t in toks[: len(headers)]])
if not done:
data[spin][current_kpoint, current_band, index, :] = num_data
else:
if len(toks) > len(headers):
# new format of PROCAR (vasp 5.4.4)
num_data = np.array([float(t) for t in toks[: 2 * len(headers)]])
for orb in range(len(headers)):
phase_factors[spin][current_kpoint, current_band, index, orb] = complex(
num_data[2 * orb], num_data[2 * orb + 1]
)
else:
# old format of PROCAR (vasp 5.4.1 and before)
if np.isnan(phase_factors[spin][current_kpoint, current_band, index, 0]):
phase_factors[spin][current_kpoint, current_band, index, :] = num_data
else:
phase_factors[spin][current_kpoint, current_band, index, :] += 1j * num_data
elif l.startswith("tot"):
done = True
elif preambleexpr.match(l):
m = preambleexpr.match(l)
nkpoints = int(m.group(1))
nbands = int(m.group(2))
nions = int(m.group(3))
weights = np.zeros(nkpoints)
self.nkpoints = nkpoints
self.nbands = nbands
self.nions = nions
self.weights = weights
self.orbitals = headers
self.data = data
self.phase_factors = phase_factors
def get_projection_on_elements(self, structure):
"""
Method returning a dictionary of projections on elements.
Args:
structure (Structure): Input structure.
Returns:
a dictionary of the form {Spin.up: [band index][kpoint index]{Element: value}}
"""
dico = {}
for spin in self.data.keys():
dico[spin] = [[defaultdict(float) for i in range(self.nkpoints)] for j in range(self.nbands)]
for iat in range(self.nions):
name = structure.species[iat].symbol
for spin, d in self.data.items():
for k, b in itertools.product(range(self.nkpoints), range(self.nbands)):
dico[spin][b][k][name] = np.sum(d[k, b, iat, :])
return dico
def get_occupation(self, atom_index, orbital):
"""
Returns the occupation for a particular orbital of a particular atom.
Args:
atom_index (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbitals occupations are returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Sum occupation of orbital of atom.
"""
orbital_index = self.orbitals.index(orbital)
return {
spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None]) for spin, d in self.data.items()
}
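# Illustrative usage sketch: orbital occupations and element-projected weights
# from a PROCAR, using the methods above. The path, atom index and orbital
# label are hypothetical and must match the orbital headers in the file.
def _example_procar_queries(structure):
    procar = Procar("PROCAR")
    occ = procar.get_occupation(atom_index=0, orbital="dxy")
    proj = procar.get_projection_on_elements(structure)
    return occ, proj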
class Oszicar:
"""
A basic parser for an OSZICAR output from VASP. In general, while the
OSZICAR is useful for a quick look at the output from a VASP run, we
recommend that you use the Vasprun parser instead, which gives far richer
information about a run.
.. attribute:: electronic_steps
All electronic steps as a list of list of dict. e.g.,
[[{"rms": 160.0, "E": 4507.24605593, "dE": 4507.2, "N": 1,
"deps": -17777.0, "ncg": 16576}, ...], [....]
where electronic_steps[index] refers the list of electronic steps
in one ionic_step, electronic_steps[index][subindex] refers to a
particular electronic step at subindex in ionic step at index. The
dict of properties depends on the type of VASP run, but in general,
"E", "dE" and "rms" should be present in almost all runs.
.. attribute:: ionic_steps:
All ionic_steps as a list of dict, e.g.,
[{"dE": -526.36, "E0": -526.36024, "mag": 0.0, "F": -526.36024},
...]
This is the typical output from VASP at the end of each ionic step.
"""
def __init__(self, filename):
"""
Args:
filename (str): Filename of file to parse
"""
electronic_steps = []
ionic_steps = []
ionic_pattern = re.compile(
r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+" r"E0=\s*([\d\-\.E\+]+)\s+" r"d\s*E\s*=\s*([\d\-\.E\+]+)$"
)
ionic_mag_pattern = re.compile(
r"(\d+)\s+F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"d\s*E\s*=\s*([\d\-\.E\+]+)\s+"
r"mag=\s*([\d\-\.E\+]+)"
)
ionic_MD_pattern = re.compile(
r"(\d+)\s+T=\s*([\d\-\.E\+]+)\s+"
r"E=\s*([\d\-\.E\+]+)\s+"
r"F=\s*([\d\-\.E\+]+)\s+"
r"E0=\s*([\d\-\.E\+]+)\s+"
r"EK=\s*([\d\-\.E\+]+)\s+"
r"SP=\s*([\d\-\.E\+]+)\s+"
r"SK=\s*([\d\-\.E\+]+)"
)
electronic_pattern = re.compile(r"\s*\w+\s*:(.*)")
def smart_convert(header, num):
try:
if header in ("N", "ncg"):
v = int(num)
return v
v = float(num)
return v
except ValueError:
return "--"
header = []
with zopen(filename, "rt") as fid:
for line in fid:
line = line.strip()
m = electronic_pattern.match(line)
if m:
toks = m.group(1).split()
data = {header[i]: smart_convert(header[i], toks[i]) for i in range(len(toks))}
if toks[0] == "1":
electronic_steps.append([data])
else:
electronic_steps[-1].append(data)
elif ionic_pattern.match(line.strip()):
m = ionic_pattern.match(line.strip())
ionic_steps.append(
{
"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
}
)
elif ionic_mag_pattern.match(line.strip()):
m = ionic_mag_pattern.match(line.strip())
ionic_steps.append(
{
"F": float(m.group(2)),
"E0": float(m.group(3)),
"dE": float(m.group(4)),
"mag": float(m.group(5)),
}
)
elif ionic_MD_pattern.match(line.strip()):
m = ionic_MD_pattern.match(line.strip())
ionic_steps.append(
{
"T": float(m.group(2)),
"E": float(m.group(3)),
"F": float(m.group(4)),
"E0": float(m.group(5)),
"EK": float(m.group(6)),
"SP": float(m.group(7)),
"SK": float(m.group(8)),
}
)
elif re.match(r"^\s*N\s+E\s*", line):
header = line.strip().replace("d eps", "deps").split()
self.electronic_steps = electronic_steps
self.ionic_steps = ionic_steps
@property
def all_energies(self):
"""
Compilation of all energies from all electronic steps and ionic steps
as a tuple of list of energies, e.g.,
((4507.24605593, 143.824705755, -512.073149912, ...), ...)
"""
all_energies = []
for i in range(len(self.electronic_steps)):
energies = [step["E"] for step in self.electronic_steps[i]]
energies.append(self.ionic_steps[i]["F"])
all_energies.append(tuple(energies))
return tuple(all_energies)
@property # type: ignore
@unitized("eV")
def final_energy(self):
"""
Final energy from run.
"""
return self.ionic_steps[-1]["E0"]
def as_dict(self):
"""
:return: MSONable dict
"""
return {
"electronic_steps": self.electronic_steps,
"ionic_steps": self.ionic_steps,
}
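# Illustrative usage sketch: a quick look at an OSZICAR, per the class docstring.
# "OSZICAR" is a hypothetical path.
def _example_oszicar_summary():
    osz = Oszicar("OSZICAR")
    n_ionic = len(osz.ionic_steps)
    n_electronic = [len(steps) for steps in osz.electronic_steps]
    return n_ionic, n_electronic, osz.final_energy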
class VaspParserError(Exception):
"""
Exception class for VASP parsing.
"""
pass
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None, projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None if there is a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d) for d in glob.glob(f"{dir_name}/branch_*") if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sorted_branch_dir_names = sorted(branch_dir_names, key=lambda x: int(x.split("_")[-1]))
# populate branches with Bandstructure instances
branches = []
for dname in sorted_branch_dir_names:
xml_file = os.path.join(dname, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn(f"Skipping {dname}. Unable to find {xml_file}")
return get_reconstructed_band_structure(branches, efermi)
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections).get_band_structure(
kpoints_filename=None, efermi=efermi
)
return None
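# Illustrative usage sketch for the helper above: reconstruct a band structure
# from a directory of branch_* runs. The directory name and Fermi level are
# hypothetical placeholders.
def _example_branch_bandstructure():
    bs = get_band_structure_from_vasp_multiple_branches("bandstructure_runs", efermi=5.0, projections=False)
    return None if bs is None else bs.get_band_gap()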
class Xdatcar:
"""
Class representing an XDATCAR file. Only tested with VASP 5.x files.
.. attribute:: structures
List of structures parsed from XDATCAR.
.. attribute:: comment
Optional comment string.
Authors: <NAME>
"""
def __init__(self, filename, ionicstep_start=1, ionicstep_end=None, comment=None):
"""
Init a Xdatcar.
Args:
filename (str): Filename of input XDATCAR file.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
"""
preamble = None
coords_str = []
structures = []
preamble_done = False
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_start < 1:
raise Exception("End ionic step cannot be less than 1")
# pylint: disable=E1136
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
title = l
elif title == l:
preamble_done = False
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
if ionicstep_cnt >= ionicstep_end:
break
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
self.comment = comment or self.structures[0].formula
@property
def site_symbols(self):
"""
Sequence of symbols associated with the Xdatcar. Similar to 6th line in
vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [a[0] for a in itertools.groupby(syms)]
@property
def natoms(self):
"""
Sequence of number of sites of each type associated with the Poscar.
Similar to 7th line in vasp 5+ Xdatcar.
"""
syms = [site.specie.symbol for site in self.structures[0]]
return [len(tuple(a[1])) for a in itertools.groupby(syms)]
def concatenate(self, filename, ionicstep_start=1, ionicstep_end=None):
"""
Concatenate structures in file to Xdatcar.
Args:
filename (str): Filename of XDATCAR file to be concatenated.
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
TODO(rambalachandran):
Requires a check to ensure if the new concatenating file has the
same lattice structure and atoms as the Xdatcar class.
"""
preamble = None
coords_str = []
structures = self.structures
preamble_done = False
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_start < 1:
raise Exception("End ionic step cannot be less than 1")
# pylint: disable=E1136
ionicstep_cnt = 1
with zopen(filename, "rt") as f:
for l in f:
l = l.strip()
if preamble is None:
preamble = [l]
elif not preamble_done:
if l == "" or "Direct configuration=" in l:
preamble_done = True
tmp_preamble = [preamble[0]]
for i in range(1, len(preamble)):
if preamble[0] != preamble[i]:
tmp_preamble.append(preamble[i])
else:
break
preamble = tmp_preamble
else:
preamble.append(l)
elif l == "" or "Direct configuration=" in l:
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
ionicstep_cnt += 1
coords_str = []
else:
coords_str.append(l)
p = Poscar.from_string("\n".join(preamble + ["Direct"] + coords_str))
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
structures.append(p.structure)
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
structures.append(p.structure)
self.structures = structures
def get_string(self, ionicstep_start=1, ionicstep_end=None, significant_figures=8):
"""
Write Xdatcar class to a string.
Args:
ionicstep_start (int): Starting number of ionic step.
ionicstep_end (int): Ending number of ionic step.
significant_figures (int): Number of significant figures.
"""
if ionicstep_start < 1:
raise Exception("Start ionic step cannot be less than 1")
if ionicstep_end is not None and ionicstep_end < 1:
raise Exception("End ionic step cannot be less than 1")
latt = self.structures[0].lattice
if np.linalg.det(latt.matrix) < 0:
latt = Lattice(-latt.matrix)
lines = [self.comment, "1.0", str(latt)]
lines.append(" ".join(self.site_symbols))
lines.append(" ".join([str(x) for x in self.natoms]))
format_str = f"{{:.{significant_figures}f}}"
ionicstep_cnt = 1
output_cnt = 1
for cnt, structure in enumerate(self.structures):
ionicstep_cnt = cnt + 1
if ionicstep_end is None:
if ionicstep_cnt >= ionicstep_start:
lines.append("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
else:
if ionicstep_start <= ionicstep_cnt < ionicstep_end:
lines.append("Direct configuration=" + " " * (7 - len(str(output_cnt))) + str(output_cnt))
for (i, site) in enumerate(structure):
coords = site.frac_coords
line = " ".join([format_str.format(c) for c in coords])
lines.append(line)
output_cnt += 1
return "\n".join(lines) + "\n"
def write_file(self, filename, **kwargs):
"""
Write Xdatcar class into a file.
Args:
filename (str): Filename of output XDATCAR file.
The supported kwargs are the same as those for the
Xdatcar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs))
def __str__(self):
return self.get_string()
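# Illustrative usage sketch: reading a trajectory, appending a continuation run,
# and writing out a subset of ionic steps with the methods above. Filenames and
# step ranges are hypothetical.
def _example_xdatcar_subset():
    xd = Xdatcar("XDATCAR")
    xd.concatenate("XDATCAR.continued")
    xd.write_file("XDATCAR_10_20", ionicstep_start=10, ionicstep_end=20)
    return len(xd.structures)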
class Dynmat:
"""
Object for reading a DYNMAT file.
.. attribute:: data
A nested dict containing the DYNMAT data of the form::
[atom <int>][disp <int>]['dispvec'] =
displacement vector (part of first line in dynmat block, e.g. "0.01 0 0")
[atom <int>][disp <int>]['dynmat'] =
<list> list of dynmat lines for this atom and this displacement
Authors: <NAME>
"""
def __init__(self, filename):
"""
Args:
filename: Name of file containing DYNMAT
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
self._nspecs, self._natoms, self._ndisps = map(int, lines[0].split())
self._masses = map(float, lines[1].split())
self.data = defaultdict(dict)
atom, disp = None, None
for i, l in enumerate(lines[2:]):
v = list(map(float, l.split()))
if not i % (self._natoms + 1):
atom, disp = map(int, v[:2])
if atom not in self.data:
self.data[atom] = {}
if disp not in self.data[atom]:
self.data[atom][disp] = {}
self.data[atom][disp]["dispvec"] = v[2:]
else:
if "dynmat" not in self.data[atom][disp]:
self.data[atom][disp]["dynmat"] = []
self.data[atom][disp]["dynmat"].append(v)
def get_phonon_frequencies(self):
"""calculate phonon frequencies"""
# TODO: the following is most likely not correct or suboptimal
# hence for demonstration purposes only
frequencies = []
for k, v0 in self.data.items():
for v1 in v0.values():  # dict.itervalues() does not exist in Python 3
vec = map(abs, v1["dynmat"][k - 1])
frequency = math.sqrt(sum(vec)) * 2.0 * math.pi * 15.633302 # THz
frequencies.append(frequency)
return frequencies
@property
def nspecs(self):
"""returns the number of species"""
return self._nspecs
@property
def natoms(self):
"""returns the number of atoms"""
return self._natoms
@property
def ndisps(self):
"""returns the number of displacements"""
return self._ndisps
@property
def masses(self):
"""returns the list of atomic masses"""
return list(self._masses)
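# Illustrative usage sketch: inspecting a DYNMAT file with the class above. Note
# that get_phonon_frequencies is flagged in its own docstring as approximate.
# "DYNMAT" is a hypothetical path.
def _example_dynmat_summary():
    dm = Dynmat("DYNMAT")
    return dm.natoms, dm.ndisps, dm.masses, dm.get_phonon_frequencies()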
def get_adjusted_fermi_level(efermi, cbm, band_structure):
"""
When running band structure computations, the Fermi level needs to be
taken from the static run that gave the charge density used for the
non-self-consistent band structure run. Sometimes this Fermi level is
a little too low because of the mismatch between the uniform grid used
in the static run and the band structure k-points (e.g., the VBM is at
Gamma and the Gamma point is not in the uniform mesh). Here we use a
procedure consisting of looking for energy levels higher than the static
Fermi level (but lower than the LUMO); if any of these levels makes the
band structure appear insulating and not metallic anymore, we keep this
adjusted Fermi level. This procedure has been shown to correctly detect
most insulators.
Args:
efermi (float): Fermi energy of the static run
cbm (float): Conduction band minimum of the static run
band_structure: a BandStructure object
Returns:
a new adjusted fermi level
"""
# make a working copy of band_structure
bs_working = BandStructureSymmLine.from_dict(band_structure.as_dict())
if bs_working.is_metal():
e = efermi
while e < cbm:
e += 0.01
bs_working._efermi = e
if not bs_working.is_metal():
return e
return efermi
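# Illustrative usage sketch: adjusting the Fermi level of a non-self-consistent
# band structure using the static-run efermi and CBM, per the procedure
# described above. All inputs are hypothetical.
def _example_adjust_efermi(static_vasprun, band_structure):
    efermi = static_vasprun.efermi
    cbm = static_vasprun.get_band_structure().get_cbm()["energy"]
    return get_adjusted_fermi_level(efermi, cbm, band_structure)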
# a note to future confused people (i.e. myself):
# I use numpy.fromfile instead of scipy.io.FortranFile here because the records
# are of fixed length, so the record length is only written once. In fortran,
# this amounts to using open(..., form='unformatted', recl=recl_len). In
# constrast when you write UNK files, the record length is written at the
# beginning of each record. This allows you to use scipy.io.FortranFile. In
# fortran, this amounts to using open(..., form='unformatted') [i.e. no recl=].
class Wavecar:
"""
This is a class that contains the (pseudo-) wavefunctions from VASP.
Coefficients are read from the given WAVECAR file and the corresponding
G-vectors are generated using the algorithm developed in WaveTrans (see
acknowledgments below). To understand how the wavefunctions are evaluated,
please see the evaluate_wavefunc docstring.
It should be noted that the pseudopotential augmentation is not included in
the WAVECAR file. As a result, some caution should be exercised when
deriving value from this information.
The usefulness of this class is to allow the user to do projections or band
unfolding style manipulations of the wavefunction. An example of this can
be seen in the work of Shen et al. 2017
(https://doi.org/10.1103/PhysRevMaterials.1.065001).
.. attribute:: filename
String of the input file (usually WAVECAR)
.. attribute:: vasp_type
String that determines VASP type the WAVECAR was generated with (either
'std', 'gam', or 'ncl')
.. attribute:: nk
Number of k-points from the WAVECAR
.. attribute:: nb
Number of bands per k-point
.. attribute:: encut
Energy cutoff (used to define G_{cut})
.. attribute:: efermi
Fermi energy
.. attribute:: a
Primitive lattice vectors of the cell (e.g. a_1 = self.a[0, :])
.. attribute:: b
Reciprocal lattice vectors of the cell (e.g. b_1 = self.b[0, :])
.. attribute:: vol
The volume of the unit cell in real space
.. attribute:: kpoints
The list of k-points read from the WAVECAR file
.. attribute:: band_energy
The list of band eigenenergies (and corresponding occupancies) for
each kpoint, where the first index corresponds to the index of the
k-point (e.g. self.band_energy[kp])
.. attribute:: Gpoints
The list of generated G-points for each k-point (a double list), which
are used with the coefficients for each k-point and band to recreate
the wavefunction (e.g. self.Gpoints[kp] is the list of G-points for
k-point kp). The G-points depend on the k-point and reciprocal lattice
and therefore are identical for each band at the same k-point. Each
G-point is represented by integer multipliers (e.g. assuming
Gpoints[kp][n] == [n_1, n_2, n_3], then
G_n = n_1*b_1 + n_2*b_2 + n_3*b_3)
.. attribute:: coeffs
The list of coefficients for each k-point and band for reconstructing
the wavefunction. For non-spin-polarized, the first index corresponds
to the kpoint and the second corresponds to the band (e.g.
self.coeffs[kp][b] corresponds to k-point kp and band b). For
spin-polarized calculations, the first index is for the spin.
If the calculation was non-collinear, then self.coeffs[kp][b] will have
two columns (one for each component of the spinor).
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
<NAME> and <NAME> from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: <NAME>
"""
def __init__(self, filename="WAVECAR", verbose=False, precision="normal", vasp_type=None):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): input file (default: WAVECAR)
verbose (bool): determines whether processing information is shown
precision (str): determines how fine the fft mesh is (normal or
accurate), only the first letter matters
vasp_type (str): determines the VASP type that is used, allowed
values are ['std', 'gam', 'ncl']
(only first letter is required)
"""
self.filename = filename
if not (vasp_type is None or vasp_type.lower()[0] in ["s", "g", "n"]):
raise ValueError(f"invalid vasp_type {vasp_type}")
self.vasp_type = vasp_type
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
self._C = 0.262465831
with open(self.filename, "rb") as f:
# read the header information
recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3).astype(np.int_)
if verbose:
print(f"recl={recl}, spin={spin}, rtag={rtag}")
recl8 = int(recl / 8)
self.spin = spin
# check to make sure we have precision correct
if rtag not in (45200, 45210, 53300, 53310):
# note that rtag=45200 and 45210 may not work if file was actually
# generated by old version of VASP, since that would write eigenvalues
# and occupations in way that does not span FORTRAN records, but
# reader below appears to assume that record boundaries can be ignored
# (see OUTWAV vs. OUTWAV_4 in vasp fileio.F)
raise ValueError(f"invalid rtag of {rtag}")
# padding to end of fortran REC=1
np.fromfile(f, dtype=np.float64, count=(recl8 - 3))
# extract kpoint, bands, energy, and lattice information
self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64, count=3).astype(np.int_)
self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
if verbose:
print(
"kpoints = {}, bands = {}, energy cutoff = {}, fermi "
"energy= {:.04f}\n".format(self.nk, self.nb, self.encut, self.efermi)
)
print(f"primitive lattice vectors = \n{self.a}")
self.vol = np.dot(self.a[0, :], np.cross(self.a[1, :], self.a[2, :]))
if verbose:
print(f"volume = {self.vol}\n")
# calculate reciprocal lattice
b = np.array(
[
np.cross(self.a[1, :], self.a[2, :]),
np.cross(self.a[2, :], self.a[0, :]),
np.cross(self.a[0, :], self.a[1, :]),
]
)
b = 2 * np.pi * b / self.vol
self.b = b
if verbose:
print(f"reciprocal lattice vectors = \n{b}")
print(f"reciprocal lattice vector magnitudes = \n{np.linalg.norm(b, axis=1)}\n")
# calculate maximum number of b vectors in each direction
self._generate_nbmax()
if verbose:
print(f"max number of G values = {self._nbmax}\n\n")
self.ng = self._nbmax * 3 if precision.lower()[0] == "n" else self._nbmax * 4
# padding to end of fortran REC=2
np.fromfile(f, dtype=np.float64, count=recl8 - 13)
# reading records
self.Gpoints = [None for _ in range(self.nk)]
self.kpoints = []
if spin == 2:
self.coeffs = [[[None for i in range(self.nb)] for j in range(self.nk)] for _ in range(spin)]
self.band_energy = [[] for _ in range(spin)]
else:
self.coeffs = [[None for i in range(self.nb)] for j in range(self.nk)]
self.band_energy = []
for ispin in range(spin):
if verbose:
print(f"reading spin {ispin}")
for ink in range(self.nk):
# information for this kpoint
nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
kpoint = np.fromfile(f, dtype=np.float64, count=3)
if ispin == 0:
self.kpoints.append(kpoint)
else:
assert np.allclose(self.kpoints[ink], kpoint)
if verbose:
print(f"kpoint {ink: 4} with {nplane: 5} plane waves at {kpoint}")
# energy and occupation information
enocc = np.fromfile(f, dtype=np.float64, count=3 * self.nb).reshape((self.nb, 3))
if spin == 2:
self.band_energy[ispin].append(enocc)
else:
self.band_energy.append(enocc)
if verbose:
print("enocc =\n", enocc[:, [0, 2]])
# padding to end of record that contains nplane, kpoints, evals and occs
np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * self.nb) % recl8)
if self.vasp_type is None:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=True)
if len(self.Gpoints[ink]) == nplane:
self.vasp_type = "gam"
else:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=False)
self.vasp_type = "std" if len(self.Gpoints[ink]) == nplane else "ncl"
if verbose:
print("\ndetermined vasp_type =", self.vasp_type, "\n")
else:
(
self.Gpoints[ink],
extra_gpoints,
extra_coeff_inds,
) = self._generate_G_points(kpoint, gamma=(self.vasp_type.lower()[0] == "g"))
if len(self.Gpoints[ink]) != nplane and 2 * len(self.Gpoints[ink]) != nplane:
raise ValueError(
f"Incorrect value of vasp_type given ({vasp_type})."
" Please open an issue if you are certain this WAVECAR"
" was generated with the given vasp_type."
)
self.Gpoints[ink] = np.array(self.Gpoints[ink] + extra_gpoints, dtype=np.float64)
# extract coefficients
for inb in range(self.nb):
if rtag in (45200, 53300):
data = np.fromfile(f, dtype=np.complex64, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - nplane)
elif rtag in (45210, 53310):
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
data = np.fromfile(f, dtype=np.complex128, count=nplane)
np.fromfile(f, dtype=np.float64, count=recl8 - 2 * nplane)
extra_coeffs = []
if len(extra_coeff_inds) > 0:
# reconstruct extra coefficients missing from gamma-only executable WAVECAR
for G_ind in extra_coeff_inds:
# no idea where this factor of sqrt(2) comes from, but empirically
# it appears to be necessary
data[G_ind] /= np.sqrt(2)
extra_coeffs.append(np.conj(data[G_ind]))
if spin == 2:
self.coeffs[ispin][ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex64)
else:
self.coeffs[ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex128)
if self.vasp_type.lower()[0] == "n":
self.coeffs[ink][inb].shape = (2, nplane // 2)
def _generate_nbmax(self) -> None:
"""
Helper function that determines maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = np.linalg.norm(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / (bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :])))
nbmaxA = np.sqrt(self.encut * self._C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / (bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :])))
nbmaxB = np.sqrt(self.encut * self._C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / (bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :])))
nbmaxC = np.sqrt(self.encut * self._C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0).astype(np.int_)
def _generate_G_points(self, kpoint: np.ndarray, gamma: bool = False) -> Tuple[List, List, List]:
"""
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
gamma (bool): determines if G points for gamma-point only executable
should be generated
Returns:
a list containing valid G-points
"""
if gamma:
kmax = self._nbmax[0] + 1
else:
kmax = 2 * self._nbmax[0] + 1
gpoints = []
extra_gpoints = []
extra_coeff_inds = []
G_ind = 0
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(kmax):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
if gamma and ((k1 == 0 and j2 < 0) or (k1 == 0 and j2 == 0 and i3 < 0)):
continue
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
if gamma and (k1, j2, i3) != (0, 0, 0):
extra_gpoints.append(-G)
extra_coeff_inds.append(G_ind)
G_ind += 1
return (gpoints, extra_gpoints, extra_coeff_inds)
def evaluate_wavefunc(self, kpoint: int, band: int, r: np.ndarray, spin: int = 0, spinor: int = 0) -> np.complex64:
r"""
Evaluates the wavefunction for a given position, r.
The wavefunction is given by the k-point and band. It is evaluated
at the given position by summing over the components. Formally,
\psi_n^k (r) = \sum_{i=1}^N c_i^{n,k} \exp (i (k + G_i^{n,k}) \cdot r)
where \psi_n^k is the wavefunction for the nth band at k-point k, N is
the number of plane waves, c_i^{n,k} is the ith coefficient that
corresponds to the nth band and k-point k, and G_i^{n,k} is the ith
G-point corresponding to k-point k.
NOTE: This function is very slow; a discrete fourier transform is the
preferred method of evaluation (see Wavecar.fft_mesh).
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
r (np.array): the position where the wavefunction will be evaluated
spin (int): spin index for the desired wavefunction (only for
ISPIN = 2, default = 0)
spinor (int): component of the spinor that is evaluated (only used
if vasp_type == 'ncl')
Returns:
a complex value corresponding to the evaluation of the wavefunction
"""
v = self.Gpoints[kpoint] + self.kpoints[kpoint]
u = np.dot(np.dot(v, self.b), r)
if self.vasp_type.lower()[0] == "n":
c = self.coeffs[kpoint][band][spinor, :]
elif self.spin == 2:
c = self.coeffs[spin][kpoint][band]
else:
c = self.coeffs[kpoint][band]
return np.sum(np.dot(c, np.exp(1j * u, dtype=np.complex64))) / np.sqrt(self.vol)
def fft_mesh(self, kpoint: int, band: int, spin: int = 0, spinor: int = 0, shift: bool = True) -> np.ndarray:
"""
Places the coefficients of a wavefunction onto an fft mesh.
Once the mesh has been obtained, a discrete fourier transform can be
used to obtain real-space evaluation of the wavefunction. The output
of this function can be passed directly to numpy's fft function. For
example:
mesh = Wavecar('WAVECAR').fft_mesh(kpoint, band)
evals = np.fft.ifftn(mesh)
Args:
kpoint (int): the index of the kpoint where the wavefunction
will be evaluated
band (int): the index of the band where the wavefunction will be
evaluated
spin (int): the spin of the wavefunction for the desired
wavefunction (only for ISPIN = 2, default = 0)
spinor (int): component of the spinor that is evaluated (only used
if vasp_type == 'ncl')
shift (bool): determines if the zero frequency coefficient is
placed at index (0, 0, 0) or centered
Returns:
a numpy ndarray representing the 3D mesh of coefficients
"""
if self.vasp_type.lower()[0] == "n":
tcoeffs = self.coeffs[kpoint][band][spinor, :]
elif self.spin == 2:
tcoeffs = self.coeffs[spin][kpoint][band]
else:
tcoeffs = self.coeffs[kpoint][band]
mesh = np.zeros(tuple(self.ng), dtype=np.complex_)
for gp, coeff in zip(self.Gpoints[kpoint], tcoeffs):
t = tuple(gp.astype(np.int_) + (self.ng / 2).astype(np.int_))
mesh[t] = coeff
if shift:
return np.fft.ifftshift(mesh)
return mesh
def get_parchg(
self,
poscar: Poscar,
kpoint: int,
band: int,
spin: Optional[int] = None,
spinor: Optional[int] = None,
phase: bool = False,
scale: int = 2,
) -> Chgcar:
"""
Generates a Chgcar object, which is the charge density of the specified
wavefunction.
This function generates a Chgcar object with the charge density of the
wavefunction specified by band and kpoint (and spin, if the WAVECAR
corresponds to a spin-polarized calculation). The phase tag is a
feature that is not present in VASP. For a real wavefunction, the phase
tag being turned on means that the charge density is multiplied by the
sign of the wavefunction at that point in space. A warning is generated
if the phase tag is on and the chosen kpoint is not Gamma.
Note: Augmentation from the PAWs is NOT included in this function. The
maximal charge density will differ from the PARCHG from VASP, but the
qualitative shape of the charge density will match.
Args:
poscar (pymatgen.io.vasp.inputs.Poscar): Poscar object that has the
structure associated with the WAVECAR file
kpoint (int): the index of the kpoint for the wavefunction
band (int): the index of the band for the wavefunction
spin (int): optional argument to specify the spin. If the Wavecar
has ISPIN = 2, spin is None generates a Chgcar with total spin
and magnetization, and spin == {0, 1} specifies just the spin
up or down component.
spinor (int): optional argument to specify the spinor component
for noncollinear data wavefunctions (allowed values of None,
0, or 1)
phase (bool): flag to determine if the charge density is multiplied
by the sign of the wavefunction. Only valid for real
wavefunctions.
scale (int): scaling for the FFT grid. The default value of 2 is at
least as fine as the VASP default.
Returns:
a pymatgen.io.vasp.outputs.Chgcar object
"""
if phase and not np.all(self.kpoints[kpoint] == 0.0):
warnings.warn("phase == True should only be used for the Gamma kpoint! I hope you know what you're doing!")
# scaling of ng for the fft grid, need to restore value at the end
temp_ng = self.ng
self.ng = self.ng * scale
N = np.prod(self.ng)
data = {}
if self.spin == 2:
if spin is not None:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=spin)) * N
den = np.abs(np.conj(wfr) * wfr)
if phase:
den = np.sign(np.real(wfr)) * den
data["total"] = den
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=0)) * N
denup = np.abs(np.conj(wfr) * wfr)
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spin=1)) * N
dendn = np.abs(np.conj(wfr) * wfr)
data["total"] = denup + dendn
data["diff"] = denup - dendn
else:
if spinor is not None:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spinor=spinor)) * N
den = np.abs(np.conj(wfr) * wfr)
else:
wfr = np.fft.ifftn(self.fft_mesh(kpoint, band, spinor=0)) * N
wfr_t = np.fft.ifftn(self.fft_mesh(kpoint, band, spinor=1)) * N
den = np.abs(np.conj(wfr) * wfr)
den += np.abs(np.conj(wfr_t) * wfr_t)
if phase and not (self.vasp_type.lower()[0] == "n" and spinor is None):
den = np.sign(np.real(wfr)) * den
data["total"] = den
# restore the original mesh size and wrap the density in a Chgcar
self.ng = temp_ng
return Chgcar(poscar, data)
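# Illustrative usage sketch: put one band on the FFT mesh, transform it to real
# space (as suggested in the fft_mesh docstring), and write a PARCHG-like file
# with get_parchg. Filenames and indices are hypothetical.
def _example_wavecar_parchg():
    wv = Wavecar("WAVECAR")
    mesh = wv.fft_mesh(kpoint=0, band=0)
    psi_realspace = np.fft.ifftn(mesh)
    parchg = wv.get_parchg(Poscar.from_file("POSCAR"), kpoint=0, band=0)
    parchg.write_file("PARCHG.0001.ALLK")
    return psi_realspace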
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy
import treecorr
import os
import fitsio
from test_helper import get_script_name
def test_constant():
# A fairly trivial test is to use a constant value of kappa everywhere.
ngal = 100000
A = 0.05
L = 100.
numpy.random.seed(8675309)
x = (numpy.random.random_sample(ngal)-0.5) * L
y = (numpy.random.random_sample(ngal)-0.5) * L
kappa = A * numpy.ones(ngal)
cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')
kk = treecorr.KKCorrelation(bin_size=0.1, min_sep=0.1, max_sep=10., sep_units='arcmin')
kk.process(cat)
print('kk.xi = ',kk.xi)
numpy.testing.assert_almost_equal(kk.xi, A**2, decimal=10)
# Now add some noise to the values. It should still work, but at slightly lower accuracy.
kappa += 0.001 * (numpy.random.random_sample(ngal)-0.5)
cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')
kk.process(cat)
print('kk.xi = ',kk.xi)
numpy.testing.assert_almost_equal(kk.xi, A**2, decimal=6)
def test_kk():
# cf. http://adsabs.harvard.edu/abs/2002A%26A...389..729S for the basic formulae I use here.
#
# Use kappa(r) = A exp(-r^2/2s^2)
#
# The Fourier transform is: kappa~(k) = 2 pi A s^2 exp(-s^2 k^2/2) / L^2
# P(k) = (1/2pi) <|kappa~(k)|^2> = 2 pi A^2 (s/L)^4 exp(-s^2 k^2)
# xi(r) = (1/2pi) int( dk k P(k) J0(kr) )
# = pi A^2 (s/L)^2 exp(-r^2/2s^2/4)
# Note: I'm not sure I handled the L factors correctly, but the units at the end need
# to be kappa^2, so it needs to be (s/L)^2.
ngal = 1000000
A = 0.05
s = 10.
L = 30. * s # Not infinity, so this introduces some error. Our integrals were to infinity.
numpy.random.seed(8675309)
x = (numpy.random.random_sample(ngal)-0.5) * L
y = (numpy.random.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/s**2
kappa = A * numpy.exp(-r2/2.)
cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')
kk = treecorr.KKCorrelation(bin_size=0.1, min_sep=1., max_sep=50., sep_units='arcmin',
verbose=1)
kk.process(cat)
# log(<R>) != <logR>, but it should be close:
print('meanlogr - log(meanr) = ',kk.meanlogr - numpy.log(kk.meanr))
numpy.testing.assert_almost_equal(kk.meanlogr, numpy.log(kk.meanr), decimal=3)  # loose tolerance, since log(<R>) != <logR>
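# Illustrative sketch (assumed continuation of the test): compare the measured
# correlation against the analytic form derived in the comments above, read as
# xi(r) = pi A^2 (s/L)^2 exp(-r^2/(4 s^2)); the tolerance is a guess.
def _check_against_theory(kk, A=0.05, s=10., L=300.):
    r = numpy.exp(kk.meanlogr)
    true_xi = numpy.pi * A**2 * (s/L)**2 * numpy.exp(-0.25 * r**2 / s**2)
    print('ratio = ', kk.xi / true_xi)
    numpy.testing.assert_allclose(kk.xi, true_xi, rtol=0.1)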
#!/usr/bin/env python3.7
# -*- coding: utf8 -*-
import matplotlib as mat
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import scipy.stats as stats
import os
sns.set(rc={"figure.figsize":(8,4)})
sns.set_context('paper',font_scale=1.5,rc={'lines.linewidth':1.5})
sns.set_style('ticks')
mat.rc('text',usetex=True)
mat.rcParams['text.latex.preamble']=[r'\usepackage[utf8]{inputenc}',r'\usepackage[T1]{fontenc}',r'\usepackage[spanish]{babel}',r'\usepackage[scaled]{helvet}',r'\renewcommand\familydefault{\sfdefault}',r'\usepackage{amsmath,amsfonts,amssymb}',r'\usepackage{siunitx}']
home=os.environ['HOME']
dir='proyectos/scicrt/simulation/resultados-sim/pulse-shape'
nda0='{0}/{1}/19nov_2phe-t90.csv'.format(home,dir)
nda1='{0}/{1}/19aug_7phe-t90.csv'.format(home,dir)
nsim='{0}/{1}/ptimes_t9011.csv'.format(home,dir)
data0=np.loadtxt(nda0,dtype=np.float)
data1=np.loadtxt(nda1,dtype=np.float)
t90_exp=np.hstack((data0,data1))
#time_test=t90_exp>=100
#t90_exp=t90_exp[time_test]
t90_sim=np.loadtxt(nsim,dtype=np.float)
print(np.amax(t90_exp),np.amin(t90_exp))
print(np.amax(t90_sim),np.amin(t90_sim))
print(np.size(t90_exp,0))
"""
This module lists the component vectors of the direct and reciprocal lattices
for different crystals.
It is based on the following publication:
<NAME> and <NAME>, "High-throughput electronic band
structure calculations: Challenges and tools", Comp. Mat. Sci. 49 (2010),
pp. 291--312.
"""
import sys
import numpy as np
from numpy import pi, sqrt
from fractions import Fraction
import logging
from pprint import pprint, pformat
logger = logging.getLogger(__name__)
class Lattice(object):
"""Generic lattice class."""
def __init__(self, info):
try:
self.name = info['type']
except KeyError:
logger.critical('Cannot continue without lattice type')
sys.exit(2)
self.scale = info.get('scale', 2*pi)
try:
self.param = info['param']
except:
logger.critical('Cannot continue without lattice parameter(s)')
sys.exit(2)
try:
self.setting = info['setting']
lat = get_lattice[self.name](self.param, self.setting)
except KeyError:
lat = get_lattice[self.name](self.param)
self.constants = lat.constants
self.primv = lat.primv
self.convv = lat.convv
self.SymPts_k = lat.SymPts_k
self.path = info.get('path', lat.standard_path)
#
self.reciprv = get_recipr_cell(self.primv, self.scale)
self.SymPts = {}
for k,v in self.SymPts_k.items():
self.SymPts[k] = get_kvec(v, self.reciprv)
def get_kcomp(self, string):
"""Return the k-components given a string label or string set of fraction.
Example of string:
s = 'X'
s = '1/2 0 1/2'
s = '1/2, 0, 1/2'
s = '0.5 0 0.5'
"""
try:
# assume it is a label
comp = self.SymPts_k[string]
except KeyError:
# assume it is a triplet of fractions (string)
# i.e. '1/3 1/2 3/8' or '1/4, 0.5, 0'
frac = string.replace(',', ' ').split()
assert len(frac) == 3
comp = [Fraction(f) for f in frac]
return np.array(comp)
def get_kvec(self, kpt):
"""Return the real space vector corresponding to a k-point.
"""
return get_kvec(kpt, self.reciprv)
def __repr__(self):
return repr_lattice(self)
class CUB(object):
"""
This is CUBic, cP lattice
"""
def __init__(self, param, setting=None):
try:
a = param[0]
except TypeError:
a = param
self.setting = setting
self.constants = [a, a, a, pi/2, pi/2, pi/2]
self.a = a
self.primv = [ np.array((a, 0., 0.)),
np.array((0., a, 0.)),
np.array((0., 0., a)) ]
self.convv = self.primv
self.SymPts_k =\
{ 'Gamma': (0,0,0),
'M': (1./2., 1./2., 0.),
'R': (1./2., 1./2., 1./2.),
'X': (0., 1./2., 0.), }
self.standard_path = "Gamma-X-M-Gamma-R-X|M-R"
class FCC(object):
"""
This is Face Centered Cubic lattice (cF)
"""
def __init__(self, param, setting='curtarolo'):
try:
a = param[0]
except (TypeError, IndexError) as e:
a = param
self.constants = [a, a, a, pi/2, pi/2, pi/2]
self.a = a
self.setting = setting
if setting == 'curtarolo':
self.primv = [np.array((0 , a/2., a/2.)),
np.array((a/2., 0 , a/2.)),
np.array((a/2., a/2., 0 ))]
self.convv = [ np.array((a, 0, 0,)),
np.array((0, a, 0,)),
np.array((0, 0, a,)) ]
# symmetry points in terms of reciprocal lattice vectors
self.SymPts_k = \
{ 'Gamma': (0.,0.,0.),
'K': (3./8.,3./8.,3./4.),
'L': (1./2.,1./2.,1./2.),
'U': (5./8.,1./4.,5./8.),
'W': (1./2.,1./4.,3./4.),
'X': (1./2.,0.,1./2.), }
self.standard_path = "Gamma-X-W-K-Gamma-L-U-W-L-K|U-X"
else:
logger.error('Unsupported setting {} for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class BCC(object):
"""
This is Body Centered Cubic lattice (cF)
"""
def __init__(self, param, setting='curtarolo'):
try:
a = param[0]
except (TypeError, IndexError) as e:
a = param
self.constants = [a, a, a, pi/2, pi/2, pi/2]
self.a = a
self.setting = setting
if setting == 'curtarolo':
self.primv = [ np.array((-a/2., a/2., a/2.)),
np.array(( a/2., -a/2., a/2.)),
np.array(( a/2., a/2., -a/2.)) ]
self.convv = [ np.array((a, 0, 0,)),
np.array((0, a, 0,)),
np.array((0, 0, a,)) ]
self.SymPts_k = \
{ 'Gamma': (0.,0.,0.),
'H': (1./2., -1./2., 1./2.),
'P': (1./4., 1./4., 1./4.),
'N': ( 0., 0., 1./2.) }
self.standard_path = "Gamma-H-N-Gamma-P-H|P-N"
else:
logger.error('Unsupported setting "{}" for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class HEX(object):
"""
This is HEXAGONAL, hP lattice
"""
def __init__(self, param, setting='curtarolo'):
a, c = param[:2]
self.constants = [a, a, c, pi/2, pi/2, 2*pi/3]
self.a = a
self.c = c
self.setting = setting
if setting == 'curtarolo':
self.primv = [ np.array((a/2., -a*np.sqrt(3)/2., 0.)),
np.array((a/2., +a*np.sqrt(3)/2., 0.)),
np.array((0, 0, c)) ]
self.convv = self.primv
self.SymPts_k =\
{ 'Gamma': (0,0,0),
'A': (0., 0., 1./2.),
'H': (1./3., 1./3., 1./2.),
'K': (1./3., 1./3., 0.),
'L': (1./2., 0., 1./2.),
'M': (1./2., 0., 0.), }
self.standard_path = "Gamma-M-K-Gamma-A-L-H-A|L-M|K-H"
else:
logger.error('Unsupported setting "{}" for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class TET(object):
"""
This is TETRAGONAL, tP lattice
"""
def __init__(self, param, setting='curtarolo'):
a, c = param[:2]
self.constants = [a, a, c, pi/2, pi/2, pi/2]
self.a = a
self.c = c
self.setting = setting
if setting == 'curtarolo':
self.primv = [ np.array((a, 0., 0.)),
np.array((0., a, 0.)),
np.array((0.,0., c)) ]
self.convv = self.primv
self.SymPts_k =\
{ 'Gamma': (0,0,0),
'A': (1./2., 1./2., 1./2.),
'M': (1./2., 1./2., 0.),
'R': (0., 1./2., 1./2.),
'X': (0., 1./2., 0.),
'Z': (0., 0., 1./2.), }
self.standard_path = "Gamma-X-M-Gamma-Z-R-A-Z|X-R|M-A"
else:
logger.error('Unsupported setting {} for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class ORC(object):
"""
This is ORTHOROMBIC, oP lattice
"""
def __init__(self, param, setting='curtarolo'):
a, b, c = param[:3]
self.constants = [a, b, c]
self.a = a
self.b = b
self.c = c
self.setting = setting
if setting == 'curtarolo':
self.primv = [ np.array((a, 0., 0.)),
np.array((0., b, 0.)),
np.array((0., 0., c)) ]
self.convv = self.primv
self.SymPts_k =\
{ 'Gamma': (0,0,0),
'R': (1./2., 1./2., 1./2.),
'S': (1./2., 1./2., 0.),
'T': (0., 1./2., 1./2.),
'U': (1./2., 0., 1./2.),
'X': (1./2., 0., 0.),
'Y': (0., 1./2., 0.),
'Z': (0., 0., 1./2.), }
self.standard_path = "Gamma-X-S-Y-Gamma-Z-U-R-T-Z|Y-T|U-X|S-R"
else:
logger.error('Unsupported setting {} for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class RHL(object):
"""
This is Rhombohedral RHL, hR lattice
Primitive lattice defined via:
a = b = c, and alpha = beta = gamma <> 90 degrees
Two variations exists: RHL1 (alpha < 90) and RHL2 (alpha > 90)
"""
def __init__(self, param, setting=None):
"""
Initialise the lattice parameter(s) upon instance creation, and
calculate the direct and reciprocal lattice vectors, as well
as the components of the k-vectors defining the high-symmetry
points known for the given lattice.
Note that RHL has two variants:
RHL1, where alpha < 90 degrees
RHL2, where alpha > 90 degrees
Symmetry points (SymPts_k) and standard_path are from:
<NAME>, <NAME> / Computational Materials Science 49 (2010) 299-312
"""
a, alpha = param[:2]
self.setting = setting
assert not abs(alpha - 90.0) < 1.e-5
self.alpha_rad = np.radians(alpha)
self.constants = [a, a, a, alpha, alpha, alpha]
self.a = a
self.angle = alpha
c1 = np.cos(self.alpha_rad)
c2 = np.cos(self.alpha_rad/2.)
s2 = np.sin(self.alpha_rad/2.)
self.primv = [ self.a*np.array([c2, -s2, 0.]),
self.a*np.array([c2, +s2, 0.]),
self.a*np.array([c1/c2, 0., np.sqrt(1-(c1/c2)**2)]) ]
self.convv = self.primv
# The fractions defining the symmetry points in terms of reciprocal vectors
# are dependent on the angle alpha of the RHL lattice
# So we cannot use the dictionary SymPts_k to get them.
if self.alpha_rad < pi/2.:
eta = (1 + 4 * np.cos(self.alpha_rad)) / (2 + 4 * np.cos(self.alpha_rad))
nu = 3./4. - eta/2.
self.SymPts_k =\
{ 'Gamma': (0, 0, 0),
'B' : (eta, 1./2., 1-eta),
'B1': (1./2., 1-eta, eta-1),
'F' : (1./2., 1./2., 0),
'L' : (1./2., 0, 0),
'L1': (0, 0, -1./2.),
'P' : (eta, nu, nu),
'P1': (1-nu, 1-nu, 1-eta),
'P2': (nu, nu, eta-1),
'Q' : (1-nu, nu, 0),
'X' : (nu, 0, -nu),
'Z' : (1./2., 1/2., 1./2.), }
self.standard_path = "Gamma-L-B1|B-Z-Gamma-X|Q-F-P1-Z|L-P"
else:
self.name = 'RHL2'
eta = 1./(2 * (np.tan(self.alpha_rad/2.))**2)
nu = 3./4. - eta/2.
self.SymPts_k =\
{ 'Gamma': (0, 0, 0),
'F' : (1./2., -1./2., 0),
'L' : (1./2., 0, 0),
'P' : (1-nu, -nu, 1-nu),
'P1': (nu, nu-1, nu-1),
'Q' : (eta, eta, eta),
'Q1': (1-eta, -eta, -eta),
'Z' : (1./2., -1/2., 1./2.), }
self.standard_path = "Gamma-P-Z-Q-Gamma-F-P1-Q1-L-Z"
self.eta = eta
self.nu = nu
class MCL(object):
"""
This is simple Monoclinic MCL_* (mP) lattice, set via
a, b <= c, and alpha < 90 degrees, beta = gamma = 90 degrees as in
<NAME>, <NAME> / Computational Materials Science 49 (2010) 299-312.
Setting=ITC should work for the standard setting (angle>90) of ITC-A,
but is not currently implemented.
Note that conventional and primitive cells are the same.
"""
def __init__(self, param, setting='curtarolo'):
"""
The default setting assumes that alpha < 90 as in
<NAME>, <NAME> / Computational Materials Science 49 (2010) 299-312
TODO: support for setting='ITC'
"""
a, b, c, beta = param[:4]
self.beta_rad = np.radians(beta)
self.constants = [a, b, c, pi/2, self.beta_rad, pi/2]
self.a = a
self.b = b
self.c = c
self.angle = beta
self.setting = setting
if setting == 'curtarolo':
assert (a<=c) and (b<=c) and (beta < 90)
a1c = a * np.array([1, 0, 0])
a2c = b * np.array([0, 1, 0])
a3c = c * np.array([0, np.cos(self.beta_rad), np.sin(self.beta_rad)])
self.convv = np.array([a1c, a2c, a3c])
# primitive cell
self.primv = self.convv
#
eta = ( 1 - self.b * np.cos(self.beta_rad) / self.c ) / ( 2 * (np.sin(self.beta_rad))**2 )
nu = 1./2. - eta * self.c * np.cos(self.beta_rad) / self.b
self.SymPts_k =\
{ 'Gamma': (0., 0., 0.),
'A' : (1./2., 1./2., 0.),
'C' : (0., 1./2., 1./2.),
'D' : (1./2., 0., 1./2.),
'D1' : (1./2., 0., -1./2.),
'E' : (1./2., 1./2., 1./2.),
'H' : (0., eta, 1-nu),
'H1' : (0., 1-eta, nu),
'H2' : (0., eta, -nu),
'M' : (1./2., eta, 1-nu),
'M1' : (1./2., 1-eta, nu),
'M2' : (1./2., eta, -nu),
'X' : (0., 1./2., 0.),
'Y' : (0., 0., 1./2.),
'Y1' : (0., 0., -1./2.),
'Z' : (1./2., 0., 0.), }
self.standard_path = "Gamma-Y-H-C-E-M1-A-X-H1|M-D-Z|Y-D"
else:
logger.error('Unsupported setting {} for {} lattice'.format(setting, type(self).__name__))
sys.exit(2)
class MCLC(object):
"""
This is base-centered Monoclinic MCLC_* mS, lattice
Primitive lattice defined via:
a <> b <> c, and alpha <> 90 degrees, beta = gamma = 90 degrees
"""
def __init__(self, param, setting='ITC'):
"""
Note that MCLC has several variants, depending on abc ordering and
face(base) centering or not:
Additionally, ITC stipulates alpha > 90 degrees, while in
<NAME>, <NAME> / Computational Materials Science 49 (2010) 299-312
alpha < 90 is used.
"""
a, b, c, angle = param[:4]
self.a = a
self.b = b
self.c = c
self.angle_rad = np.radians(angle)
self.angle = angle
self.constants = [a, b, c, angle]
self.setting = setting
if setting == 'ITC' and self.angle_rad > pi/2.:
assert (a >= b) and (a >= c) and (angle > 90)
# conventional cell
a1c = self.a * np.array([1, 0, 0])
a2c = self.b * np.array([0, 1, 0])
a3c = self.c * np.array([np.cos(self.angle_rad), 0, np.sin(self.angle_rad)])
self.convv = np.array([a1c, a2c, a3c])
# primitive cell
a1p = (+a1c + a2c) / 2.
a2p = (-a1c + a2c) / 2.
a3p = a3c
self.primv = np.array([a1p, a2p, a3p])
# The fractions defining the symmetry points in terms of reciprocal
# cell-vectors are dependent on the angle alpha of the MCLC lattice
# So we cannot use the dictionary SymPts_k to get them.
psi = 3./4. - (self.b / (2 * self.a * np.sin(self.angle_rad)))**2
phi = psi - ( 3./4. - psi ) * (self.a / self.c) * np.cos(self.angle_rad)
ksi = ( 2 + (self.a/self.c) * np.cos(self.angle_rad) ) / ( 2 * np.sin(self.angle_rad) )**2
eta = 1./2. - 2 * ksi * (self.c/self.a) * np.cos(self.angle_rad)
#logger.debug ((psi, phi, ksi, eta))
self.SymPts_k =\
{ 'Gamma': (0., 0., 0.),
'N' : (0., 1./2., 0.),
'N1': (1./2., 0, 0),
'F' : (ksi-1, 1-ksi, 1-eta),
'F1': (-ksi, ksi, eta),
'F2': (ksi, -ksi, 1-eta),
'I' : (phi-1, phi, 1./2.),
'I1': (1-phi, 1-phi, 1./2.),
'Y' : (-1./2., 1./2., 0),
'Y1': (1./2., 0., 0.),
'L' : (-1./2., 1./2., 1./2.),
'M' : (0., 1./2., 1./2.),
'X' : (1-psi, 1-psi, 0.),
'X1': (psi-1, psi, 0.),
'X2': (psi, psi-1, 0.),
'Z' : (0., 0., 1./2.), }
self.standard_path = "X1-Y-Gamma-N-X-Gamma-M-I-L-F-Y-Gamma-Z-F1-Z-I1"
else:
            logger.error('Unsupported setting {} for {} lattice'.format(setting, self.__class__.__name__))
sys.exit(2)
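# Hedged usage sketch (not part of the original module): the MCLC constructor in the
# 'ITC' setting requires a >= b, a >= c and an obtuse angle; the cell parameters below
# are illustrative assumptions only.
def _demo_mclc_usage():
    mclc = MCLC([7.0, 5.0, 6.0, 110.0], setting='ITC')
    print(mclc.primv)               # primitive vectors of the base-centred cell
    print(mclc.SymPts_k['F'])       # fractions depend on psi, phi, ksi, eta
    print(mclc.standard_path)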
def get_kvec(comp_rc, recipr_cell):
"""
*comp_rc* are the components of a vector expressed in terms of
reciprocal cell vectors *recipr_cell*.
Return the components of this vector in terms of reciprocal
unit vectors.
"""
kvec = np.dot(np.array(comp_rc), np.array(recipr_cell))
# the above is equivalent to: kvec = np.sum([beta[i]*Bvec[i] for i in range(3)], axis=0)
return kvec
def get_recipr_cell(A, scale=2*pi):
"""
    Given a set of three vectors *A*, assumed to be those defining
the primitive cell, return the corresponding set of vectors that define
the reciprocal cell, *B*, scaled by the input parameter *scale*,
which defaults to 2pi. The B-vectors are computed as follows:
B0 = scale * (A1 x A2)/(A0 . A1 x A2)
B1 = scale * (A2 x A0)/(A0 . A1 x A2)
B2 = scale * (A0 x A1)/(A0 . A1 x A2)
    and are returned as a list of 1D arrays.
Recall that the triple-scalar product is invariant under circular shift,
and equals the (signed) volume of the primitive cell.
"""
volume = np.dot(A[0],np.cross(A[1],A[2]))
B = []
# use this to realise circular shift
index = np.array(list(range(len(A))))
for i in range(len(A)):
        (i1,i2) = np.roll(index,-(i+1))[:2]
        # B_i = scale * (A_i1 x A_i2) / (A0 . A1 x A2), per the docstring above
        B.append(scale * np.cross(A[i1], A[i2]) / volume)
    return B
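# Hedged usage sketch (not part of the original module): for a simple cubic cell the
# vectors returned by get_recipr_cell() should satisfy B_i . A_j = scale * delta_ij,
# and get_kvec() converts fractional k-point components into Cartesian components.
# The lattice constant is an arbitrary illustrative value.
def _demo_recipr_cell():
    A = np.eye(3) * 2.0                         # cubic cell, lattice constant 2
    B = get_recipr_cell(A, scale=2*pi)
    print(np.dot(np.array(B), np.array(A).T))   # ~ scale on the diagonal, 0 elsewhere
    print(get_kvec((0.5, 0.5, 0.5), B))         # Cartesian components of the zone corner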
import pykalman
import numpy as np
import tkanvas
# Draw each step of the Kalman filter onto a TKinter canvas
def draw_kalman_filter(src):
src.clear()
# update paths and draw them
for p in src.obs_path:
src.circle(p[0], p[1], 2, fill="white")
for p in src.track_path:
src.circle(p[0], p[1], 2, fill="blue")
    track = not np.any(np.isnan(src.obs[0]))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import types
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from numpy.testing import utils
from .example_models import models_1D, models_2D
from .. import (fitting, models, LabeledInput, SerialCompositeModel,
SummedCompositeModel)
from ..core import FittableModel
from ..polynomial import PolynomialBase
from ...tests.helper import pytest
from ...extern import six
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestSerialComposite(object):
"""
Test composite models evaluation in series
"""
def setup_class(self):
self.y, self.x = np.mgrid[:5, :5]
self.p1 = models.Polynomial1D(3)
self.p11 = models.Polynomial1D(3)
self.p2 = models.Polynomial2D(3)
def test_single_array_input(self):
model = SerialCompositeModel([self.p1, self.p11])
result = model(self.x)
xx = self.p11(self.p1(self.x))
utils.assert_almost_equal(xx, result)
def test_labeledinput_1(self):
labeled_input = LabeledInput([self.x, self.y], ['x', 'y'])
model = SerialCompositeModel([self.p2, self.p1],
[['x', 'y'], ['z']],
[['z'], ['z']])
result = model(labeled_input)
z = self.p2(self.x, self.y)
z1 = self.p1(z)
utils.assert_almost_equal(z1, result.z)
def test_labeledinput_2(self):
labeled_input = LabeledInput([self.x, self.y], ['x', 'y'])
rot = models.Rotation2D(angle=23.4)
offx = models.Shift(-2)
offy = models.Shift(1.2)
model = SerialCompositeModel([rot, offx, offy],
[['x', 'y'], ['x'], ['y']],
[['x', 'y'], ['x'], ['y']])
result = model(labeled_input)
x, y = rot(self.x, self.y)
x = offx(x)
y = offy(y)
utils.assert_almost_equal(x, result.x)
utils.assert_almost_equal(y, result.y)
def test_labeledinput_3(self):
labeled_input = LabeledInput([2, 4.5], ['x', 'y'])
rot = models.Rotation2D(angle=23.4)
offx = models.Shift(-2)
offy = models.Shift(1.2)
model = SerialCompositeModel([rot, offx, offy],
[['x', 'y'], ['x'], ['y']],
[['x', 'y'], ['x'], ['y']])
result = model(labeled_input)
x, y = rot(2, 4.5)
x = offx(x)
y = offy(y)
utils.assert_almost_equal(x, result.x)
utils.assert_almost_equal(y, result.y)
def test_multiple_input(self):
rot = models.Rotation2D(angle=-60)
model = SerialCompositeModel([rot, rot])
xx, yy = model(self.x, self.y)
x1, y1 = model.inverse(xx, yy)
utils.assert_almost_equal(x1, self.x)
utils.assert_almost_equal(y1, self.y)
class TestSummedComposite(object):
"""Test legacy composite models evaluation."""
def setup_class(self):
self.x = np.linspace(1, 10, 100)
self.y = np.linspace(1, 10, 100)
self.p1 = models.Polynomial1D(3)
self.p11 = models.Polynomial1D(3)
self.p2 = models.Polynomial2D(3)
self.p1.parameters = [1.4, 2.2, 3.1, 4]
self.p2.c0_0 = 100
def test_single_array_input(self):
model = SummedCompositeModel([self.p1, self.p11])
result = model(self.x)
delta11 = self.p11(self.x)
delta1 = self.p1(self.x)
xx = delta1 + delta11
utils.assert_almost_equal(xx, result)
def test_labeledinput(self):
labeled_input = LabeledInput([self.x, self.y], ['x', 'y'])
model = SummedCompositeModel([self.p1, self.p11], inmap=['x'],
outmap=['x'])
result = model(labeled_input)
delta11 = self.p11(self.x)
delta1 = self.p1(self.x)
xx = delta1 + delta11
utils.assert_almost_equal(xx, result.x)
def test_inputs_outputs_mismatch(self):
p2 = models.Polynomial2D(1)
ch2 = models.Chebyshev2D(1, 1)
with pytest.raises(ValueError):
SummedCompositeModel([p2, ch2])
def test_pickle():
p1 = models.Polynomial1D(3)
p11 = models.Polynomial1D(4)
g1 = models.Gaussian1D(10.3, 5.4, 1.2)
serial_composite_model = SerialCompositeModel([p1, g1])
parallel_composite_model = SummedCompositeModel([serial_composite_model,
p11])
s = pickle.dumps(parallel_composite_model)
s1 = pickle.loads(s)
assert s1(3) == parallel_composite_model(3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
Jacobian of model function, e.g. derivative of the function with
respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model_1d(sine_model, func_fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
y = sin_model.evaluate(x, 5., 2.)
y_prime = sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
fitter = fitting.LevMarLSQFitter()
model = fitter(sin_model, x, data)
assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model_1d
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model_1d
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2)
sub_arr = model(x1, y1, z1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
class Fittable2DModelTester(object):
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions, and ensures that it gives the correct values. It also tests
    whether the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all((np.abs(model(x, y) - z) < self.eval_error))
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
        # test the exception when dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
pytest.skip("Bounding_box is not defined for model.")
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
"""Test if the parametric model works with the fitter."""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.N)
import numpy as np
from numpy.testing import assert_allclose, run_module_suite
from pyins import sim
from pyins import earth
from pyins import dcm
def test_from_position():
dt = 1e-1
n_points = 1000
lat = np.full(n_points, 50.0)
lon = np.full(n_points, 45.0)
alt = np.zeros(n_points)
h = np.zeros(n_points)
p = np.zeros(n_points)
r = np.zeros(n_points)
VE = np.zeros(n_points)
VN = np.zeros(n_points)
VU = np.zeros(n_points)
slat = np.sin(np.deg2rad(50))
clat = (1 - slat**2) ** 0.5
gyro = earth.RATE * np.array([0, clat, slat]) * dt
accel = np.array([0, 0, earth.gravity(50)]) * dt
traj, gyro_g, accel_g = sim.from_position(dt, lat, lon, alt, h, p, r)
assert_allclose(traj.lat, 50, rtol=1e-12)
assert_allclose(traj.lon, 45, rtol=1e-12)
assert_allclose(traj.VE, 0, atol=1e-7)
assert_allclose(traj.VN, 0, atol=1e-7)
assert_allclose(traj.h, 0, atol=1e-8)
assert_allclose(traj.p, 0, atol=1e-8)
assert_allclose(traj.r, 0, atol=1e-8)
for i in range(3):
assert_allclose(gyro_g[:, i], gyro[i], atol=1e-14)
assert_allclose(accel_g[:, i], accel[i], atol=1e-7)
traj, gyro_g, accel_g = sim.from_velocity(dt, 50, 45, 0, VE, VN, VU,
h, p, r)
assert_allclose(traj.lat, 50, rtol=1e-12)
assert_allclose(traj.lon, 45, rtol=1e-12)
assert_allclose(traj.VE, 0, atol=1e-7)
assert_allclose(traj.VN, 0, atol=1e-7)
assert_allclose(traj.h, 0, atol=1e-8)
assert_allclose(traj.p, 0, atol=1e-8)
assert_allclose(traj.r, 0, atol=1e-8)
for i in range(3):
assert_allclose(gyro_g[:, i], gyro[i], atol=1e-14)
assert_allclose(accel_g[:, i], accel[i], atol=1e-7)
def test_stationary():
dt = 0.1
n_points = 100
t = dt * np.arange(n_points)
lat = 58
alt = -10
h = np.full(n_points, 45)
    p = np.full(n_points, -10)
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
import os
import yaml
import numpy as np
import numpy.random as npr
from .generate_anchors import generate_anchors
from ..utils.cython_bbox import bbox_overlaps, bbox_intersections
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..fast_rcnn.bbox_transform import bbox_transform
# <<<< obsolete
def anchor_target_layer(rpn_cls_score, gt_boxes, gt_ishard, dontcare_areas, im_info, feat_stride_vec=[4,8,16,32,64],
anchor_scales=[2, 4, 8, 16, 32]):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
Parameters
----------
rpn_cls_score: for pytorch (1, Ax2, H, W) bg/fg scores of previous conv layer
gt_boxes: (G, 5) vstack of [x1, y1, x2, y2, class]
gt_ishard: (G, 1), 1 or 0 indicates difficult or not
dontcare_areas: (D, 4), some areas may contains small objs but no labelling. D may be 0
im_info: a list of [image_height, image_width, scale_ratios]
_feat_stride: the downsampling ratio of feature map to the original input image
anchor_scales: the scales to the basic_anchor (basic anchor is [16, 16])
----------
Returns
----------
rpn_labels : (HxWxA, 1), for each anchor, 0 denotes bg, 1 fg, -1 dontcare
rpn_bbox_targets: (HxWxA, 4), distances of the anchors to the gt_boxes(may contains some transform)
that are the regression objectives
    rpn_bbox_inside_weights: (HxWxA, 4) weights of each box, mainly set from hyper params in cfg
rpn_bbox_outside_weights: (HxWxA, 4) used to balance the fg/bg,
                            because the numbers of bgs and fgs may be significantly different
"""
# allow boxes to sit over the edge by a small amount
_allowed_border = 1000
im_info = im_info[0]
fpn_args = []
fpn_anchors_fid = np.zeros(0).astype(int)
fpn_anchors = np.zeros([0, 4])
fpn_labels = np.zeros(0)
fpn_inds_inside = []
fpn_size = len(rpn_cls_score) #[P2,P3,P4,P5,P6]
for i in range(fpn_size):
_anchors = generate_anchors(scales=np.array([anchor_scales[i]]))
_num_anchors = _anchors.shape[0]
_feat_stride = feat_stride_vec[i]
# map of shape (..., H, W)
#height, width = rpn_cls_score.shape[1:3]
# Algorithm:
#
# for each (H, W) location i
# generate 9 anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the 9 anchors
# filter out-of-image anchors
# measure GT overlap
assert rpn_cls_score[i].shape[0] == 1, \
'Only single item batches are supported'
# map of shape (..., H, W)
# pytorch (bs, c, h, w)
height, width = rpn_cls_score[i].shape[2:4]
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * _feat_stride
shift_y = np.arange(0, height) * _feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y) # in W H order
# K is H x W
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = _num_anchors
K = shifts.shape[0]
all_anchors = (_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -_allowed_border) &
(all_anchors[:, 1] >= -_allowed_border) &
(all_anchors[:, 2] < im_info[1] + _allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + _allowed_border) # height
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is dont care
# (A)
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
fpn_anchors_fid = np.hstack((fpn_anchors_fid, len(inds_inside)))
fpn_anchors = np.vstack((fpn_anchors, anchors))
fpn_labels = np.hstack((fpn_labels, labels))
fpn_inds_inside.append(inds_inside)
fpn_args.append([height, width, A, total_anchors])
if len(gt_boxes) > 0:
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt), shape is A x G
overlaps = bbox_overlaps(
np.ascontiguousarray(fpn_anchors, dtype=np.float),
            np.ascontiguousarray(gt_boxes, dtype=np.float))
#!/usr/bin/env python
##########################################################################
# Copyright 2018 Kata.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import argparse
import json
import math
import numpy as np
def truncate_paragraphs(obj, length):
for key in 'paragraphs gold_labels pred_labels'.split():
if key in obj:
obj[key] = obj[key][:length]
def create_outlier_detector(data):
q1, q3 = np.percentile(data, (25, 75))
iqr = q3 - q1
def is_outlier(x):
return x < q1 - 1.5 * iqr or x > q3 + 1.5 * iqr
return is_outlier
def read_jsonl(path, encoding='utf-8'):
    with open(path, encoding=encoding) as f:
return [json.loads(line.strip()) for line in f]
def train_for_paras_length(train_objs):
paras_lengths = [len(obj['paragraphs']) for obj in train_objs]
    return np.mean(paras_lengths)
from copy import deepcopy
import logging
import os
import pickle
from bids.layout import BIDSImageFile
from bids.layout.writing import build_path as bids_build_path
import nibabel as nib
import numpy as np
import pandas as pd
import pytest
from rtCommon.bidsCommon import (
BIDS_DIR_PATH_PATTERN,
BIDS_FILE_PATTERN,
PYBIDS_PSEUDO_ENTITIES,
BidsFileExtension,
getNiftiData,
metadataFromProtocolName,
)
from rtCommon.bidsIncremental import BidsIncremental
from rtCommon.bidsArchive import BidsArchive
from rtCommon.errors import MissingMetadataError
from tests.common import isValidBidsArchive
logger = logging.getLogger(__name__)
# Test that construction fails for image metadata missing required fields
def testInvalidConstruction(sample2DNifti1, samplePseudo2DNifti1,
sample4DNifti1, imageMetadata):
# Test empty image
with pytest.raises(TypeError):
BidsIncremental(image=None,
imageMetadata=imageMetadata)
# Test 2-D image
with pytest.raises(ValueError) as err:
BidsIncremental(image=sample2DNifti1,
imageMetadata=imageMetadata)
assert "Image must have at least 3 dimensions" in str(err.value)
# Test 2-D image masquerading as 4-D image
with pytest.raises(ValueError) as err:
BidsIncremental(image=samplePseudo2DNifti1,
imageMetadata=imageMetadata)
assert ("Image's 3rd (and any higher) dimensions are <= 1, which means "
"it is a 2D image; images must have at least 3 dimensions" in
str(err.value))
# Test incomplete metadata
protocolName = imageMetadata.pop("ProtocolName")
for key in BidsIncremental.REQUIRED_IMAGE_METADATA:
value = imageMetadata.pop(key)
assert not BidsIncremental.isCompleteImageMetadata(imageMetadata)
with pytest.raises(MissingMetadataError):
BidsIncremental(image=sample4DNifti1,
imageMetadata=imageMetadata)
imageMetadata[key] = value
imageMetadata["ProtocolName"] = protocolName
# Test too-large repetition and echo times
for key in ["RepetitionTime", "EchoTime"]:
original = imageMetadata[key]
imageMetadata[key] = 10**6
with pytest.raises(ValueError):
BidsIncremental(image=sample4DNifti1,
imageMetadata=imageMetadata)
imageMetadata[key] = original
# Test non-image object
with pytest.raises(TypeError) as err:
notImage = "definitely not an image"
BidsIncremental(image=notImage,
imageMetadata=imageMetadata)
assert ("Image must be one of [nib.Nifti1Image, nib.Nifti2Image, "
f"BIDSImageFile (got {type(notImage)})" in str(err.value))
# Test non-functional data
with pytest.raises(NotImplementedError) as err:
original = imageMetadata['datatype']
invalidType = 'anat'
imageMetadata['datatype'] = invalidType
BidsIncremental(image=sample4DNifti1,
imageMetadata=imageMetadata)
imageMetadata['datatype'] = original
assert ("BIDS Incremental for BIDS datatypes other than 'func' is not "
f"yet implemented (got '{invalidType}')") in str(err.value)
# Test that valid arguments produce a BIDS incremental
def testValidConstruction(sample3DNifti1, sample3DNifti2,
sample4DNifti1, sampleNifti2, bidsArchive4D,
imageMetadata):
# 3-D should be promoted to 4-D
assert BidsIncremental(sample3DNifti1, imageMetadata) is not None
assert BidsIncremental(sample3DNifti2, imageMetadata) is not None
# Both Nifti1 and Nifti2 images should work
assert BidsIncremental(sample4DNifti1, imageMetadata) is not None
assert BidsIncremental(sampleNifti2, imageMetadata) is not None
# If the metadata provides a RepetitionTime or EchoTime that works without
# adjustment, the construction should still work
repetitionTimeKey = "RepetitionTime"
original = imageMetadata[repetitionTimeKey]
imageMetadata[repetitionTimeKey] = 1.5
assert BidsIncremental(sample4DNifti1, imageMetadata) is not None
imageMetadata[repetitionTimeKey] = original
# Passing a BIDSImageFile is also valid
image = bidsArchive4D.getImages()[0]
assert type(image) is BIDSImageFile
assert BidsIncremental(image, imageMetadata) is not None
# Test that metadata values are of the correct types, if required by BIDS
def testMetadataTypes(validBidsI):
typeDict = {"RepetitionTime": float, "EchoTime": float}
for field, typ in typeDict.items():
assert type(validBidsI.getMetadataField(field)) is typ
# Test that the provided image metadata dictionary takes precedence over the
# metadata parsed from the protocol name, if any
def testConstructionMetadataPrecedence(sample4DNifti1, imageMetadata):
assert imageMetadata.get('ProtocolName', None) is not None
metadata = metadataFromProtocolName(imageMetadata['ProtocolName'])
assert len(metadata) > 0
assert metadata.get('run', None) is not None
newRunNumber = int(metadata['run']) + 1
imageMetadata['run'] = newRunNumber
assert metadata['run'] != imageMetadata['run']
incremental = BidsIncremental(sample4DNifti1, imageMetadata)
assert incremental.getMetadataField('run') == newRunNumber
# Test that the string output of the BIDS-I is as expected
def testStringOutput(validBidsI):
imageShape = str(validBidsI.getImageDimensions())
keyCount = len(validBidsI._imgMetadata.keys())
version = validBidsI.version
assert str(validBidsI) == f"Image shape: {imageShape}; " \
f"Metadata Key Count: {keyCount}; " \
f"BIDS-I Version: {version}"
# Test that equality comparison is as expected
def testEquals(sample4DNifti1, sample3DNifti1, imageMetadata):
# Test images with different headers
assert BidsIncremental(sample4DNifti1, imageMetadata) != \
BidsIncremental(sample3DNifti1, imageMetadata)
# Test images with the same header, but different data
newData = 2 * getNiftiData(sample4DNifti1)
reversedNifti1 = nib.Nifti1Image(newData, sample4DNifti1.affine,
header=sample4DNifti1.header)
assert BidsIncremental(sample4DNifti1, imageMetadata) != \
BidsIncremental(reversedNifti1, imageMetadata)
# Test different image metadata
modifiedImageMetadata = deepcopy(imageMetadata)
modifiedImageMetadata["subject"] = "newSubject"
assert BidsIncremental(sample4DNifti1, imageMetadata) != \
BidsIncremental(sample4DNifti1, modifiedImageMetadata)
# Test different dataset metadata
datasetMeta1 = {"Name": "Dataset_1", "BIDSVersion": "1.0"}
datasetMeta2 = {"Name": "Dataset_2", "BIDSVersion": "2.0"}
assert BidsIncremental(sample4DNifti1, imageMetadata, datasetMeta1) != \
BidsIncremental(sample4DNifti1, imageMetadata, datasetMeta2)
# Test different readme
incremental1 = BidsIncremental(sample4DNifti1, imageMetadata)
incremental2 = BidsIncremental(sample4DNifti1, imageMetadata)
readme1 = "README 1"
readme2 = "README 2"
incremental1.readme = readme1
incremental2.readme = readme2
assert incremental1 != incremental2
# Test different events file
incremental1 = BidsIncremental(sample4DNifti1, imageMetadata)
incremental2 = BidsIncremental(sample4DNifti1, imageMetadata)
events1 = {'onset': [1, 25, 50], 'duration': [10, 10, 10], 'response_time':
[15, 36, 70]}
events2 = {key: [v + 5 for v in events1[key]] for key in events1.keys()}
incremental1.events = pd.DataFrame(data=events1)
incremental2.events = pd.DataFrame(data=events2)
assert incremental1 != incremental2
# Test that image metadata dictionaries can be properly created by the class
def testImageMetadataDictCreation(imageMetadata):
createdDict = BidsIncremental.createImageMetadataDict(
subject=imageMetadata["subject"],
task=imageMetadata["task"],
suffix=imageMetadata["suffix"],
repetitionTime=imageMetadata["RepetitionTime"],
datatype='func')
for key in createdDict.keys():
assert createdDict.get(key) == imageMetadata.get(key)
# Ensure that the method is in sync with the required metadata
# Get all required fields as lowerCamelCase for passing as kwargs
requiredFieldsCamel = [(key[0].lower() + key[1:]) for key in
BidsIncremental.REQUIRED_IMAGE_METADATA]
dummyValue = 'n/a'
metadataDict = {key: dummyValue for key in requiredFieldsCamel}
createdDict = BidsIncremental.createImageMetadataDict(**metadataDict)
for field in BidsIncremental.REQUIRED_IMAGE_METADATA:
assert createdDict[field] == dummyValue
# Test that internal metadata dictionary is independent from the argument dict
def testMetadataDictionaryIndependence(sample4DNifti1, imageMetadata):
incremental = BidsIncremental(sample4DNifti1, imageMetadata)
key = 'subject'
assert incremental.getMetadataField(key) == imageMetadata[key]
old = incremental.getMetadataField(key)
imageMetadata[key] = 'a brand-new subject'
assert incremental.getMetadataField(key) == old
assert incremental.getMetadataField(key) != imageMetadata[key]
# Test that invalid dataset.json fields are rejected and valid ones are accepted
def testDatasetMetadata(sample4DNifti1, imageMetadata):
# Test invalid dataset metadata
with pytest.raises(MissingMetadataError):
BidsIncremental(image=sample4DNifti1,
imageMetadata=imageMetadata,
datasetDescription={"random_field": "doesnt work"})
# Test valid dataset metadata
dataset_name = "Test dataset"
bidsInc = BidsIncremental(image=sample4DNifti1,
imageMetadata=imageMetadata,
datasetDescription={"Name": dataset_name,
"BIDSVersion": "1.0"})
assert bidsInc.getDatasetName() == dataset_name
# Test that extracting metadata from the BIDS-I using its provided API returns
# the correct values
def testMetadataOutput(validBidsI, imageMetadata):
with pytest.raises(ValueError):
validBidsI.getMetadataField("InvalidEntityName", strict=True)
with pytest.raises(KeyError):
validBidsI.getMetadataField("InvalidEntityName")
# Data type - always 'func' currently
assert validBidsI.getDatatype() == "func"
# Entities
for entity in ['subject', 'task']:
assert validBidsI.getMetadataField(entity) == imageMetadata[entity]
# Suffix
assert validBidsI.getSuffix() == imageMetadata["suffix"]
# Test setting BIDS-I metadata API works as expected
def testSetMetadata(validBidsI):
# Test non-official BIDS entity fails with strict
with pytest.raises(ValueError):
validBidsI.setMetadataField("nonentity", "value", strict=True)
# Non-official BIDS entity succeeds without strict
validBidsI.setMetadataField("nonentity", "value", strict=False)
assert validBidsI.getMetadataField("nonentity", strict=False) == "value"
validBidsI.removeMetadataField("nonentity", strict=False)
# None field is invalid
with pytest.raises(ValueError):
validBidsI.setMetadataField(None, "test")
entityName = "subject"
newValue = "newValue"
originalValue = validBidsI.getMetadataField(entityName)
validBidsI.setMetadataField(entityName, newValue)
assert validBidsI.getMetadataField(entityName) == newValue
validBidsI.setMetadataField(entityName, originalValue)
assert validBidsI.getMetadataField(entityName) == originalValue
# Test removing BIDS-I metadata API works as expected
def testRemoveMetadata(validBidsI):
# Fail for entities that don't exist
with pytest.raises(ValueError):
validBidsI.removeMetadataField("nonentity", strict=True)
# Fail for entities that are required to be in the dictionary
with pytest.raises(RuntimeError):
validBidsI.removeMetadataField("subject")
entityName = "ProtocolName"
originalValue = validBidsI.getMetadataField(entityName)
validBidsI.removeMetadataField(entityName)
with pytest.raises(KeyError):
validBidsI.getMetadataField(entityName) is None
validBidsI.setMetadataField(entityName, originalValue)
assert validBidsI.getMetadataField(entityName) == originalValue
# Test that the BIDS-I interface methods for extracting internal NIfTI data
# return the correct values
def testQueryNifti(validBidsI):
# Image data
queriedData = validBidsI.getImageData()
exactData = getNiftiData(validBidsI.image)
assert np.array_equal(queriedData, exactData), "{} elements not equal" \
.format(np.sum(np.where(queriedData != exactData)))
# Header Data
queriedHeader = validBidsI.getImageHeader()
exactHeader = validBidsI.image.header
# Compare full image header
assert queriedHeader.keys() == exactHeader.keys()
for (field, queryValue) in queriedHeader.items():
exactValue = exactHeader.get(field)
if queryValue.dtype.char == 'S':
assert queryValue == exactValue
else:
            assert np.allclose(queryValue, exactValue, atol=0.0, equal_nan=True)
"""
==============
Triinterp Demo
==============
Interpolation from triangular grid to quad grid.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
import numpy as np
# Create triangulation.
x = np.asarray([0, 1, 2, 3, 0.5, 1.5, 2.5, 1, 2, 1.5])
y = np.asarray([0, 0, 0, 0, 1.0, 1.0, 1.0, 2, 2, 3.0])
#!/usr/bin/env python3
###############
# Author: <NAME>
# Purpose: Kinova 3-fingered gripper in mujoco environment
# Summer 2019
###############
#TODO: Remove unecesssary commented lines
#TODO: Make a brief description of each function commented at the top of it
from gym import utils, spaces
import gym
from gym import wrappers # Used to get Monitor wrapper to save rendering video
import glfw
from gym.utils import seeding
# from gym.envs.mujoco import mujoco_env
import numpy as np
from mujoco_py import MjViewer, load_model_from_path, MjSim #, MjRenderContextOffscreen
import mujoco_py
# from PID_Kinova_MJ import *
import math
import matplotlib.pyplot as plt
import time
import os, sys
from scipy.spatial.transform import Rotation as R
import random
import pickle
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
import xml.etree.ElementTree as ET
from classifier_network import LinearNetwork, ReducedLinearNetwork
import re
from scipy.stats import triang
import csv
import pandas as pd
from pathlib import Path
import threading #oh boy this might get messy
from PIL import Image, ImageFont, ImageDraw # Used to save images from rendering simulation
import shutil
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class KinovaGripper_Env(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, arm_or_end_effector="hand", frame_skip=15):
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.arm_or_hand=arm_or_end_effector
if arm_or_end_effector == "arm":
self._model = load_model_from_path(self.file_dir + "/kinova_description/j2s7s300.xml")
full_path = self.file_dir + "/kinova_description/j2s7s300.xml"
self.filename= "/kinova_description/j2s7s300.xml"
elif arm_or_end_effector == "hand":
pass
#self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1.xml"
#self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scyl.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scyl.xml"
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mbox.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mbox.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcyl.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcyl.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bbox.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bbox.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_shg.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_shg.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mhg.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mhg.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bhg.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bhg.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_svase.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_svase.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mvase.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mvase.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bvase.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bvase.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcap.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bcap.xml"
#full_path = file_dir + "/kinova_description/j2s7s300_end_effector_v1.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_blemon.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_blemon.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bbottle.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_btbottle.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_slemon.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_slemon.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sbottle.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_stbottle.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mlemon.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mbottle.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_msphere.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_sphere.xml"
#self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/DisplayStuff.xml"),'s',"/kinova_description/DisplayStuff.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcone1.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcone1.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scone1.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scone1.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcone2.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcone2.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml"
#self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scone2.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scone2.xml"
else:
print("CHOOSE EITHER HAND OR ARM")
raise ValueError
self._sim = MjSim(self._model) # The simulator. This holds all the information about object locations and orientations
        self.Grasp_Reward=False #This variable says whether or not a grasp reward has been given this run
self.with_grasp_reward=False # Set to True to use grasp reward from grasp classifier, otherwise grasp reward is 0
self.coords_filename=None # Name of the file used to sample initial object and hand pose coordinates from (Ex: noisy coordinates text file)
# coords_filename is default to None to randomly generate coordinate values
self.orientation='normal' # Stores string of exact hand orientation type (normal, rotated, top)
self._viewer = None # The render window
self.contacts=self._sim.data.ncon # The number of contacts in the simulation environment
        self.Tfw=np.zeros([4,4]) # The transfer matrix that gets us from the world frame to the local frame
self.wrist_pose=np.zeros(3) # The wrist position in world coordinates
self.thetas=[0,0,0,0,0,0,0] # The angles of the joints of a real robot arm used for calculating the jacobian of the hand
self._timestep = self._sim.model.opt.timestep
self.pid=False
self.step_coords='global'
self._torque = [0,0,0,0] #Unused
self._velocity = [0,0,0,0] #Unused
        self._jointAngle = [5,0,0,0] #Unused
self._positions = [] # ??
self._numSteps = 0
self._simulator = "Mujoco"
self.action_scale = 0.0333
self.max_episode_steps = 30
self.site_count=0
# Parameters for cost function
self.state_des = 0.20
self.initial_state = np.array([0.0, 0.0, 0.0, 0.0])
self.action_space = spaces.Box(low=np.array([-0.8, -0.8, -0.8, -0.8]), high=np.array([0.8, 0.8, 0.8, 0.8]), dtype=np.float32) # Velocity action space
self.const_T=np.array([[0,-1,0,0],[0,0,-1,0],[1,0,0,0],[0,0,0,1]]) #Transfer matrix from world frame to un-modified hand frame
self.frame_skip = frame_skip # Used in step. Number of frames you go through before you reach the next step
        self.all_states = None  # This is the variable we use to save the states before they are sent to the simulator when we are resetting.
self.state_rep = "local" # change accordingly
# Object data
self.obj_coords = [0,0,0]
self.objects = {}
self.obj_keys = list()
# Shape data for determining correct expert data to retrieve for sampling
self.random_shape = 'CubeS'
# Default index for orientation data files (coords and noise) based on hand pose
self.orientation_idx = 0
# Region to sample initial object coordinates from within the hand (left, center, right, target, origin)
self.obj_coord_region = None
# Dictionary containing all possible objects and their xml file
self.all_objects = {}
# Cube
self.all_objects["CubeS"] = "/kinova_description/j2s7s300_end_effector_v1_CubeS.xml"
self.all_objects["CubeM"] = "/kinova_description/j2s7s300_end_effector_v1_CubeM.xml"
self.all_objects["CubeB"] = "/kinova_description/j2s7s300_end_effector_v1_CubeB.xml"
# Cylinder
self.all_objects["CylinderS"] = "/kinova_description/j2s7s300_end_effector_v1_CylinderS.xml"
self.all_objects["CylinderM"] = "/kinova_description/j2s7s300_end_effector_v1_CylinderM.xml"
self.all_objects["CylinderB"] = "/kinova_description/j2s7s300_end_effector_v1_CylinderB.xml"
# Cube rotated by 45 degrees
self.all_objects["Cube45S"] = "/kinova_description/j2s7s300_end_effector_v1_Cube45S.xml"
self.all_objects["Cube45M"] = "/kinova_description/j2s7s300_end_effector_v1_Cube45M.xml"
self.all_objects["Cube45B"] = "/kinova_description/j2s7s300_end_effector_v1_Cube45B.xml"
# Vase 1
self.all_objects["Vase1S"] = "/kinova_description/j2s7s300_end_effector_v1_Vase1S.xml"
self.all_objects["Vase1M"] = "/kinova_description/j2s7s300_end_effector_v1_Vase1M.xml"
self.all_objects["Vase1B"] = "/kinova_description/j2s7s300_end_effector_v1_Vase1B.xml"
# Vase 2
self.all_objects["Vase2S"] = "/kinova_description/j2s7s300_end_effector_v1_Vase2S.xml"
self.all_objects["Vase2M"] = "/kinova_description/j2s7s300_end_effector_v1_Vase2M.xml"
self.all_objects["Vase2B"] = "/kinova_description/j2s7s300_end_effector_v1_Vase2B.xml"
# Cone 1
self.all_objects["Cone1S"] = "/kinova_description/j2s7s300_end_effector_v1_Cone1S.xml"
self.all_objects["Cone1M"] = "/kinova_description/j2s7s300_end_effector_v1_Cone1M.xml"
self.all_objects["Cone1B"] = "/kinova_description/j2s7s300_end_effector_v1_Cone1B.xml"
# Cone 2
self.all_objects["Cone2S"] = "/kinova_description/j2s7s300_end_effector_v1_Cone2S.xml"
self.all_objects["Cone2M"] = "/kinova_description/j2s7s300_end_effector_v1_Cone2M.xml"
self.all_objects["Cone2B"] = "/kinova_description/j2s7s300_end_effector_v1_Cone2B.xml"
## Nigel's Shapes ##
# Hourglass
self.all_objects["HourB"] = "/kinova_description/j2s7s300_end_effector_v1_bhg.xml"
self.all_objects["HourM"] = "/kinova_description/j2s7s300_end_effector_v1_mhg.xml"
self.all_objects["HourS"] = "/kinova_description/j2s7s300_end_effector_v1_shg.xml"
# Vase
self.all_objects["VaseB"] = "/kinova_description/j2s7s300_end_effector_v1_bvase.xml"
self.all_objects["VaseM"] = "/kinova_description/j2s7s300_end_effector_v1_mvase.xml"
self.all_objects["VaseS"] = "/kinova_description/j2s7s300_end_effector_v1_svase.xml"
# Bottle
self.all_objects["BottleB"] = "/kinova_description/j2s7s300_end_effector_v1_bbottle.xml"
self.all_objects["BottleM"] = "/kinova_description/j2s7s300_end_effector_v1_mbottle.xml"
self.all_objects["BottleS"] = "/kinova_description/j2s7s300_end_effector_v1_sbottle.xml"
# Bowl
self.all_objects["BowlB"] = "/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml"
self.all_objects["BowlM"] = "/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml"
self.all_objects["BowlS"] = "/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml"
# Lemon
self.all_objects["LemonB"] = "/kinova_description/j2s7s300_end_effector_v1_blemon.xml"
self.all_objects["LemonM"] = "/kinova_description/j2s7s300_end_effector_v1_mlemon.xml"
self.all_objects["LemonS"] = "/kinova_description/j2s7s300_end_effector_v1_slemon.xml"
# TBottle
self.all_objects["TBottleB"] = "/kinova_description/j2s7s300_end_effector_v1_btbottle.xml"
self.all_objects["TBottleM"] = "/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml"
self.all_objects["TBottleS"] = "/kinova_description/j2s7s300_end_effector_v1_stbottle.xml"
# RBowl
self.all_objects["RBowlB"] = "/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml"
self.all_objects["RBowlM"] = "/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml"
self.all_objects["RBowlS"] = "/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml"
# Originally used for defining min/max ranges of state input (currently not being used)
min_hand_xyz = [-0.1, -0.1, 0.0, -0.1, -0.1, 0.0, -0.1, -0.1, 0.0,-0.1, -0.1, 0.0, -0.1, -0.1, 0.0,-0.1, -0.1, 0.0, -0.1, -0.1, 0.0]
min_obj_xyz = [-0.1, -0.01, 0.0]
min_joint_states = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
min_obj_size = [0.0, 0.0, 0.0]
min_finger_obj_dist = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
min_obj_dot_prod = [0.0]
min_f_dot_prod = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
max_hand_xyz = [0.1, 0.1, 0.5, 0.1, 0.1, 0.5, 0.1, 0.1, 0.5,0.1, 0.1, 0.5, 0.1, 0.1, 0.5,0.1, 0.1, 0.5, 0.1, 0.1, 0.5]
max_obj_xyz = [0.1, 0.7, 0.5]
max_joint_states = [0.2, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
max_obj_size = [0.5, 0.5, 0.5]
max_finger_obj_dist = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
max_obj_dot_prod = [1.0]
max_f_dot_prod = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# print()
if self.state_rep == "global" or self.state_rep == "local":
obs_min = min_hand_xyz + min_obj_xyz + min_joint_states + min_obj_size + min_finger_obj_dist + min_obj_dot_prod #+ min_f_dot_prod
obs_min = np.array(obs_min)
# print(len(obs_min))
obs_max = max_hand_xyz + max_obj_xyz + max_joint_states + max_obj_size + max_finger_obj_dist + max_obj_dot_prod #+ max_f_dot_prod
obs_max = np.array(obs_max)
# print(len(obs_max))
self.observation_space = spaces.Box(low=obs_min , high=obs_max, dtype=np.float32)
elif self.state_rep == "metric":
            obs_min = list(np.zeros(17)) + [-0.1, -0.1, 0.0] + min_obj_xyz + min_joint_states + min_obj_size + min_finger_obj_dist + min_obj_dot_prod
            obs_max = list(np.full(17, np.inf)) + [0.1, 0.1, 0.5] + max_obj_xyz + max_joint_states + max_obj_size + max_finger_obj_dist + max_obj_dot_prod
self.observation_space = spaces.Box(low=np.array(obs_min) , high=np.array(obs_max), dtype=np.float32)
elif self.state_rep == "joint_states":
            obs_min = min_joint_states + min_obj_xyz + min_obj_size + min_obj_dot_prod
            obs_max = max_joint_states + max_obj_xyz + max_obj_size + max_obj_dot_prod
self.observation_space = spaces.Box(low=np.array(obs_min) , high=np.array(obs_max), dtype=np.float32)
# <---- end of unused section
self.Grasp_net = pickle.load(open(self.file_dir+'/kinova_description/gc_model.pkl', "rb"))
#self.Grasp_net = LinearNetwork().to(device) # This loads the grasp classifier
#trained_model = "/home/orochi/KinovaGrasping/gym-kinova-gripper/trained_model_05_28_20_2105local.pt"
#trained_model = "/home/orochi/KinovaGrasping/gym-kinova-gripper/trained_model_01_23_20_2052local.pt"
# self.Grasp_net = GraspValid_net(54).to(device)
# trained_model = "/home/graspinglab/NCS_data/ExpertTrainedNet_01_04_20_0250.pt"
#model = torch.load(trained_model)
#self.Grasp_net.load_state_dict(model)
#self.Grasp_net.eval()
obj_list=['Coords_try1.txt','Coords_CubeM.txt','Coords_try1.txt','Coords_CubeB.txt','Coords_CubeM.txt','Coords_CubeS.txt']
self.random_poses=[[],[],[],[],[],[]]
for i in range(len(obj_list)):
random_poses_file=open("./shape_orientations/"+obj_list[i],"r")
#temp=random_poses_file.read()
lines_list = random_poses_file.readlines()
temp = [[float(val) for val in line.split()] for line in lines_list[1:]]
self.random_poses[i]=temp
random_poses_file.close()
self.instance=0#int(np.random.uniform(low=0,high=100))
    # Function to get the 3D transformation matrix of the palm, get the wrist position, and update both of those variables
def _get_trans_mat_wrist_pose(self): #WHY MUST YOU HATE ME WHEN I GIVE YOU NOTHING BUT LOVE?
self.wrist_pose=np.copy(self._sim.data.get_geom_xpos('palm'))
Rfa=np.copy(self._sim.data.get_geom_xmat('palm'))
temp=np.matmul(Rfa,np.array([[0,0,1],[-1,0,0],[0,-1,0]]))
temp=np.transpose(temp)
Tfa=np.zeros([4,4])
Tfa[0:3,0:3]=temp
Tfa[3,3]=1
Tfw=np.zeros([4,4])
Tfw[0:3,0:3]=temp
Tfw[3,3]=1
self.wrist_pose=self.wrist_pose+np.matmul(np.transpose(Tfw[0:3,0:3]),[-0.009,0.048,0.0])
Tfw[0:3,3]=np.matmul(-(Tfw[0:3,0:3]),np.transpose(self.wrist_pose))
self.Tfw=Tfw
self.Twf=np.linalg.inv(Tfw)
def experimental_sensor(self,rangedata,finger_pose,gravity):
#print('flimflam')
#finger_joints = ["f1_prox", "f2_prox", "f3_prox", "f1_dist", "f2_dist", "f3_dist"]
finger_pose=np.array(finger_pose)
s1=finger_pose[0:3]-finger_pose[6:9]
s2=finger_pose[0:3]-finger_pose[3:6]
#print(finger_pose)
front_area=np.linalg.norm(np.cross(s1,s2))/2
#print('front area',front_area)
top1=np.linalg.norm(np.cross(finger_pose[0:3],finger_pose[9:12]))/2
top2=np.linalg.norm(np.cross(finger_pose[9:12],finger_pose[12:15]))/2
top3=np.linalg.norm(np.cross(finger_pose[3:6],finger_pose[12:15]))/2
top4=np.linalg.norm(np.cross(finger_pose[6:9],finger_pose[15:18]))/2
top5=np.linalg.norm(np.cross(finger_pose[9:12],finger_pose[15:18]))/2
total1=top1+top2+top3
total2=top1+top4+top5
top_area=max(total1,total2)
#print('front',front_area,'top',top_area)
sites=["palm","palm_1","palm_2","palm_3","palm_4"]
obj_pose=[]#np.zeros([5,3])
xs=[]
ys=[]
zs=[]
for i in range(len(sites)):
temp=self._sim.data.get_site_xpos(sites[i])
temp=np.append(temp,1)
temp=np.matmul(self.Tfw,temp)
temp=temp[0:3]
if rangedata[i] < 0.06:
temp[1]+=rangedata[i]
obj_pose=np.append(obj_pose,temp)
#obj_pose[i,:]=temp
for i in range(int(len(obj_pose)/3)):
xs=np.append(xs,obj_pose[i*3])
ys=np.append(ys,obj_pose[i*3+1])
zs=np.append(zs,obj_pose[i*3+2])
        if len(xs) == 0:
sensor_pose=[0.2,0.2,0.2]
else:
sensor_pose=[np.average(xs),np.average(ys),np.average(zs)]
obj_size=np.copy(self._get_obj_size())
if np.argmax(np.abs(gravity))==2:
front_part=np.abs(obj_size[0]*obj_size[2])/front_area
top_part=np.abs(obj_size[0]*obj_size[1])/top_area
elif np.argmax(np.abs(gravity))==1:
front_part=np.abs(obj_size[0]*obj_size[2])/front_area
top_part=np.abs(obj_size[1]*obj_size[2])/top_area
else:
front_part=np.abs(obj_size[0]*obj_size[1])/front_area
top_part=np.abs(obj_size[0]*obj_size[2])/top_area
return sensor_pose,front_part, top_part
def get_sim_state(self): #this gives you the whole damn qpos
return np.copy(self._sim.data.qpos)
def set_sim_state(self,qpos,obj_state):#this just sets all the qpos of the simulation manually. Is it bad? Probably. Do I care at this point? Not really
self._sim.data.set_joint_qpos("object", [obj_state[0], obj_state[1], obj_state[2], 1.0, 0.0, 0.0, 0.0])
for i in range(len(self._sim.data.qpos)):
self._sim.data.qpos[i]=qpos[i]
self._sim.forward()
# Function to get the state of all the joints, including sliders
def _get_joint_states(self):
arr = []
for i in range(len(self._sim.data.sensordata)-17):
arr.append(self._sim.data.sensordata[i])
arr[0]=-arr[0]
arr[1]=-arr[1]
return arr # it is a list
def obs_test(self):
obj_pose = self._get_obj_pose()
obj_pose = np.copy(obj_pose)
tests_passed=[]
self._sim.data.site_xpos[0]=obj_pose
self._sim.data.site_xpos[1]=obj_pose
print(self._sim.data.qpos)
print('object position', obj_pose)
temp=True
while temp:
ans=input('do the red bars line up with the object center Y/N?')
if ans.lower()=='n':
print('Recording first test as failure')
tests_passed.append(False)
temp=False
elif ans.lower()=='y':
print('Recording first test as success')
tests_passed.append(True)
temp=False
else:
print('input not recognized, please input either Y or N. do the red bars line up with the object center Y/N?')
print('Next test, finger positions')
finger_joints = ["f1_prox", "f2_prox", "f3_prox", "f1_dist", "f2_dist", "f3_dist"]
fingers_6D_pose = []
for joint in finger_joints:
trans = self._sim.data.get_geom_xpos(joint)
trans = list(trans)
for i in range(3):
fingers_6D_pose.append(trans[i])
for i in range(6):
self._sim.data.site_xpos[0]=fingers_6D_pose[i*3:i*3+3]
self._sim.data.site_xpos[1]=fingers_6D_pose[i*3:i*3+3]
temp=True
while temp:
ans=input(f'do the red bars line up with the {finger_joints[i]} Y/N?')
if ans.lower()=='n':
print('Recording test as failure')
tests_passed.append(False)
temp=False
elif ans.lower()=='y':
print('Recording test as success')
tests_passed.append(True)
temp=False
else:
print(f'input not recognized, please input either Y or N. do the red bars line up with the {finger_joints[i]} Y/N?')
print('Next test, wrist position')
self._sim.data.site_xpos[0]=self.wrist_pose
self._sim.data.site_xpos[1]=self.wrist_pose
temp=True
while temp:
ans=input('do the red bars line up with the wrist position Y/N?')
if ans.lower()=='n':
                print('Recording test as failure')
tests_passed.append(False)
temp=False
elif ans.lower()=='y':
                print('Recording test as success')
tests_passed.append(True)
temp=False
else:
print('input not recognized, please input either Y or N. do the red bars line up with the wrist position Y/N?')
passed=np.sum(tests_passed)
failed=np.sum(np.invert(tests_passed))
print('out of', np.shape(tests_passed), f'tests, {passed} tests passed and {failed} tests failed')
print('tests passed')
print('object pose:',tests_passed[0])
print('wrist pose:',tests_passed[7])
for i in range(6):
print(finger_joints[i], 'pose:',tests_passed[i+1])
    # Function to return the observation in global or local coordinates
def _get_obs(self, state_rep=None): #TODO: Add or subtract elements of this to match the discussions with Ravi and Cindy
'''
Local obs, all in local coordinates (from the center of the palm)
(18,) Finger Pos 0-17: (0: x, 1: y, 2: z) "f1_prox", (3-5) "f2_prox", (6-8) "f3_prox", (9-11) "f1_dist", (12-14) "f2_dist", (15-17) "f3_dist"
(3,) Wrist Pos 18-20 (18: x, 19: y, 20: z)
(3,) Obj Pos 21-23 (21: x, 22: y, 23: z)
(9,) Joint States 24-32
(3,) Obj Size 33-35
(12,) Finger Object Distance 36-47
(2,) X and Z angle 48-49
(17,) Rangefinder data 50-66
(3,) Gravity vector in local coordinates 67-69
(3,) Object location based on rangefinder data 70-72
(1,) Ratio of the area of the side of the shape to the open portion of the side of the hand 73
(1,) Ratio of the area of the top of the shape to the open portion of the top of the hand 74
(6, ) Finger dot product 75) "f1_prox", 76) "f2_prox", 77) "f3_prox", 78) "f1_dist", 79) "f2_dist", 80) "f3_dist" 75-80
(1, ) Dot product (wrist) 81
'''
'''
Global obs, all in global coordinates (from simulator 0,0,0)
(18,) Finger Pos 0-17
(3,) Wrist Pos 18-20
(3,) Obj Pos 21-23
(9,) Joint States 24-32
(3,) Obj Size 33-35
(12,) Finger Object Distance 36-47
"f1_prox", "f1_prox_1", "f2_prox", "f2_prox_1", "f3_prox", "f3_prox_1","f1_dist", "f1_dist_1", "f2_dist", "f2_dist_1", "f3_dist", "f3_dist_1"
(2,) X and Z angle 48-49
(17,) Rangefinder data 50-66
'''
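# Example (sketch) of indexing the local observation vector documented above:
#   obs = self._get_obs(state_rep="local")
#   finger_xyz  = obs[0:18]    # six finger geoms, 3 coordinates each
#   obj_xyz     = obs[21:24]   # object center in the palm frame
#   range_data  = obs[50:67]   # 17 rangefinder readings
#   finger_dots = obs[75:81]   # per-finger dot products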
if state_rep is None:
state_rep = self.state_rep
# states rep
obj_pose = self._get_obj_pose()
obj_pose = np.copy(obj_pose)
self._get_trans_mat_wrist_pose()
x_angle,z_angle = self._get_angles()
joint_states = self._get_joint_states() # Sensor reading (state) of a joint
obj_size = self._get_obj_size() # Returns size of object (length, width, height)
finger_obj_dist = self._get_finger_obj_dist() # Distance from finger joint to object center
range_data=self._get_rangefinder_data()
finger_joints = ["f1_prox", "f2_prox", "f3_prox", "f1_dist", "f2_dist", "f3_dist"]
gravity=[0,0,-1]
dot_prod=self._get_dot_product()
fingers_6D_pose = []
if state_rep == "global":#NOTE: only use local coordinates! global coordinates suck
finger_dot_prod=[]
for joint in finger_joints:
trans = self._sim.data.get_geom_xpos(joint)
trans = list(trans)
for i in range(3):
fingers_6D_pose.append(trans[i])
finger_dot_prod=self._get_fingers_dot_product(fingers_6D_pose)
fingers_6D_pose = fingers_6D_pose + list(self.wrist_pose) + list(obj_pose) + joint_states + [obj_size[0], obj_size[1], obj_size[2]*2] + finger_obj_dist + [x_angle, z_angle] + range_data +finger_dot_prod+ [dot_prod]#+ [self.obj_shape]
elif state_rep == "local":
finger_dot_prod=[]
for joint in finger_joints:
# Get the Cartesian coordinates (x,y,z) of the finger joint geom center
trans = np.copy(self._sim.data.get_geom_xpos(joint))
dot_prod_coords=list(trans)
# Append 1 to form homogeneous coordinates for the transformation
trans_for_rotation=np.append(trans,1)
# Transform the finger joint geom coords into the palm (local) frame using the hand pose transformation matrix (Tfw)
trans_for_rotation=np.matmul(self.Tfw,trans_for_rotation)
trans = trans_for_rotation[0:3]
trans = list(trans)
# Get dot product between finger joint wrt palm
temp_dot_prod=self._get_dot_product(dot_prod_coords)
finger_dot_prod.append(temp_dot_prod)
for i in range(3):
fingers_6D_pose.append(trans[i])
# Transform the wrist pose into the local (palm) frame
wrist_for_rotation=np.append(self.wrist_pose,1)
wrist_for_rotation=np.matmul(self.Tfw,wrist_for_rotation)
wrist_pose = wrist_for_rotation[0:3]
# Transform the object pose into the local (palm) frame
obj_for_rotation=np.append(obj_pose,1)
obj_for_rotation=np.matmul(self.Tfw,obj_for_rotation)
obj_pose = obj_for_rotation[0:3]
# Gravity and sensor location transformations
gravity=np.matmul(self.Tfw[0:3,0:3],gravity)
sensor_pos,front_thing,top_thing=self.experimental_sensor(range_data,fingers_6D_pose,gravity)
# Set full 6D pose, wrist and object coord positions, joint_states (sensor readings), object length, width, height, finger-object distance, x and z angle, rangefinder data, gravity data, sensor position coord data
fingers_6D_pose = fingers_6D_pose + list(wrist_pose) + list(obj_pose) + joint_states + [obj_size[0], obj_size[1], obj_size[2]*2] + finger_obj_dist + [x_angle, z_angle] + range_data + [gravity[0],gravity[1],gravity[2]] + [sensor_pos[0],sensor_pos[1],sensor_pos[2]] + [front_thing, top_thing] + finger_dot_prod + [dot_prod]#+ [self.obj_shape]
if self.pid:
fingers_6D_pose = fingers_6D_pose+ [self._get_dot_product()]
elif state_rep == "joint_states":
fingers_6D_pose = joint_states + list(obj_pose) + [obj_size[0], obj_size[1], obj_size[2]*2] + [x_angle, z_angle] #+ fingers_dot_prod
return fingers_6D_pose
# Function to get the distance between the digits on the fingers and the object center
# NOTE: the distance is the Euclidean norm over the x, y, and z differences between each finger site and the object center
def _get_finger_obj_dist(self):
finger_joints = ["f1_prox", "f1_prox_1", "f2_prox", "f2_prox_1", "f3_prox", "f3_prox_1","f1_dist", "f1_dist_1", "f2_dist", "f2_dist_1", "f3_dist", "f3_dist_1"]
obj = self._get_obj_pose()
dists = []
for i in finger_joints:
pos = self._sim.data.get_site_xpos(i)
dist = np.absolute(pos[0:3] - obj[0:3])
temp = np.linalg.norm(dist)
dists.append(temp)
return dists
# Get the rangefinder data for one time step
# Caveat: each rangefinder only reports the distance to the nearest geom, so it may detect a geom that is not the object
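# A sensordata value of -1 means the rangefinder hit nothing in range; it is replaced below with a
# sentinel value of 6, which presumably stands in for "no object detected" as a large distance.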
def _get_rangefinder_data(self):
range_data = []
for i in range(17):
if self._sim.data.sensordata[i+len(self._sim.data.sensordata)-17]==-1:
a=6
else:
a=self._sim.data.sensordata[i+len(self._sim.data.sensordata)-17]
range_data.append(a)
return range_data
# Function to return the object position in world coordinates
def _get_obj_pose(self):
arr = self._sim.data.get_geom_xpos("object")
return arr
# Function to return the angles between the palm normal and the object location
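# In the local palm frame, z_angle is the angle between the palm's +y axis ([0,1,0]) and the object
# direction projected onto the x-y plane; x_angle is the corresponding angle in the y-z plane.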
def _get_angles(self):
#t=time.time()
obj_pose = self._get_obj_pose()
self._get_trans_mat_wrist_pose()
local_obj_pos=np.copy(obj_pose)
local_obj_pos=np.append(local_obj_pos,1)
local_obj_pos=np.matmul(self.Tfw,local_obj_pos)
obj_wrist = local_obj_pos[0:3]/np.linalg.norm(local_obj_pos[0:3])
center_line = np.array([0,1,0])
z_dot = np.dot(obj_wrist[0:2],center_line[0:2])
z_angle = np.arccos(z_dot/np.linalg.norm(obj_wrist[0:2]))
x_dot = np.dot(obj_wrist[1:3],center_line[1:3])
x_angle = np.arccos(x_dot/np.linalg.norm(obj_wrist[1:3]))
return x_angle,z_angle
def _get_fingers_dot_product(self, fingers_6D_pose):
fingers_dot_product = []
for i in range(6):
fingers_dot_product.append(self._get_dot_product(fingers_6D_pose[3*i:3*i+3]))
return fingers_dot_product
# Function to get the dot product between a point's direction (object center by default) and the palm center line;
# used for the finger/wrist dot-product observations and the PID controller
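# The value is raised to the 20th power on return, so it peaks sharply near perfect alignment.
# Rough arithmetic: a dot product of 1.0 stays 1.0, 0.95 becomes about 0.36, and 0.8 drops to about 0.01.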
def _get_dot_product(self,obj_state=None):
if obj_state is None:
obj_state=self._get_obj_pose()
hand_pose = self._sim.data.get_body_xpos("j2s7s300_link_7")
obj_state_x = abs(obj_state[0] - hand_pose[0])
obj_state_y = abs(obj_state[1] - hand_pose[1])
obj_vec = np.array([obj_state_x, obj_state_y])
obj_vec_norm = np.linalg.norm(obj_vec)
obj_unit_vec = obj_vec / obj_vec_norm
center_x = abs(0.0 - hand_pose[0])
center_y = abs(0.0 - hand_pose[1])
center_vec = np.array([center_x, center_y])
center_vec_norm = np.linalg.norm(center_vec)
center_unit_vec = center_vec / center_vec_norm
dot_prod = np.dot(obj_unit_vec, center_unit_vec)
return dot_prod**20 # raised to the 20th power so the value peaks sharply only when well aligned
# Function to get rewards based only on the lift reward. This is primarily used to generate data for the grasp classifier
def _get_reward_DataCollection(self):
obj_target = 0.2
obs = self._get_obs(state_rep="global")
# TODO: change obs[23] and obs[5] to the simulator height object
if abs(obs[23] - obj_target) < 0.005 or (obs[23] >= obj_target): #Check to make sure that obs[23] is still the object height. Also local coordinates are a thing
lift_reward = 1
done = True
elif obs[20]>obj_target+0.2:
lift_reward=0.0
done=True
else:
lift_reward = 0
done = False
info = {"lift_reward":lift_reward}
return lift_reward, info, done
# Function to get rewards for RL training
def _get_reward(self,with_grasp_reward=False): # TODO: change obs[23] and obs[5] to the simulator height object and stop using _get_obs
#TODO: Make sure this works with the new grasp classifier
obj_target = 0.2 # Object height target (z-coord of object center)
grasp_reward = 0.0 # Grasp reward
finger_reward = 0.0 # Finger reward
obs = self._get_obs(state_rep="global")
local_obs=self._get_obs(state_rep='local')
#loc_obs=self._get_obs()
# Grasp reward set by grasp classifier, otherwise 0
if with_grasp_reward is True:
#network_inputs=obs[0:5]
#network_inputs=np.append(network_inputs,obs[6:23])
#network_inputs=np.append(network_inputs,obs[24:])
#inputs = torch.FloatTensor(np.array(network_inputs)).to(device)
# If proximal or distal finger position is close enough to object
#if np.max(np.array(obs[41:46])) < 0.035 or np.max(np.array(obs[35:40])) < 0.015:
# Grasp classifier determines how good grasp is
outputs = self.Grasp_net.predict(np.array(local_obs[0:75]).reshape(1,-1))#self.Grasp_net(inputs).cpu().data.numpy().flatten()
if (outputs >=0.3) & (not self.Grasp_Reward):
grasp_reward = 5.0
self.Grasp_Reward=True
else:
grasp_reward = 0.0
if abs(obs[23] - obj_target) < 0.005 or (obs[23] >= obj_target):
lift_reward = 50.0
done = True
else:
lift_reward = 0.0
done = False
""" Finger Reward
# obs[41:46]: DISTAL Finger-Object distance 41) "f1_dist", "f1_dist_1", "f2_dist", "f2_dist_1", "f3_dist", 46) "f3_dist_1"
# obs[35:40]: PROXIMAL Finger-Object distance 35) "f1_prox", "f1_prox_1", "f2_prox", "f2_prox_1", "f3_prox", 40) "f3_prox_1"
# Original Finger reward
#finger_reward = -np.sum((np.array(obs[41:46])) + (np.array(obs[35:40])))
# Negative or 0 Finger Reward: Negative velocity --> fingers moving outward/away from object
#if any(n < 0 for n in action):
# finger_reward = -np.sum((np.array(obs[41:46])) + (np.array(obs[35:40])))
#else:
# finger_reward = 0
"""
reward = 0.2*finger_reward + lift_reward + grasp_reward
info = {"finger_reward":finger_reward,"grasp_reward":grasp_reward,"lift_reward":lift_reward}
return reward, info, done
# only set proximal joints, because this is an underactuated hand
# we have a problem here (a binomial in the denominator)
# I'll use the quotient rule
def _set_state(self, states):
#print('sensor data',self._sim.data.sensordata[0:9])
#print('qpos',self._sim.data.qpos[0:9])
#print('states',states)
self._sim.data.qpos[0] = states[0]
self._sim.data.qpos[1] = states[1]
self._sim.data.qpos[2] = states[2]
self._sim.data.qpos[3] = states[3]
self._sim.data.qpos[5] = states[4]
self._sim.data.qpos[7] = states[5]
self._sim.data.set_joint_qpos("object", [states[6], states[7], states[8], 1.0, 0.0, 0.0, 0.0])
self._sim.forward()
# Function to get the dimensions of the object
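# The loop below accumulates the size over the object geoms (it appears to skip the first 8 geoms,
# presumably the hand): x/y keep the maximum half-extent seen, z half-extents are summed, and the
# bowl shapes get hard-coded overrides.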
def _get_obj_size(self):
#TODO: clean up this object size calculation
num_of_geoms=np.shape(self._sim.model.geom_size)
final_size=[0,0,0]
#print(self._sim.model.geom_size)
#print(num_of_geoms[0]-8)
for i in range(num_of_geoms[0]-8):
size=np.copy(self._sim.model.geom_size[-1-i])
diffs=[0,0,0]
if size[2]==0:
size[2]=size[1]
size[1]=size[0]
diffs[0]=abs(size[0]-size[1])
diffs[1]=abs(size[1]-size[2])
diffs[2]=abs(size[0]-size[2])
if ('lemon' in self.filename) or (np.argmin(diffs) != 0):
temp=size[0]
size[0]=size[2]
size[2]=temp
if 'Bowl' in self.filename:
if 'Rect' in self.filename:
final_size[0]=0.17
final_size[1]=0.17
final_size[2]=0.075
else:
final_size[0]=0.175
final_size[1]=0.175
final_size[2]=0.07
if self.obj_size=='m':
for j in range(3):
final_size[j]=final_size[j]*0.85
elif self.obj_size=='s':
for j in range(3):
final_size[j]=final_size[j]*0.7
else:
final_size[0]=max(size[0],final_size[0])
final_size[1]=max(size[1],final_size[1])
final_size[2]+=size[2]
#print(final_size)
return final_size
def set_obj_coords(self,x,y,z):
self.obj_coords[0] = x
self.obj_coords[1] = y
self.obj_coords[2] = z
def get_obj_coords(self):
return self.obj_coords
def set_random_shape(self,shape):
self.random_shape = shape
def get_random_shape(self):
return self.random_shape
def set_orientation_idx(self, idx):
""" Set hand orientation and rotation file index"""
self.orientation_idx = idx
def get_orientation_idx(self):
""" Get hand orientation and rotation file index"""
return self.orientation_idx
def set_obj_coord_region(self, region):
""" Set the region within the hand (left, center, right, target, origin) from where the initial object x,y
starting coordinate is being sampled from """
self.obj_coord_region = region
def get_obj_coord_region(self):
""" Get the region within the hand (left, center, right, target, origin) from where the initial object x,y
starting coordinate is being sampled from """
return self.obj_coord_region
# Returns hand orientation (normal, rotated, top)
def get_orientation(self):
return self.orientation
# Set hand orientation (normal, rotated, top)
def set_orientation(self, orientation):
self.orientation = orientation
def get_with_grasp_reward(self):
return self.with_grasp_reward
def set_with_grasp_reward(self,with_grasp):
self.with_grasp_reward=with_grasp
def get_coords_filename(self):
""" Returns the initial object and hand pose coordinate file name sampled from in the current environment """
return self.coords_filename
def set_coords_filename(self, coords_filename):
""" Sets the initial object and hand pose coordinate file name sampled from in the current environment (Default is None) """
self.coords_filename = coords_filename
# Dictionary of all possible objects (not just ones currently used)
def get_all_objects(self):
return self.all_objects
# Function to run all the experiments for RL training
def experiment(self, shape_keys): #TODO: Talk to people thursday about adding the hourglass and bottles to this dataset.
for key in shape_keys:
self.objects[key] = self.all_objects[key]
if len(shape_keys) == 0:
print("No shape keys")
raise ValueError
elif len(shape_keys) != len(self.objects):
print("Invlaid shape key requested")
raise ValueError
return self.objects
#Function to randomize the position of the object for grasp classifier data collection
def randomize_initial_pos_data_collection(self,orientation="side"):
print('Randomizing the initial object position for data collection')
size=self._get_obj_size()
#The old way to generate random poses
if orientation=='side':
'''
temp=self.random_poses[obj][self.instance]
rand_x=temp[0]
rand_y=temp[1]
z=temp[2]
self.instance+=1
'''
rand_x=triang.rvs(0.5)
rand_x=(rand_x-0.5)*(0.16-2*size[0])
rand_y=np.random.uniform()
if rand_x>=0:
rand_y=rand_y*(-(0.07-size[0]*np.sqrt(2))/(0.08-size[0])*rand_x-(-0.03-size[0]))
else:
rand_y=rand_y*((0.07-size[0]*np.sqrt(2))/(0.08-size[0])*rand_x-(-0.03-size[0]))
elif orientation=='rotated':
rand_x=0
rand_y=0
else:
theta=np.random.uniform(low=0,high=2*np.pi)
r=np.random.uniform(low=0,high=size[0]/2)
rand_x=np.sin(theta)*r
rand_y=np.cos(theta)*r
z = size[-1]/2
return rand_x, rand_y, z
def write_xml(self,new_rotation):
# This function takes a rotation vector [roll, pitch, yaw] and writes that hand rotation into the xml file
# at self.file_dir + self.filename. It then reloads the simulator with the object placed far from the hand
# to prevent collisions and recalculates the hand's transformation matrices.
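# Example (sketch, with an illustrative rotation): self.write_xml([0.0, 0.0, 1.57]) rewrites the euler
# attribute of the "j2s7s300_link_7" body to roughly "0.0 0.0 1.57" (each value is truncated to at most
# 5 characters below) and then reloads the model; only the euler attribute is actually rewritten, the
# site position rewrite is commented out.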
xml_file=open(self.file_dir+self.filename,"r")
xml_contents=xml_file.read()
xml_file.close()
starting_point=xml_contents.find('<body name="j2s7s300_link_7"')
euler_point=xml_contents.find('euler=',starting_point)
contents=re.search("[^\s]+\s[^\s]+\s[^>]+",xml_contents[euler_point:])
c_start=contents.start()
c_end=contents.end()
starting_point=xml_contents.find('joint name="j2s7s300_joint_7" type')
axis_point=xml_contents.find('axis=',starting_point)
contents=re.search("[^\s]+\s[^\s]+\s[^>]+",xml_contents[axis_point:])
starting_point=xml_contents.find('site name="local_origin_site" type="cylinder" size="0.0075 0.005" rgba="25 0.5 0.0 1"')
site_point=xml_contents.find('pos=',starting_point)
contents=re.search("[^\s]+\s[^\s]+\s[^>]+",xml_contents[starting_point:])
wrist_pose=self.wrist_pose
new_thing= str(wrist_pose[0]) + " " + str(wrist_pose[1]) + " " + str(wrist_pose[2])
p1=str(new_rotation[0])
p2=str(new_rotation[1])
p3=str(new_rotation[2])
xml_contents=xml_contents[:euler_point+c_start+7] + p1[0:min(5,len(p1))]+ " "+p2[0:min(5,len(p2))] +" "+ p3[0:min(5,len(p3))] \
+ xml_contents[euler_point+c_end-1:]# + new_thing + xml_contents[site_point+c2_end:]
xml_file=open(self.file_dir+self.filename,"w")
xml_file.write(xml_contents)
xml_file.close()
self._model = load_model_from_path(self.file_dir + self.filename)
self._sim = MjSim(self._model)
self._set_state(np.array([0, 0, 0, 0, 0, 0, 10, 10, 10]))
self._get_trans_mat_wrist_pose()
# Steph Added
def check_obj_file_empty(self,filename):
if os.path.exists(filename) == False:
return False
with open(filename, 'r') as read_obj:
# read first character
one_char = read_obj.read(1)
# if not fetched then file is empty
if not one_char:
return True
return False
def Generate_Latin_Square(self,max_elements,filename,shape_keys, test = False):
""" Generate uniform list of shapes """
### Choose an experiment ###
self.objects = self.experiment(shape_keys)
# TEMPORARY - Only uncomment for quicker testing
# max_elements = 1000
# n is the number of object types (sbox, bbox, bcyl, etc.)
num_elements = 0
elem_gen_done = 0
printed_row = 0
while num_elements < max_elements:
n = len(self.objects.keys())-1
#print("This is n: ",n)
k = n
# Loop over the rows of the Latin square
for i in range(0, n+1, 1):
# This loop runs only after the first iteration of the outer loop
# Appends the keys from index k up to n
keys = list(self.objects.keys())
temp = k
while (temp <= n) :
if printed_row <= n: # Just used to print out one row instead of all of them
printed_row += 1
key_name = str(keys[temp])
self.obj_keys.append(key_name)
temp += 1
num_elements +=1
if num_elements == max_elements:
elem_gen_done = 1
break
if elem_gen_done:
break
# This loop appends the keys from index 0 to k-1.
for j in range(0, k):
key_name = str(keys[j])
self.obj_keys.append(key_name)
num_elements +=1
if num_elements == max_elements:
elem_gen_done = 1
break
if elem_gen_done:
break
k -= 1
########## Function Testing Code########
if test:
test_key = self.obj_keys
if len(test_key) == max_elements:
test_key.sort()
num_elem_test = 1
for i in range(len(test_key)-2):
if test_key[i] != test_key[i+1]:
num_elem_test += 1
if num_elem_test == len(shape_keys):
print("Latin Square function is Generating Perfect Distribution")
else:
print("Latin Square function is not Generating Perfect Distribution")
########## Ends Here ###############
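# Note: writer.writerow(key) writes each character of the key as a separate CSV field
# (e.g. "CubeS" becomes C,u,b,e,S); objects_file_to_list() joins the characters back together
# when reading, so the two functions stay consistent.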
with open(filename, "w", newline="") as outfile:
writer = csv.writer(outfile)
for key in self.obj_keys:
writer.writerow(key)
def objects_file_to_list(self,filename, num_objects,shape_keys):
# print("FILENAME: ",filename)
my_file = Path(filename)
if my_file.is_file() is True:
if os.stat(filename).st_size == 0:
print("Object file is empty!")
self.Generate_Latin_Square(num_objects,filename,shape_keys)
else:
self.Generate_Latin_Square(num_objects, filename, shape_keys)
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
row = ''.join(row)
self.obj_keys.append(row)
#print('LAST OBJECT KEYS',self.obj_keys)
def get_obj_keys(self):
return self.obj_keys
def get_object(self,filename):
# Get random shape
random_shape = self.obj_keys.pop()
# remove current object file contents
f = open(filename, "w")
f.truncate()
f.close()
# write new object keys to file so new env will have updated list
with open(filename, "w", newline="") as outfile:
writer = csv.writer(outfile)
for key in self.obj_keys:
writer.writerow(key)
# Load model
self._model = load_model_from_path(self.file_dir + self.objects[random_shape])
self._sim = MjSim(self._model)
return random_shape, self.objects[random_shape]
# Get the initial object position
def sample_initial_object_hand_pos(self,coords_filename,with_noise=True,orient_idx=None,region=None):
""" Sample the initial object and hand x,y,z coordinate positions from the desired coordinate file (determined by shape, size, orientation, and noise) """
data = []
with open(coords_filename) as csvfile:
checker=csvfile.readline()
if ',' in checker:
delim=','
else:
delim=' '
reader = csv.reader(csvfile, delimiter=delim)
for i in reader:
if with_noise is True:
# Object x, y, z coordinates, followed by corresponding hand orientation x, y, z coords
data.append([float(i[0]), float(i[1]), float(i[2]), float(i[3]), float(i[4]), float(i[5])])
else:
# Hand orientation is set to (0, 0, 0) if no orientation is selected
data.append([float(i[0]), float(i[1]), float(i[2]), 0, 0, 0])
# Orientation index corresponds to the hand orientation and object position noise coordinate file index
if orient_idx is None:
# Get coordinate from within the desired region within the hand to sample the x,y coordinate for the object
if region is not None:
all_regions = {"left": [-.09, -.03], "center": [-.03, .03], "target": [-.01, .01], "right": [.03, .09], "origin": [0, 0]}
if region == "origin":
x = 0
y = 0
z = data[0][2] # Get the z value based on the height of the object
orient_idx = None
return x, y, z, 0, 0, 0, orient_idx
else:
sampling_range = all_regions[region]
# Get all points from data file that lie within the sampling range (x-coordinate range boundary)
region_data = [data[i] for i in range(len(data)) if sampling_range[0] <= data[i][0] <= sampling_range[1]]
orient_idx = np.random.randint(0, len(region_data))
else:
# If no specific region is selected, randomly select from file
orient_idx = np.random.randint(0, len(data))
coords = data[orient_idx]
obj_x = coords[0]
obj_y = coords[1]
obj_z = coords[2]
hand_x = coords[3]
hand_y = coords[4]
hand_z = coords[5]
return obj_x, obj_y, obj_z, hand_x, hand_y, hand_z, orient_idx
def obj_shape_generator(self,obj_params):
""" Load the object given the desired object shape and size, then load the corresponding file within the simulated envrionment
obj_params: Array containing the [shape_name, shape_size], ex: Small Cube ('CubeS') would be ['Cube','S']
returns the full shape name, ex: 'CubeS'
"""
if obj_params[0] == "Cube":
if obj_params[1] == "B":
obj=0
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bbox.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bbox.xml"
elif obj_params[1] == "M":
obj=1
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mbox.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mbox.xml"
elif obj_params[1] == "S":
obj=2
self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1.xml"
elif obj_params[0] == "Cylinder":
if obj_params[1] == "B":
obj=3
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcyl.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcyl.xml"
elif obj_params[1] == "M":
obj=4
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcyl.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcyl.xml"
elif obj_params[1] == "S":
obj=5
self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scyl.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scyl.xml"
elif obj_params[0] == "Hour":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bhg.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bhg.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mhg.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mhg.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_shg.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_shg.xml"
if obj_params[0] == "Vase":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bvase.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bvase.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mvase.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mvase.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_svase.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_svase.xml"
elif obj_params[0] == "Bottle":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bbottle.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bbottle.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mbottle.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mbottle.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sbottle.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sbottle.xml"
elif obj_params[0] == "Bowl":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bRoundBowl.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mRoundBowl.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sRoundBowl.xml"
if obj_params[0] == "Lemon":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_blemon.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_blemon.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mlemon.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mlemon.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_slemon.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_slemon.xml"
elif obj_params[0] == "TBottle":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_btbottle.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_btbottle.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mtbottle.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_stbottle.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_stbottle.xml"
elif obj_params[0] == "RBowl":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml"), 'b',"/kinova_description/j2s7s300_end_effector_v1_bRectBowl.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml"), 'm',"/kinova_description/j2s7s300_end_effector_v1_mRectBowl.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml"), 's',"/kinova_description/j2s7s300_end_effector_v1_sRectBowl.xml"
elif obj_params[0] == "Cone1":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcone1.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcone1.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcone1.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcone1.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scone1.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scone1.xml"
elif obj_params[0] == "Cone2":
if obj_params[1] == "B":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_bcone2.xml"),'b',"/kinova_description/j2s7s300_end_effector_v1_bcone2.xml"
elif obj_params[1] == "M":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_mcone2.xml"),'m',"/kinova_description/j2s7s300_end_effector_v1_mcone2.xml"
elif obj_params[1] == "S":
self._model,self.obj_size,self.filename= load_model_from_path(self.file_dir + "/kinova_description/j2s7s300_end_effector_v1_scone2.xml"),'s',"/kinova_description/j2s7s300_end_effector_v1_scone2.xml"
elif obj_params[0]=='display':
self._model,self.obj_size,self.filename = load_model_from_path(self.file_dir + "/kinova_description/DisplayStuff.xml"),'s',"/kinova_description/DisplayStuff.xml"
return obj_params[0]+obj_params[1]
def select_object(self,env_name, shape_keys, obj_params):
""" Determine object based on input parameters (shape, size)
env_name: Training loop environment (env) or evaluation environment (eval_env)
shape_keys: List of object names (ex: CubeS, CylinderM) to be used
obj_params: Specific object name and size, stored as an array [shape_name, size] (Ex: ['Cube','S'])
returns the object to be used (Ex: CubeS)
"""
# Based on environment, sets amount of objects and object file to store them in
if env_name == "env":
obj_list_filename = "objects.csv"
num_objects = 20000
else:
obj_list_filename = "eval_objects.csv"
num_objects = 200
# Replenish objects list if none left in list to grab
if len(self.objects) == 0:
self.objects = self.experiment(shape_keys)
if len(self.obj_keys) == 0:
self.objects_file_to_list(obj_list_filename,num_objects,shape_keys)
# Determine the current object from a set list of objects stored in obj_list_filename text file
if obj_params is None:
random_shape, self.filename = self.get_object(obj_list_filename)
else:
# Determine the current object from set object parameters ([shape_name, shape_size])
random_shape = self.obj_shape_generator(obj_params)
return random_shape
def select_orienation(self, random_shape, hand_orientation):
""" Determine hand orientation based on shape and desired hand orientation selection type (normal or random)
random_shape: Object shape (Ex: CubeS)
hand_orientation: Orientation of the hand relative to the object (Normal, Random)
returns hand orientation (Normal (0 deg), Rotated (45 deg), Top (90 deg))
"""
# Orientation is initialized as Normal
orientation = 'normal'
orientation_type = 0.330
# Alter the hand orientation type if special shapes are being used (which only work with certain orientations)
# If the shape is RBowl, only do rotated and top orientations
if random_shape.find("RBowl") != -1:
# Rotated orientation is > 0.333
# Top orientation is > 0.667
if hand_orientation == 'random':
orientation_type = np.random.uniform(0.333,1)
# If the shape is Lemon, only do normal and top orientations
elif random_shape.find("Lemon") != -1:
# Rotated orientation is > 0.333
# Top orientation is > 0.667
if hand_orientation == 'random':
Choice1 = np.random.uniform(0, 0.333)
Choice2 = np.random.uniform(0.667, 1)
orientation_type = np.random.choice([Choice1, Choice2])
# For all other shapes, given a random hand orientation
elif hand_orientation == 'random':
orientation_type = np.random.rand()
# Determine orientation type based on random selection
if orientation_type < 0.333:
# Normal (0 deg) Orientation
orientation = 'normal'
elif orientation_type > 0.667:
# Top (90 deg) Orientation
orientation = 'top'
else:
# Rotated (45 deg) orientation
orientation = 'rotated'
return orientation
def determine_obj_hand_coords(self, random_shape, mode, with_noise=True):
""" Select object and hand orientation coordinates then write them to the xml file for simulation in the current environment
random_shape: Desired shape to be used within the current environment
with_noise: Set to True if coordinates to be used are selected from the object/hand coordinate files with positional noise added
returns object and hand coordinates along with the corresponding orientation index
"""
orient_idx = None # Line number (index) within the coordinate files from which the object position is selected from
if with_noise is True:
noise_file = 'with_noise/'
hand_x = 0
hand_y = 0
hand_z = 0
else:
noise_file = 'no_noise/'
# Expert data generation, pretraining and training will have the same coordinate files
if mode != "test":
mode = "train"
# Hand and object coordinates filename
coords_filename = "gym_kinova_gripper/envs/kinova_description/obj_hand_coords/" + noise_file + str(mode)+"_coords/" + str(self.orientation) + "/" + random_shape + ".txt"
if self.check_obj_file_empty(coords_filename) == False:
obj_x, obj_y, obj_z, hand_x, hand_y, hand_z, orient_idx = self.sample_initial_object_hand_pos(coords_filename, with_noise=with_noise, orient_idx=None, region=self.obj_coord_region)
else:
# If coordinate file is empty or does not exist, randomly generate coordinates
obj_x, obj_y, obj_z = self.randomize_initial_pos_data_collection(orientation=self.orientation)
coords_filename = None
# Use the exact hand orientation from the coordinate file
if with_noise:
new_rotation = np.array([hand_x, hand_y, hand_z])
# Otherwise generate hand coordinate value based on desired orientation
elif self.filename=="/kinova_description/j2s7s300_end_effector.xml": # Default xml file
if self.orientation == 'normal':
new_rotation=np.array([0,0,0]) # Normal
elif self.orientation == 'top':
new_rotation=np.array([0,0,0])
# License: BSD 3 clause
# -*- coding: utf8 -*-
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.array_test.build import array_test as test
class Test(unittest.TestCase):
def setUp(self):
self.correspondence_dict = {
'Double': {
'python_type': float,
'cpp_type': 'double',
},
'Float': {
'python_type': np.float32,
'cpp_type': 'float',
},
'Int': {
'python_type': np.int32,
'cpp_type': 'std::int32_t',
},
'UInt': {
'python_type': np.uint32,
'cpp_type': 'std::uint32_t',
},
'Short': {
'python_type': np.int16,
'cpp_type': 'std::int16_t',
},
'UShort': {
'python_type': np.uint16,
'cpp_type': 'std::uint16_t',
},
'Long': {
'python_type': np.int64,
'cpp_type': 'std::int64_t',
},
'ULong': {
'python_type': np.uint64,
'cpp_type': 'std::uint64_t',
}
}
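# Each entry above maps an array type name (e.g. 'Double') to its numpy dtype and C++ type; the
# loop below attaches, per type, the test fixtures (dense/sparse arrays and nested lists) and the
# wrapped C++ test functions resolved by name from the compiled extension module.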
for array_type, info in self.correspondence_dict.items():
# fixed number of the correct type
info['number'] = 148 # info['python_type'](np.exp(5))
# The dense array of the corresponding type
python_array = np.array([1, 2, 5, 0, 4,
1]).astype(info['python_type'])
# The dense array 2D of the corresponding type
python_array_2d = np.array([[1, 2, 5],
[0, 4, 1]]).astype(info['python_type'])
# The list of dense array of the corresponding type
python_array_list_1d = [
np.array([1, 2, 5]).astype(info['python_type']),
np.array([1, 2, 9]).astype(info['python_type']),
np.array([0, 4]).astype(info['python_type'])
]
python_array2d_list_1d = [
np.array([[1, 2, 5], [0, 4, 1]]).astype(info['python_type']),
np.array([[1, 2, 9], [1, 2, 5],
[0, 4, 1]]).astype(info['python_type']),
np.array([[0]]).astype(info['python_type'])
]
python_array_list_2d = [[
np.array([1, 2, 5]).astype(info['python_type'])
], [
np.array([1, 2, 9, 5]).astype(info['python_type']),
np.array([0, 4, 1]).astype(info['python_type'])
], []]
python_array2d_list_2d = [[
np.array([[1, 2, 5], [0, 4, 1]]).astype(info['python_type']),
], [
np.array([[1, 2, 9], [1, 2, 5],
[0, 4, 1]]).astype(info['python_type']),
np.array([[0]]).astype(info['python_type'])
], []]
# The sparse array of the corresponding type
python_sparse_array = csr_matrix(
(np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type'])
# The sparse array 2D of the corresponding type
python_sparse_array_2d = csr_matrix(
(np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 4]),
np.array([0, 3, 4]))).astype(info['python_type'])
python_sparse_array_list_1d = [
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 3]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3, 1, 2]),
np.array([3, 5, 7, 4, 1]),
np.array([0, 5]))).astype(info['python_type'])
]
# TODO: add mixed list
python_sparse_array2d_list_1d = [
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 3, 4]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 1, 3]))).astype(info['python_type']),
csr_matrix(
(np.array([1.5, 2, 3, 1, 2]), np.array([3, 5, 7, 4, 1]),
np.array([0, 2, 3, 5]))).astype(info['python_type'])
]
python_sparse_array_list_2d = [[
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 4]))).astype(info['python_type'])
], [
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 3]))).astype(info['python_type']),
csr_matrix((np.array([1.5, 2, 3, 1, 2]),
np.array([3, 5, 7, 4, 1]),
np.array([0, 5]))).astype(info['python_type'])
], []]
python_sparse_array2d_list_2d = [[
csr_matrix((np.array([1.5, 2, 3, 1]), np.array([3, 5, 7, 9]),
np.array([0, 3, 4]))).astype(info['python_type'])
], [
csr_matrix((np.array([1.5, 2, 3]), np.array([3, 5, 11]),
np.array([0, 1, 3]))).astype(info['python_type']),
csr_matrix(
(np.array([1.5, 2, 3, 1, 2]), np.array([3, 5, 7, 4, 1]),
np.array([0, 2, 3, 5]))).astype(info['python_type'])
], []]
info['python_array'] = python_array
info['python_array_2d'] = python_array_2d
info['python_array_list_1d'] = python_array_list_1d
info['python_array2d_list_1d'] = python_array2d_list_1d
info['python_array2d_list_2d'] = python_array2d_list_2d
info['python_array_list_2d'] = python_array_list_2d
info['python_sparse_array'] = python_sparse_array
info['python_sparse_array_2d'] = python_sparse_array_2d
info['python_sparse_array_list_1d'] = python_sparse_array_list_1d
info['python_sparse_array2d_list_1d'] = \
python_sparse_array2d_list_1d
info['python_sparse_array_list_2d'] = python_sparse_array_list_2d
info['python_sparse_array2d_list_2d'] = \
python_sparse_array2d_list_2d
# corresponding test functions
# for typemap in
info['typemap_in_array'] = \
getattr(test, 'test_typemap_in_Array%s' % array_type)
info['typemap_in_array_2d'] = \
getattr(test, 'test_typemap_in_Array%s2d' % array_type)
info['typemap_in_array_list_1d'] = \
getattr(test, 'test_typemap_in_Array%sList1D' % array_type)
info['typemap_in_array_list_2d'] = \
getattr(test, 'test_typemap_in_Array%sList2D' % array_type)
info['typemap_in_sparse_array'] = \
getattr(test, 'test_typemap_in_SparseArray%s' % array_type)
info['typemap_in_sparse_array_2d'] = \
getattr(test, 'test_typemap_in_SparseArray%s2d' % array_type)
info['typemap_in_sarray_ptr'] = \
getattr(test, 'test_typemap_in_SArray%sPtr' % array_type)
info['typemap_in_sarray_ptr_2d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtr' % array_type)
info['typemap_in_sarray_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SArray%sPtrList1D' % array_type)
info['typemap_in_sarray_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SArray%sPtrList2D' % array_type)
info['typemap_in_sarray2d_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtrList1D' %
array_type)
info['typemap_in_sarray2d_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SArray%s2dPtrList2D' %
array_type)
info['typemap_in_varray_ptr'] = \
getattr(test, 'test_typemap_in_VArray%sPtr' % array_type)
info['typemap_in_varray_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_VArray%sPtrList1D' % array_type)
info['typemap_in_varray_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_VArray%sPtrList2D' % array_type)
info['typemap_in_base_array'] = \
getattr(test, 'test_typemap_in_BaseArray%s' % array_type)
info['typemap_in_base_array_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2d' % array_type)
info['typemap_in_sparse_array_ptr'] = \
getattr(test, 'test_typemap_in_SSparseArray%sPtr' % array_type)
info['typemap_in_sparse_array_2d_ptr'] = \
getattr(test, 'test_typemap_in_SSparseArray%s2dPtr' %
array_type)
info['typemap_in_base_array_ptr'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtr' % array_type)
info['typemap_in_base_array_2d_ptr'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtr' %
array_type)
info['typemap_in_base_array_list_1d'] = \
getattr(test, 'test_typemap_in_BaseArray%sList1D' % array_type)
info['typemap_in_base_array_list_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%sList2D' % array_type)
info['typemap_in_base_array2d_list_1d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2dList1D' %
array_type)
info['typemap_in_base_array2d_list_2d'] = \
getattr(test, 'test_typemap_in_BaseArray%s2dList2D' %
array_type)
info['typemap_in_base_array_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtrList1D' %
array_type)
info['typemap_in_base_array_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SBaseArray%sPtrList2D' %
array_type)
info['typemap_in_base_array2d_ptr_list_1d'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtrList1D' %
array_type)
info['typemap_in_base_array2d_ptr_list_2d'] = \
getattr(test, 'test_typemap_in_SBaseArray%s2dPtrList2D' %
array_type)
# Functions that are not overloaded to test error messages
info['typemap_in_array_not_ol'] = \
getattr(test, 'test_typemap_in_not_ol_Array%s' % array_type)
info['typemap_in_array_2d_not_ol'] = \
getattr(test, 'test_typemap_in_not_ol_Array%s2d' % array_type)
info['typemap_in_sparse_array_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_SparseArray%s' % array_type)
info['typemap_in_base_array_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_BaseArray%s' % array_type)
info['typemap_in_array_list_1d_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_Array%sList1D' % array_type)
info['typemap_in_array_list_2d_not_ol'] = \
getattr(test,
'test_typemap_in_not_ol_Array%sList2D' % array_type)
# for typemap out
info['typemap_out_sarray_ptr'] = \
getattr(test, 'test_typemap_out_SArray%sPtr' % array_type)
info['typemap_out_sarray_ptr_list_1d'] = \
getattr(test, 'test_typemap_out_SArray%sPtrList1D' % array_type)
info['typemap_out_sarray_ptr_list_2d'] = \
getattr(test, 'test_typemap_out_SArray%sPtrList2D' % array_type)
info['typemap_out_sarray_2d_ptr'] = \
getattr(test, 'test_typemap_out_SArray%s2dPtr' % array_type)
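# All typemap-in tests below follow the same pattern (sketch):
#   arr = info['python_array']        # fixture of the tested dtype
#   func = info['typemap_in_array']   # wrapped C++ entry point from the extension
#   assert func(arr) == arr.sum()     # the C++ side returns the sum of the passed array
#   assert func(info['number']) == info['number']  # overloads also accept a plain scalar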
def test_array_typemap_in(self):
"""...Test we can pass an Array as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_array']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array2d_typemap_in(self):
"""...Test we can pass an Array2d as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
extract_function = info['typemap_in_array_2d']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array_list_1d_typemap_in(self):
"""...Test we can pass a list of Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_array_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_array_list_2d_typemap_in(self):
"""...Test we can pass a list of list of Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_array_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sparsearray_typemap_in(self):
"""...Test we pass a SparseArray as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_sparse_array']
self.assertEqual(python_sparse_array.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sparsearray2d_typemap_in(self):
"""...Test we can pass a SparseArray2d as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array_2d = info['python_sparse_array_2d']
extract_function = info['typemap_in_sparse_array_2d']
self.assertEqual(python_sparse_array_2d.sum(),
extract_function(python_sparse_array_2d))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_typemap_in(self):
"""...Test we can pass an SArray shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_sarray_ptr']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray2dptr_typemap_in(self):
"""...Test we can pass an SArray2d shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
extract_function = info['typemap_in_sarray_ptr_2d']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of SArray shared pointers as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_sarray_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of SArray shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_sarray_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray2d_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of SArray2d shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_1d']
extract_function = info['typemap_in_sarray2d_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarray2d_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of SArray2d shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_2d']
extract_function = info['typemap_in_sarray2d_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_typemap_in(self):
"""...Test we can pass an VArray shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
extract_function = info['typemap_in_varray_ptr']
self.assertEqual(python_array.sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of VArray shared pointers as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
extract_function = info['typemap_in_varray_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_varray_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of VArray shared pointers as
argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
extract_function = info['typemap_in_varray_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_typemap_in(self):
"""...Test we can pass an BaseArray as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_base_array']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_typemap_in(self):
"""...Test we can pass an BaseArray2d as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
python_sparse_array = info['python_sparse_array_2d']
extract_function = info['typemap_in_base_array_2d']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_ssparsearrayptr_typemap_in(self):
"""...Test we can pass a SSparseArray shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_sparse_array_ptr']
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_ssparsearray2d_ptr_typemap_in(self):
"""...Test we can pass a SSparseArray2d shared pointer as argument
"""
for array_type, info in self.correspondence_dict.items():
python_sparse_array = info['python_sparse_array_2d']
extract_function = info['typemap_in_sparse_array_2d_ptr']
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sbasearray_ptr_typemap_in(self):
"""...Test we can pass an BaseArray as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array']
python_sparse_array = info['python_sparse_array']
extract_function = info['typemap_in_base_array_ptr']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sbasearray2d_ptr_typemap_in(self):
"""...Test we can pass an BaseArray2d as argument for sparse and dense
arrays
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_2d']
python_sparse_array = info['python_sparse_array_2d']
extract_function = info['typemap_in_base_array_2d_ptr']
# Test dense
self.assertEqual(python_array.sum(),
extract_function(python_array))
# Test sparse
self.assertEqual(python_sparse_array.data.sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_list_1d_typemap_in(self):
"""...Test we can pass a list of base Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
python_sparse_array = info['python_sparse_array_list_1d']
extract_function = info['typemap_in_base_array_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_list_2d_typemap_in(self):
"""...Test we can pass a list of list of base Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
python_sparse_array = info['python_sparse_array_list_2d']
extract_function = info['typemap_in_base_array_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0][0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_list_1d_typemap_in(self):
"""...Test we can pass a list of base Arrays 2D as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_1d']
python_sparse_array = info['python_sparse_array2d_list_1d']
extract_function = info['typemap_in_base_array2d_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_list_2d_typemap_in(self):
"""...Test we can pass a list of list of base Arrays 2D as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_2d']
python_sparse_array = info['python_sparse_array2d_list_2d']
extract_function = info['typemap_in_base_array2d_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0][0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of base Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_1d']
python_sparse_array = info['python_sparse_array_list_1d']
extract_function = info['typemap_in_base_array_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of base Arrays as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array_list_2d']
python_sparse_array = info['python_sparse_array_list_2d']
extract_function = info['typemap_in_base_array_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0][0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_ptr_list_1d_typemap_in(self):
"""...Test we can pass a list of base Arrays 2D as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_1d']
python_sparse_array = info['python_sparse_array2d_list_1d']
extract_function = info['typemap_in_base_array2d_ptr_list_1d']
self.assertEqual(python_array[0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_basearray2d_ptr_list_2d_typemap_in(self):
"""...Test we can pass a list of list of base Arrays 2D as argument
"""
for array_type, info in self.correspondence_dict.items():
python_array = info['python_array2d_list_2d']
python_sparse_array = info['python_sparse_array2d_list_2d']
extract_function = info['typemap_in_base_array2d_ptr_list_2d']
self.assertEqual(python_array[0][0].sum(),
extract_function(python_array))
self.assertEqual(python_sparse_array[0][0].sum(),
extract_function(python_sparse_array))
self.assertEqual(info['number'], extract_function(info['number']))
def test_sarrayptr_typemap_out(self):
"""...Test we can return an SArray shared pointer
"""
size = 10
for array_type, info in self.correspondence_dict.items():
python_array = np.arange(size, dtype=info['python_type'])
extract_function = info['typemap_out_sarray_ptr']
np.testing.assert_equal(python_array, extract_function(size))
def test_sarrayptr_list1d_typemap_out(self):
"""...Test we can return a list of SArray shared pointer
"""
size = 10
for array_type, info in self.correspondence_dict.items():
python_array = [
i * np.ones(i, dtype=info['python_type']) for i in range(size)
]
extract_function = info['typemap_out_sarray_ptr_list_1d']
np.testing.assert_equal(python_array, extract_function(size))
def test_sarrayptr_list2d_typemap_out(self):
"""...Test we can return an SArray shared pointer
"""
size1 = 10
size2 = 10
for array_type, info in self.correspondence_dict.items():
python_array = [[
i * np.ones(i, dtype=info['python_type']) for i in range(j)
] for j in range(size2)]
extract_function = info['typemap_out_sarray_ptr_list_2d']
np.testing.assert_equal(python_array, extract_function(
size1, size2))
def test_sarray2dptr_typemap_out(self):
"""...Test we can return an SArray2d shared pointer
"""
row_size = 6
col_size = 4
for array_type, info in self.correspondence_dict.items():
python_array = np.arange(row_size * col_size,
dtype=info['python_type']).reshape(
row_size, col_size)
extract_function = info['typemap_out_sarray_2d_ptr']
np.testing.assert_equal(python_array,
extract_function(row_size, col_size))
def test_basearray_errors_typemap_in(self):
"""...Test errors raised by typemap in on BaseArray
"""
for array_type, info in self.correspondence_dict.items():
extract_function = info['typemap_in_base_array_not_ol']
# Test if we pass something that has no link with a numpy array
regex_error_random = "Expecting.*1d.*%s.*array.*sparse" % \
info['cpp_type']
self.assertRaisesRegex(ValueError, regex_error_random,
extract_function, object())
# Test if we pass an array of another type
regex_error_wrong_type = "Expecting.*%s.*array" % info['cpp_type']
other_array_type = [
key for key in self.correspondence_dict.keys()
if key != array_type
][0]
python_other_type_array = self.correspondence_dict[
other_array_type]['python_array']
self.assertRaisesRegex(ValueError, regex_error_wrong_type,
extract_function, python_other_type_array)
def test_sparsearray_errors_typemap_in(self):
"""...Test errors raised by typemap in on SparseArray
"""
for array_type, info in self.correspondence_dict.items():
extract_function = info['typemap_in_sparse_array_not_ol']
regex_error_dense = "Expecting.*sparse"
python_array = info['python_array']
self.assertRaisesRegex(ValueError, regex_error_dense,
extract_function, python_array)
# Test if we pass an array of another type
regex_error_wrong_type = "Expecting.*%s.*array" % info['cpp_type']
other_array_type = [
key for key in self.correspondence_dict.keys()
if key != array_type
][0]
python_other_type_array = self.correspondence_dict[
other_array_type]['python_sparse_array']
self.assertRaisesRegex(ValueError, regex_error_wrong_type,
extract_function, python_other_type_array)
def test_array_errors_typemap_in(self):
"""...Test errors raised by typemap in on SparseArray
"""
for array_type, info in self.correspondence_dict.items():
extract_function = info['typemap_in_array_not_ol']
# Test if we pass something that has no link with a numpy array
regex_error_random = "Expecting(.*?)numpy(.*?)(a|A)rray"
self.assertRaisesRegex(ValueError, regex_error_random,
extract_function, object())
regex_error_sparse = "Expecting.*dense"
python_sparse_array = info['python_sparse_array']
self.assertRaisesRegex(ValueError, regex_error_sparse,
extract_function, python_sparse_array)
# Test if we pass an array of another type
regex_error_wrong_type = "Expecting.*%s.*array" % info['cpp_type']
other_array_type = [
key for key in self.correspondence_dict.keys()
if key != array_type
][0]
python_other_type_array = self.correspondence_dict[
other_array_type]['python_array']
self.assertRaisesRegex(ValueError, regex_error_wrong_type,
extract_function, python_other_type_array)
with self.assertRaisesRegex(ValueError, "contiguous"):
non_contiguous = np.arange(20).astype(float)[::2]
extract_function(non_contiguous)
with self.assertRaisesRegex(ValueError, "dimensional"):
extract_function(np.zeros((5, 5)))
with self.assertRaisesRegex(ValueError, "dimensional"):
extract_function(np.zeros((5, 5, 5)))
"""
Module with the derived instances for MaNGA kinematics.
.. |ee| unicode:: U+00E9
:trim:
.. |Sersic| replace:: S |ee| rsic
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import glob
import warnings
import netrc
from pkg_resources import resource_filename
from IPython import embed
import numpy as np
from scipy import sparse
import matplotlib.image as img
from astropy.io import fits
from astropy.wcs import WCS
from .util import get_map_bin_transformations, impose_positive_definite, gaussian_fill
from .kinematics import Kinematics
from .meta import GlobalPar
from ..util.bitmask import BitMask
from ..util.download import download_file
def channel_dictionary(hdu, ext, prefix='C'):
"""
Construct a dictionary of the channels in a MaNGA MAPS file.
Copied from mangadap.util.fileio.channel_dictionary
Args:
hdu (`astropy.io.fits.HDUList`_):
The open fits file.
ext (:obj:`int`, :obj:`str`):
The name or index number of the fits extension with the
relevant map channels and the header with the channel
names.
prefix (:obj:`str`, optional):
The key that is used as a prefix for all header keywords
associated with channel names. The header keyword is the
combination of this prefix and the 1-indexed channel
number.
Returns:
:obj:`dict`: The dictionary that provides the channel index
associated with the channel name.
"""
channel_dict = {}
for k, v in hdu[ext].header.items():
if k[:len(prefix)] == prefix:
try:
i = int(k[len(prefix):])-1
except ValueError:
continue
channel_dict[v] = i
return channel_dict
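# Usage sketch for channel_dictionary; the extension and channel names below
# are placeholders for whatever the MAPS file actually contains:
#
# with fits.open(maps_file) as hdu:
# emline = channel_dictionary(hdu, 'EMLINE_GFLUX')
# halpha = hdu['EMLINE_GFLUX'].data[emline['Ha-6564']]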
def read_manga_psf(cube_file, psf_ext, fwhm=False, quiet=False):
"""
Read the image of the reconstructed datacube point-spread
function from the MaNGA DRP CUBE file.
.. warning::
No check is performed to ensure that the provided extension
is a valid PSF extension.
Args:
cube_file (:obj:`str`):
The name of the DRP-produced LOGCUBE fits file with the
reconstructed PSF.
psf_ext (:obj:`str`):
The name of the extension with the reconstructed PSF;
e.g. ``'GPSF'``.
fwhm (:obj:`bool`, optional):
If True, also return the g-band FWHM.
quiet (:obj:`bool`, optional):
Suppress printed output.
Returns:
`numpy.ndarray`_: Array with the reconstructed PSF.
Raises:
FileNotFoundError:
Raised if the provided ``cube_file`` does not exist.
KeyError:
Raised if the provided ``psf_ext`` is not a valid fits
extension.
"""
if not os.path.isfile(cube_file):
raise FileNotFoundError('File does not exist: {0}'.format(cube_file))
# Read the PSF
# TODO: Switch from print to a logger
if not quiet:
print('Reading {0} ... '.format(cube_file))
with fits.open(cube_file) as hdu:
if psf_ext not in hdu:
raise KeyError('{0} does not include an extension {1}.'.format(
cube_file, psf_ext))
psf = hdu[psf_ext].data
if fwhm: fwhm = hdu[0].header['GFWHM']
if not quiet:
print('Done')
if fwhm: return psf, fwhm
return psf
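# Usage sketch; the file name is a placeholder and 'GPSF' is the g-band PSF
# extension mentioned in the docstring above:
#
# psf, gfwhm = read_manga_psf('manga-8138-12704-LOGCUBE.fits.gz', 'GPSF',
# fwhm=True)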
def manga_versions():
"""
Return a dictionary connecting the MaNGA DR/MPL to the relevant DRP and
DAP versions. To list the available versions::
from nirvana.data.manga import manga_versions
print(manga_versions().keys())
"""
return {'DR15': {'DRP': 'v2_4_3', 'DAP': '2.2.1', 'collab': False},
'MPL-10': {'DRP': 'v3_0_1', 'DAP': '3.0.1', 'collab': True},
'MPL-11': {'DRP': 'v3_1_1', 'DAP': '3.1.0', 'collab': True},
'DR17': {'DRP': 'v3_1_1', 'DAP': '3.1.0', 'collab': False}}
# TODO: Split this into catalog paths and galaxy paths
def manga_paths(plate, ifu, daptype='HYB10-MILESHC-MASTARHC2', dr='DR17', redux_path=None,
analysis_path=None, raw=False, relative=False):
"""
Construct a set of directory paths for the MaNGA data.
.. warning::
If absolute paths are used (``relative`` is False) and the code is
unable to define ``redux_path`` or ``analysis_path`` by default, the
relevant file paths are returned as ``None``.
Args:
plate (:obj:`int`):
MaNGA plate number.
ifu (:obj:`int`):
MaNGA IFU number.
daptype (:obj:`str`, optional):
The identifier for the method used by the DAP to analyze
the data.
dr (:obj:`str`, optional):
Data release identifier; see :func:`manga_versions`.
redux_path (:obj:`str`, optional):
The top-level directory with all DRP output. If None and
``relative`` is False, this will be set to the
``MANGA_SPECTRO_REDUX`` environmental variable, if it is defined.
analysis_path (:obj:`str`, optional):
The top-level directory with all DAP output. If None and
``relative`` is False, this will be set to the
``MANGA_SPECTRO_ANALYSIS`` environmental variable, if it is
defined.
raw (:obj:`bool`, optional):
Construct the directories using the raw version numbers instead
of the DR/MPL symlink.
relative (:obj:`bool`, optional):
When constructing the directories, only provide the path relative
to the top-level path for each relevant root path. I.e., if
``relative`` is true, the output cube path is relative to the
top-level DRP path, which is normally defined by
``MANGA_SPECTRO_REDUX``.
Returns:
:obj:`tuple`: Five strings providing (1) the DRP directory with the
DRPall file, (2) the DRP directory with the LOGCUBE file, (3) the DRP
directory with the SDSS multicolor finding chart image file, (4) the
DAP directory with the DAPall file, and (5) the DAP directory with
the MAPS file.
"""
versions = manga_versions()
if dr not in versions.keys():
raise ValueError(f'{dr} is not an available DR; see nirvana.data.manga.manga_versions.')
drpall_path = versions[dr]['DRP'] if raw else dr
cube_path = os.path.join(drpall_path, str(plate), 'stack')
image_path = os.path.join(drpall_path, str(plate), 'images')
dapall_path = os.path.join(versions[dr]['DRP'], versions[dr]['DAP']) if raw else dr
maps_path = os.path.join(dapall_path, daptype, str(plate), str(ifu))
if relative:
# Return only the relative paths
return drpall_path, cube_path, image_path, dapall_path, maps_path
_redux_path = os.getenv('MANGA_SPECTRO_REDUX') if redux_path is None else redux_path
_analysis_path = os.getenv('MANGA_SPECTRO_ANALYSIS') if analysis_path is None \
else analysis_path
drpall_path = None if _redux_path is None else os.path.join(_redux_path, drpall_path)
cube_path = None if _redux_path is None else os.path.join(_redux_path, cube_path)
image_path = None if _redux_path is None else os.path.join(_redux_path, image_path)
dapall_path = None if _analysis_path is None else os.path.join(_analysis_path, dapall_path)
maps_path = None if _analysis_path is None else os.path.join(_analysis_path, maps_path)
return drpall_path, cube_path, image_path, dapall_path, maps_path
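# Sketch of the relative-path behaviour (plate/IFU below are placeholders);
# with relative=True the result depends only on the inputs, e.g. on a POSIX
# system:
#
# manga_paths(8138, 12704, dr='DR17', relative=True)
# # -> ('DR17', 'DR17/8138/stack', 'DR17/8138/images', 'DR17',
# # 'DR17/HYB10-MILESHC-MASTARHC2/8138/12704')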
# TODO: Split this into catalog files and galaxy files
def manga_file_names(plate, ifu, daptype='HYB10-MILESHC-MASTARHC2', dr='DR17'):
"""
Construct MaNGA file names.
Args:
plate (:obj:`int`):
MaNGA plate number.
ifu (:obj:`int`):
MaNGA IFU number.
daptype (:obj:`str`, optional):
The identifier for the method used by the DAP to analyze
the data.
dr (:obj:`str`, optional):
Data release identifier; see :func:`manga_versions`.
Returns:
:obj:`tuple`: Five strings providing (1) the DRPall file name, (2)
the LOGCUBE file name, (3) the SDSS multicolor finding chart image
file name, (4) the DAPall file name, and (5) the MAPS file name.
"""
versions = manga_versions()
if dr not in versions.keys():
raise ValueError(f'{dr} is not an available DR; see nirvana.data.manga.manga_versions.')
drpver = versions[dr]['DRP']
dapver = versions[dr]['DAP']
return f'drpall-{drpver}.fits', f'manga-{plate}-{ifu}-LOGCUBE.fits.gz', f'{ifu}.png', \
f'dapall-{drpver}-{dapver}.fits', f'manga-{plate}-{ifu}-MAPS-{daptype}.fits.gz'
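# Sketch of the names produced for DR17 (plate/IFU are placeholders), using
# the DRP/DAP versions from manga_versions():
#
# manga_file_names(8138, 12704, dr='DR17')
# # -> ('drpall-v3_1_1.fits', 'manga-8138-12704-LOGCUBE.fits.gz', '12704.png',
# # 'dapall-v3_1_1-3.1.0.fits',
# # 'manga-8138-12704-MAPS-HYB10-MILESHC-MASTARHC2.fits.gz')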
def manga_files_from_plateifu(plate, ifu, daptype='HYB10-MILESHC-MASTARHC2', dr='DR17',
redux_path=None, cube_path=None, image_path=None, analysis_path=None,
maps_path=None, check=True, remotedir=None, rawpaths=False):
"""
Get MaNGA files used by NIRVANA.
.. warning::
The method does not check that these files exist.
Args:
plate (:obj:`int`):
Plate number
ifu (:obj:`int`):
IFU design number
daptype (:obj:`str`, optional):
The identifier for the method used by the DAP to analyze
the data.
dr (:obj:`str`, optional):
Data release identifier; see :func:`manga_versions`.
redux_path (:obj:`str`, optional):
The top-level directory with all DRP output. If None,
this will be set to the ``MANGA_SPECTRO_REDUX``
environmental variable, if it is defined.
cube_path (:obj:`str`, optional):
This provides the *direct* path to the datacube file,
circumventing the use of ``dr`` and ``redux_path``.
image_path (:obj:`str`, optional):
This provides the *direct* path to the image file,
circumventing the use of ``dr`` and ``redux_path``.
analysis_path (:obj:`str`, optional):
The top-level directory with all DAP output. If None,
this will be set to the ``MANGA_SPECTRO_ANALYSIS``
environmental variable, if it is defined.
maps_path (:obj:`str`, optional):
This provides the *direct* path to the maps file,
circumventing the use of ``dr`` and ``analysis_path``.
check (:obj:`bool`, optional):
Check that the directories with the expected files exist.
remotedir (:obj:`str`, optional):
If specified, it will download the data from sas into this root
directory instead of looking locally
rawpaths (:obj:`bool`, optional):
When constructing the default paths, use the raw version
directories instead of the DR symlinks. See :func:`manga_paths`.
Returns:
:obj:`tuple`: Full path to three files: (1) the DAP MAPS file, (2)
the DRP LOGCUBE file, and (3) the SDSS 3-color PNG image. If the
image path cannot be defined or does not exist, the image file name
is returned as ``None``.
Raises:
ValueError:
Raised if the directories to either the maps or cube file
could not be determined from the input.
NotADirectoryError:
Raised if the directory where the datacube should exist can be
defined but does not exist.
"""
#download from sas instead of looking locally
if remotedir is not None:
return download_plateifu(plate, ifu, daptype=daptype, dr=dr, oroot=remotedir,
overwrite=False)
_, cube_file, image_file, _, maps_file = manga_file_names(plate, ifu, daptype=daptype, dr=dr)
# Construct the paths
if cube_path is None or image_path is None or maps_path is None:
_, cpath, ipath, _, mpath = manga_paths(plate, ifu, daptype=daptype, dr=dr,
redux_path=redux_path, analysis_path=analysis_path,
raw=rawpaths)
if maps_path is None:
maps_path = mpath
if maps_path is None:
raise ValueError('Could not define path to MAPS file.')
if check and not os.path.isdir(maps_path):
raise NotADirectoryError('No such directory: {0}'.format(maps_path))
maps_file = os.path.abspath(os.path.join(maps_path, maps_file))
if cube_path is None:
cube_path = cpath
if cube_path is None:
raise ValueError('Could not define path to LOGCUBE file.')
if check and not os.path.isdir(cube_path):
raise NotADirectoryError('No such directory: {0}'.format(cube_path))
cube_file = os.path.abspath(os.path.join(cube_path, cube_file))
if image_path is None:
image_path = ipath
if image_path is None:
warnings.warn('Could not define directory to image PNGs')
elif not os.path.isdir(image_path):
warnings.warn('No such directory: {0}'.format(image_path))
image_path = None
image_file = None if image_path is None \
else os.path.abspath(os.path.join(image_path, image_file))
return maps_file, cube_file, image_file
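# Usage sketch (plate/IFU are placeholders); assumes either the
# MANGA_SPECTRO_REDUX / MANGA_SPECTRO_ANALYSIS environment variables are set
# or remotedir is provided so the files are downloaded instead:
#
# maps_file, cube_file, image_file = manga_files_from_plateifu(8138, 12704,
# dr='DR17')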
# TODO: Break this into two functions to download the DRPall and DAPall file
# separately?
def download_catalogs(dr='DR17', oroot=None, redux_path=None, analysis_path=None, overwrite=True,
sasurl='https://data.sdss.org/sas/mangawork/manga/spectro'):
"""
Download the two main MaNGA catalog files, the DRPall and DAPall files.
Args:
dr (:obj:`str`, optional):
Data release identifier; see :func:`manga_versions`.
oroot (:obj:`str`, optional):
Root directory for all files. If provided, ``redux_path`` and
``analysis_path`` are ignored.
redux_path (:obj:`str`, optional):
The top-level directory with all DRP output. If None,
this will be set to the ``MANGA_SPECTRO_REDUX``
environmental variable, if it is defined.
analysis_path (:obj:`str`, optional):
The top-level directory with all DAP output. If None,
this will be set to the ``MANGA_SPECTRO_ANALYSIS``
environmental variable, if it is defined.
overwrite (:obj:`bool`, optional):
Overwrite existing files.
sasurl (:obj:`str`, optional):
Top-level Science Archive Server url with the MaNGA data.
Returns:
:obj:`tuple`: The names of the two downloaded files: (1) the DRPall
file and (2) the DAPall file.
"""
versions = manga_versions()
if dr not in versions.keys():
raise ValueError(f'{dr} is not an available DR; see nirvana.data.manga.manga_versions.')
# Get the authentication if needed
if versions[dr]['collab']:
try:
NETRC = netrc.netrc()
except Exception as e:
raise FileNotFoundError('Authentication required, but no ~/.netrc file.') from e
# TODO: What happens if the .netrc file doesn't have 'data.sdss.org'
# credentials?
user, acc, password = NETRC.authenticators('data.sdss.org')
auth = (user, password)
else:
auth = None
# Get the default relative paths, relative to the top-level reduction and
# analysis paths
sas_drpall_path, _, _, sas_dapall_path, _ \
= manga_paths(0, 0, dr=dr, raw='DR' in dr, relative=True)
# Get the output paths
if oroot is None:
_redux_path = os.getenv('MANGA_SPECTRO_REDUX') if redux_path is None else redux_path
if _redux_path is None:
raise ValueError('Cannot define the reduction path; set the MANGA_SPECTRO_REDUX '
'environmental variable or provide redux_path.')
_analysis_path = os.getenv('MANGA_SPECTRO_ANALYSIS') if analysis_path is None \
else analysis_path
if _analysis_path is None:
raise ValueError('Cannot define the analysis path; set the MANGA_SPECTRO_ANALYSIS '
'environmental variable or provide analysis_path.')
# Relative paths are never None...
if 'DR' in dr:
# NOTE: These gymnastics are because there is no DR15 DAP
# directory...
_drpall_path, _, _, _dapall_path, _ = manga_paths(0, 0, dr=dr, relative=True)
drpall_path = os.path.join(os.path.abspath(_redux_path), _drpall_path)
dapall_path = os.path.join(os.path.abspath(_analysis_path), _dapall_path)
else:
drpall_path = os.path.join(os.path.abspath(_redux_path), sas_drpall_path)
dapall_path = os.path.join(os.path.abspath(_analysis_path), sas_dapall_path)
else:
drpall_path = os.path.abspath(oroot)
dapall_path = os.path.abspath(oroot)
# Fix the SAS urls to include the top-level redux and analysis
sas_drpall_path = f'redux/{sas_drpall_path}'
sas_dapall_path = f'analysis/{sas_dapall_path}'
# Make the paths if they don't already exist
for p in [drpall_path, dapall_path]:
if not os.path.isdir(p):
os.makedirs(p)
# File names
drpall_file, _, _, dapall_file, _ = manga_file_names(0, 0, dr=dr)
# Fix input url?
_sasurl = sasurl[:-1] if sasurl[-1] == '/' else sasurl
# Download the files
files = ()
for s,p,f in zip([sas_drpall_path, sas_dapall_path], [drpall_path, dapall_path],
[drpall_file, dapall_file]):
files += (os.path.join(p, f),)
download_file(f'{_sasurl}/{s}/{f}', files[-1], overwrite=overwrite, auth=auth)
return files
def download_plateifu(plate, ifu, daptype='HYB10-MILESHC-MASTARHC2', dr='DR17', oroot=None,
redux_path=None, analysis_path=None, overwrite=True,
sasurl='https://data.sdss.org/sas/mangawork/manga/spectro'):
"""
Download the individual plate-ifu MaNGA data files.
Method currently requires authentication using a ``.netrc`` file in your
home directory if you are using collaboration-only data; see `SDSS
Collaboration Access`_.
Args:
plate (:obj:`int`):
MaNGA plate number.
ifu (:obj:`int`):
MaNGA IFU number.
daptype (:obj:`str`, optional):
The identifier for the method used by the DAP to analyze
the data.
dr (:obj:`str`, optional):
Data release identifier; see :func:`manga_versions`.
oroot (:obj:`str`, optional):
Root directory for all files. If provided, ``redux_path`` and
``analysis_path`` are ignored.
redux_path (:obj:`str`, optional):
The top-level directory with all DRP output. If None,
this will be set to the ``MANGA_SPECTRO_REDUX``
environmental variable, if it is defined.
analysis_path (:obj:`str`, optional):
The top-level directory with all DAP output. If None,
this will be set to the ``MANGA_SPECTRO_ANALYSIS``
environmental variable, if it is defined.
overwrite (:obj:`bool`, optional):
Overwrite existing files.
sasurl (:obj:`str`, optional):
Top-level Science Archive Server url with the MaNGA data.
Returns:
:obj:`tuple`: The names of the three downloaded files: (1) the DRP
LOGCUBE file, (2) SDSS color composite finding chart image, and (3)
the DAP MAPS file.
"""
versions = manga_versions()
if dr not in versions.keys():
raise ValueError(f'{dr} is not an available DR; see nirvana.data.manga.manga_versions.')
# Get the authentication if needed
if versions[dr]['collab']:
try:
NETRC = netrc.netrc()
except Exception as e:
raise FileNotFoundError('Authentication required, but no ~/.netrc file.') from e
# TODO: What happens if the .netrc file doesn't have 'data.sdss.org'
# credentials?
user, acc, password = NETRC.authenticators('data.sdss.org')
auth = (user, password)
else:
auth = None
# Get the default relative paths
_, sas_cube_path, sas_image_path, _, sas_maps_path \
= manga_paths(plate, ifu, daptype=daptype, dr=dr, redux_path=redux_path,
analysis_path=analysis_path, raw='DR' in dr, relative=True)
# Get the output paths
if oroot is None:
_redux_path = os.getenv('MANGA_SPECTRO_REDUX') if redux_path is None else redux_path
if _redux_path is None:
raise ValueError('Cannot define the reduction path; set the MANGA_SPECTRO_REDUX '
'environmental variable or provide redux_path.')
_analysis_path = os.getenv('MANGA_SPECTRO_ANALYSIS') if analysis_path is None \
else analysis_path
if _analysis_path is None:
raise ValueError('Cannot define the analysis path; set the MANGA_SPECTRO_ANALYSIS '
'environmental variable or provide analysis_path.')
# Relative paths are never None...
if 'DR' in dr:
# NOTE: These gymnastics are because there is no DR15 DAP
# directory...
_, _cube_path, _image_path, _, _maps_path \
= manga_paths(plate, ifu, daptype=daptype, dr=dr, redux_path=redux_path,
analysis_path=analysis_path, relative=True)
cube_path = os.path.join(os.path.abspath(_redux_path), _cube_path)
image_path = os.path.join(os.path.abspath(_redux_path), _image_path)
maps_path = os.path.join(os.path.abspath(_analysis_path), _maps_path)
else:
cube_path = os.path.join(os.path.abspath(_redux_path), sas_cube_path)
image_path = os.path.join(os.path.abspath(_redux_path), sas_image_path)
maps_path = os.path.join(os.path.abspath(_analysis_path), sas_maps_path)
else:
cube_path = os.path.join(os.path.abspath(oroot), str(plate), str(ifu))
image_path = cube_path
maps_path = cube_path
# Fix the SAS urls to include the top-level redux and analysis
sas_cube_path = f'redux/{sas_cube_path}'
sas_image_path = f'redux/{sas_image_path}'
sas_maps_path = f'analysis/{sas_maps_path}'
# Make the paths if they don't already exist
for p in [cube_path, image_path, maps_path]:
if not os.path.isdir(p):
os.makedirs(p)
# File names
_, cube_file, image_file, _, maps_file = manga_file_names(plate, ifu, daptype=daptype, dr=dr)
# Fix input url?
_sasurl = sasurl[:-1] if sasurl[-1] == '/' else sasurl
# NOTE: Order matters and must match the sequence from `manga_files_from_plateifu`
files = ()
for s,p,f in zip([sas_maps_path, sas_cube_path, sas_image_path],
[maps_path, cube_path, image_path], [maps_file, cube_file, image_file]):
files += (os.path.join(p, f),)
download_file(f'{_sasurl}/{s}/{f}', files[-1], overwrite=overwrite, auth=auth)
return files
def sdss_bitmask(bitgroup):
"""
Return a :class:`~nirvana.util.bitmask.BitMask` instance for the
specified group of SDSS maskbits.
Args:
bitgroup (:obj:`str`):
The name designating the group of bitmasks to read.
Returns:
:class:`~nirvana.util.bitmask.BitMask`: Instance used to parse SDSS
maskbits.
"""
sdssMaskbits = os.path.join(resource_filename('nirvana', 'config'), 'sdss', 'sdssMaskbits.par')
return BitMask.from_par_file(sdssMaskbits, bitgroup)
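# Usage sketch mirroring parse_manga_targeting_bits below:
#
# bm = sdss_bitmask('MANGA_TARGET1')
# secondary = bm.flagged(mngtarg1, flag='SECONDARY_v1_2_0')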
def parse_manga_targeting_bits(mngtarg1, mngtarg3=None):
"""
Return boolean flags identifying the target samples selected by the
provided bits.
Args:
mngtarg1 (:obj:`int`, `numpy.ndarray`_):
One or more targeting bit values from the MANGA_TARGET1 group.
mngtarg3 (:obj:`int`, `numpy.ndarray`_, optional):
One or more targeting bit values from the MANGA_TARGET3 group,
the ancillary targeting bits. Should have the same shape as
``mngtarg1``.
Returns:
:obj:`tuple`: Four booleans or boolean arrays that identify the
target as being (1) part of the Primary+ (Primary and/or
Color-enhanced) sample, (2) the Secondary sample, (3) an ancillary
target, and/or (4) a filler target.
"""
bitmask = sdss_bitmask('MANGA_TARGET1')
primaryplus = bitmask.flagged(mngtarg1, flag=['PRIMARY_v1_2_0', 'COLOR_ENHANCED_v1_2_0'])
secondary = bitmask.flagged(mngtarg1, flag='SECONDARY_v1_2_0')
ancillary = np.zeros(mngtarg1.shape, dtype=bool)
import sys
import petsc4py
petsc4py.init(sys.argv)
# from scipy.io import savemat, loadmat
# from src.ref_solution import *
# import warnings
# from memory_profiler import profile
# from time import time
import pickle
import numpy as np
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic, StokesFlowObj
from petsc4py import PETSc
from src.geo import *
from src.myio import *
from src.objComposite import *
from src.myvtk import *
from src.StokesFlowMethod import *
from src.stokesletsInPipe import *
def get_problem_kwargs(**main_kwargs):
problem_kwargs = get_solver_kwargs()
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'spheroids_3')
OptDB.setValue('f', fileHandle)
problem_kwargs['fileHandle'] = fileHandle
kwargs_list = (get_shearFlow_kwargs(), get_freeVortex_kwargs(), get_ecoli_kwargs(),
get_one_ellipse_kwargs(), get_forcefree_kwargs(), main_kwargs,)
for t_kwargs in kwargs_list:
for key in t_kwargs:
problem_kwargs[key] = t_kwargs[key]
return problem_kwargs
def print_case_info(**problem_kwargs):
fileHandle = problem_kwargs['fileHandle']
print_solver_info(**problem_kwargs)
print_shearFlow_info(**problem_kwargs)
print_freeVortex_info(**problem_kwargs)
print_one_ellipse_info(fileHandle, **problem_kwargs)
return True
def main_fun_noIter(**main_kwargs):
# OptDB = PETSc.Options()
main_kwargs['zoom_factor'] = 1
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
print_case_info(**problem_kwargs)
iter_tor = 1e-3
# location and norm of spheroids
rc1 = np.array((0, 1, 0))
rc2 = np.array((-np.sqrt(3) / 2, - 1 / 2, 0))
rc3 = np.array((np.sqrt(3) / 2, - 1 / 2, 0))
nc1 = np.array((-1, np.sqrt(3), np.sqrt(5))) / 3
nc2 = np.array((-1, -np.sqrt(3), np.sqrt(5))) / 3
nc3 = np.array((2, 0, np.sqrt(5))) / 3
rc_all = np.vstack((rc1, rc2, rc3))
nc_all = np.vstack((nc1, nc2, nc3))
spheroid0 = create_one_ellipse(namehandle='spheroid0', **problem_kwargs)
spheroid0.move(rc1)
spheroid0_norm = spheroid0.get_u_geo().get_geo_norm()
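# Rotate spheroid0 to align its geometric norm with the target direction nc1:
# the rotation axis is the cross product of the two vectors and the rotation
# angle is the angle between them.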
rot_norm = np.cross(nc1, spheroid0_norm)
rot_theta = np.arccos(
np.dot(nc1, spheroid0_norm) / np.linalg.norm(nc1) / np.linalg.norm(spheroid0_norm))
spheroid0.node_rotation(rot_norm, rot_theta)
spheroid1 = spheroid0.copy()
spheroid1.node_rotation(np.array((0, 0, 1)), 2 * np.pi / 3, rotation_origin=np.zeros(3))
spheroid2 = spheroid0.copy()
spheroid2.node_rotation(np.array((0, 0, 1)), 4 * np.pi / 3, rotation_origin=np.zeros(3))
spheroid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='spheroid_comp')
spheroid_comp.add_obj(obj=spheroid0, rel_U=np.zeros(6))
spheroid_comp.add_obj(obj=spheroid1, rel_U=np.zeros(6))
spheroid_comp.add_obj(obj=spheroid2, rel_U=np.zeros(6))
# spheroid_comp.node_rotation(np.array((1, 0, 0)), np.pi / 2)
# # dbg
# tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
# spheroid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)), name='spheroid_comp')
# for tobj in tail_list:
# spheroid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
problem_ff = sf.LambOseenVortexForceFreeProblem(**problem_kwargs)
problem_ff.add_obj(spheroid_comp)
problem_ff.print_info()
problem_ff.create_matrix()
problem_ff.solve()
ref_U = spheroid_comp.get_ref_U()
PETSc.Sys.Print(' ref_U in shear flow', ref_U)
spheroid_comp_F = spheroid_comp.get_total_force()
spheroid0_F = spheroid0.get_total_force(center=np.zeros(3))
# spheroid0_F = tail_list[0].get_total_force(center=np.zeros(3))
PETSc.Sys.Print(' spheroid_comp_F %s' % str(spheroid_comp_F))
PETSc.Sys.Print(' spheroid0_F %s' % str(spheroid0_F))
PETSc.Sys.Print(' non dimensional (F, T) err %s' % str(spheroid_comp_F / spheroid0_F))
return True
def main_fun_fix(**main_kwargs):
# OptDB = PETSc.Options()
main_kwargs['zoom_factor'] = 1
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
print_case_info(**problem_kwargs)
iter_tor = 1e-3
# location and norm of spheroids
rc1 = np.array((0, 1, 0))
import numpy as np
from . import stereonet_math
def fit_girdle(*args, **kwargs):
"""
Fits a plane to a scatter of points on a stereonet (a.k.a. a "girdle").
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the ``measurement`` keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strikes`` & ``dips``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
strike, dip: floats
The strike and dip of the plane.
Notes
-----
The pole to the best-fit plane is extracted by calculating the smallest
eigenvector of the covariance matrix of the input measurements in cartesian
3D space.
Examples
--------
Calculate the plunge of a cylindrical fold axis from a series of strike/dip
measurements of bedding from the limbs:
>>> strike = [270, 334, 270, 270]
>>> dip = [20, 15, 80, 78]
>>> s, d = mplstereonet.fit_girdle(strike, dip)
>>> plunge, bearing = mplstereonet.pole2plunge_bearing(s, d)
"""
vec = 0 # Smallest eigenvector will be the pole
return _sd_of_eigenvector(args, vec=vec, **kwargs)
def fit_pole(*args, **kwargs):
"""
Fits the pole to a plane to a "bullseye" of points on a stereonet.
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the ``measurement`` keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
strike, dip: floats
The strike and dip of the plane.
Notes
-----
The pole to the best-fit plane is extracted by calculating the largest
eigenvector of the covariance matrix of the input measurements in cartesian
3D space.
Examples
--------
Find the average strike/dip of a series of bedding measurements
>>> strike = [270, 65, 280, 300]
>>> dip = [20, 15, 10, 5]
>>> strike0, dip0 = mplstereonet.fit_pole(strike, dip)
"""
vec = -1 # Largest eigenvector will be the pole
return _sd_of_eigenvector(args, vec=vec, **kwargs)
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True):
"""Unifies ``fit_pole`` and ``fit_girdle``."""
lon, lat = _convert_measurements(data, measurement)
vals, vecs = cov_eig(lon, lat, bidirectional)
x, y, z = vecs[:, vec]
s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z))
return s[0], d[0]
def eigenvectors(*args, **kwargs):
"""
Finds the 3 eigenvectors and eigenvalues of the 3D covariance matrix of a
series of geometries. This can be used to fit a plane/pole to a dataset or
for shape fabric analysis (e.g. Flinn/Hsu plots).
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the *measurement* keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
plunges, bearings, values : sequences of 3 floats each
The plunges, bearings, and eigenvalues of the three eigenvectors of the
covariance matrix of the input data. The measurements are returned
sorted in descending order relative to the eigenvalues. (i.e. The
largest eigenvector/eigenvalue is first.)
Examples
--------
Find the eigenvectors as plunge/bearing and eigenvalues of the 3D
covariance matrix of a series of planar measurements:
>>> strikes = [270, 65, 280, 300]
>>> dips = [20, 15, 10, 5]
>>> plu, azi, vals = mplstereonet.eigenvectors(strikes, dips)
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
vals, vecs = cov_eig(lon, lat, kwargs.get('bidirectional', True))
lon, lat = stereonet_math.cart2sph(*vecs)
plunges, bearings = stereonet_math.geographic2plunge_bearing(lon, lat)
# Largest eigenvalue first...
return plunges[::-1], bearings[::-1], vals[::-1]
def cov_eig(lon, lat, bidirectional=True):
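"""
Eigen-decomposition of the 3D covariance matrix of the input points.
``lon`` and ``lat`` are longitudes and latitudes in radians in the
stereonet coordinate system. If ``bidirectional`` is True, the antipode of
each point is included before converting to cartesian coordinates. Returns
the eigenvalues and eigenvectors sorted by ascending eigenvalue.
"""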
lon = np.atleast_1d(np.squeeze(lon))
lat = np.atleast_1d(np.squeeze(lat))
if bidirectional:
# Include antipodes in calculation...
lon2, lat2 = stereonet_math.antipode(lon, lat)
lon, lat = np.hstack([lon, lon2]), np.hstack([lat, lat2])
xyz = np.column_stack(stereonet_math.sph2cart(lon, lat))
cov = np.cov(xyz.T)
eigvals, eigvecs = np.linalg.eigh(cov)
order = eigvals.argsort()
return eigvals[order], eigvecs[:, order]
def _convert_measurements(data, measurement):
def do_nothing(x, y):
return x, y
func = {'poles':stereonet_math.pole,
'lines':stereonet_math.line,
'rakes':stereonet_math.rake,
'radians':do_nothing}[measurement]
return func(*data)
def find_mean_vector(*args, **kwargs):
"""
Returns the mean vector for a set of measurments. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1).
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
vector, r_value = stereonet_math.mean_vector(lon, lat)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector)
return (plunge[0], bearing[0]), r_value
def find_fisher_stats(*args, **kwargs):
"""
Returns the mean vector and summary statistics for a set of measurements.
By default, this expects the input to be plunges and bearings, but the type
of input can be controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
conf : number
The confidence level (0-100). Defaults to 95%, similar to 2 sigma.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector: tuple of two floats
A set consisting of the plunge and bearing of the mean vector (in
degrees).
stats : tuple of three floats
``(r_value, confidence, kappa)``
The ``r_value`` is the magnitude of the mean vector as a number between
0 and 1.
The ``confidence`` radius is the opening angle of a small circle that
corresponds to the confidence in the calculated direction, and is
dependent on the input ``conf``.
The ``kappa`` value is the dispersion factor that quantifies the amount
of dispersion of the given vectors, analgous to a variance/stddev.
"""
# How the heck did this wind up as a separate function?
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
conf = kwargs.get('conf', 95)
center, stats = stereonet_math.fisher_stats(lon, lat, conf)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*center)
mean_vector = (plunge[0], bearing[0])
return mean_vector, stats
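# Usage sketch (the measurements are placeholder values):
#
# plunges = [20, 25, 23]
# bearings = [110, 115, 112]
# (plunge, bearing), (r, conf_radius, kappa) = find_fisher_stats(plunges,
# bearings, conf=95)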
def kmeans(*args, **kwargs):
"""
Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster.
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
num = kwargs.get('num', 2)
bidirectional = kwargs.get('bidirectional', True)
tolerance = kwargs.get('tolerance', 1e-5)
points = lon, lat
dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)
center_lon = np.random.choice(lon, num)
#!/usr/bin/env python
"""
dynspec.py
----------------------------------
Dynamic spectrum class
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import time
import os
from os.path import split
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as sc
from copy import deepcopy as cp
from scint_models import scint_acf_model, scint_acf_model_2D, tau_acf_model,\
dnu_acf_model, scint_sspec_model, fit_parabola, fit_log_parabola
from scint_utils import is_valid, svd_model
from scipy.interpolate import griddata, interp1d, RectBivariateSpline
from scipy.signal import convolve2d, medfilt, savgol_filter
from scipy.io import loadmat
class Dynspec:
def __init__(self, filename=None, dyn=None, verbose=True, process=True,
lamsteps=False):
""""
Initialise a dynamic spectrum object by either reading from file
or from existing object
"""
if filename:
self.load_file(filename, verbose=verbose, process=process,
lamsteps=lamsteps)
elif dyn:
self.load_dyn_obj(dyn, verbose=verbose, process=process,
lamsteps=lamsteps)
else:
print("Error: No dynamic spectrum file or object")
def __add__(self, other):
"""
Defines dynamic spectra addition, which is concatination in time,
with the gaps filled
"""
print("Now adding {} ...".format(other.name))
if self.freq != other.freq \
or self.bw != other.bw or self.df != other.df:
print("WARNING: Code does not yet account for different \
frequency properties")
# Set constant properties
bw = self.bw
df = self.df
freqs = self.freqs
freq = self.freq
nchan = self.nchan
dt = self.dt
# Calculate properties for the gap
timegap = round((other.mjd - self.mjd)*86400
- self.tobs, 1) # time between two dynspecs
extratimes = np.arange(self.dt/2, timegap, dt)
if timegap < dt:
extratimes = [0]
nextra = 0
else:
nextra = len(extratimes)
dyngap = np.zeros([np.shape(self.dyn)[0], nextra])
# Set changed properties
name = self.name.split('.')[0] + "+" + other.name.split('.')[0] \
+ ".dynspec"
header = self.header + other.header
# times = np.concatenate((self.times, self.times[-1] + extratimes,
# self.times[-1] + extratimes[-1] + other.times))
nsub = self.nsub + nextra + other.nsub
tobs = self.tobs + timegap + other.tobs
# Note: need to check "times" attribute for added dynspec
times = np.linspace(0, tobs, nsub)
mjd = np.min([self.mjd, other.mjd]) # mjd for earliest dynspec
newdyn = np.concatenate((self.dyn, dyngap, other.dyn), axis=1)
# Get new dynspec object with these properties
newdyn = BasicDyn(newdyn, name=name, header=header, times=times,
freqs=freqs, nchan=nchan, nsub=nsub, bw=bw,
df=df, freq=freq, tobs=tobs, dt=dt, mjd=mjd)
return Dynspec(dyn=newdyn, verbose=False, process=False)
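# Sketch of the addition defined above (file names are placeholders):
#
# dyn1 = Dynspec(filename='obs1.dynspec')
# dyn2 = Dynspec(filename='obs2.dynspec')
# dyn_joined = dyn1 + dyn2 # concatenated in time, gap filled with zeros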
def load_file(self, filename, verbose=True, process=True, lamsteps=False):
"""
Load a dynamic spectrum from psrflux-format file
"""
start = time.time()
# Import all data from filename
if verbose:
print("LOADING {0}...".format(filename))
head = []
with open(filename, "r") as file:
for line in file:
if line.startswith("#"):
headline = str.strip(line[1:])
head.append(headline)
if str.split(headline)[0] == 'MJD0:':
# MJD of start of obs
self.mjd = float(str.split(headline)[1])
self.name = os.path.basename(filename)
self.header = head
rawdata = np.loadtxt(filename).transpose() # read file
self.times = np.unique(rawdata[2]*60) # time since obs start (secs)
self.freqs = rawdata[3] # Observing frequency in MHz.
fluxes = rawdata[4] # fluxes
fluxerrs = rawdata[5] # flux errors
self.nchan = int(np.unique(rawdata[1])[-1]) # number of channels
self.bw = self.freqs[-1] - self.freqs[0] # obs bw
self.df = round(self.bw/self.nchan, 5) # channel bw
self.bw = round(self.bw + self.df, 2) # correct bw
self.nchan += 1 # correct nchan
self.nsub = int(np.unique(rawdata[0])[-1]) + 1
self.tobs = self.times[-1]+self.times[0] # initial estimate of tobs
self.dt = self.tobs/self.nsub
if self.dt > 1:
self.dt = round(self.dt)
else:
self.times = np.linspace(self.times[0], self.times[-1], self.nsub)
self.tobs = self.dt * self.nsub # recalculated tobs
# Now reshape flux arrays into a 2D matrix
self.freqs = np.unique(self.freqs)
self.freq = round(np.mean(self.freqs), 2)
fluxes = fluxes.reshape([self.nsub, self.nchan]).transpose()
fluxerrs = fluxerrs.reshape([self.nsub, self.nchan]).transpose()
if self.df < 0: # flip things
self.df = -self.df
self.bw = -self.bw
# Flip flux matricies since self.freqs is now in ascending order
fluxes = np.flip(fluxes, 0)
fluxerrs = np.flip(fluxerrs, 0)
# Finished reading, now setup dynamic spectrum
self.dyn = fluxes # initialise dynamic spectrum
self.lamsteps = lamsteps
if process:
self.default_processing(lamsteps=lamsteps) # do default processing
end = time.time()
if verbose:
print("...LOADED in {0} seconds\n".format(round(end-start, 2)))
self.info()
def load_dyn_obj(self, dyn, verbose=True, process=True, lamsteps=False):
"""
Load in a dynamic spectrum object of different type.
"""
start = time.time()
# Import all data from filename
if verbose:
print("LOADING DYNSPEC OBJECT {0}...".format(dyn.name))
self.name = dyn.name
self.header = dyn.header
self.times = dyn.times # time since obs start (secs)
self.freqs = dyn.freqs # Observing frequency in MHz.
self.nchan = dyn.nchan # number of channels
self.nsub = dyn.nsub
self.bw = dyn.bw # obs bw
self.df = dyn.df # channel bw
self.freq = dyn.freq
self.tobs = dyn.tobs # initial estimate of tobs
self.dt = dyn.dt
self.mjd = dyn.mjd
self.dyn = dyn.dyn
self.lamsteps = lamsteps
if process:
self.default_processing(lamsteps=lamsteps) # do default processing
end = time.time()
if verbose:
print("...LOADED in {0} seconds\n".format(round(end-start, 2)))
self.info()
def default_processing(self, lamsteps=False):
"""
Default processing of a Dynspec object
"""
self.trim_edges() # remove zeros on band edges
self.refill() # refill with linear interpolation
self.correct_dyn()
self.calc_acf() # calculate the ACF
if lamsteps:
self.scale_dyn()
self.calc_sspec(lamsteps=lamsteps) # Calculate secondary spectrum
def plot_dyn(self, lamsteps=False, input_dyn=None, filename=None,
input_x=None, input_y=None, trap=False, display=True):
"""
Plot the dynamic spectrum
"""
plt.figure(1, figsize=(12, 6))
if input_dyn is None:
if lamsteps:
if not hasattr(self, 'lamdyn'):
self.scale_dyn()
dyn = self.lamdyn
elif trap:
if not hasattr(self, 'trapdyn'):
self.scale_dyn(scale='trapezoid')
dyn = self.trapdyn
else:
dyn = self.dyn
else:
dyn = input_dyn
medval = np.median(dyn[is_valid(dyn)*np.array(np.abs(
is_valid(dyn)) > 0)])
minval = np.min(dyn[is_valid(dyn)*np.array(np.abs(
is_valid(dyn)) > 0)])
# standard deviation
std = np.std(dyn[is_valid(dyn)*np.array(np.abs(
is_valid(dyn)) > 0)])
vmin = minval
vmax = medval+5*std
if input_dyn is None:
if lamsteps:
plt.pcolormesh(self.times/60, self.lam, dyn,
vmin=vmin, vmax=vmax)
plt.ylabel('Wavelength (m)')
else:
plt.pcolormesh(self.times/60, self.freqs, dyn,
vmin=vmin, vmax=vmax)
plt.ylabel('Frequency (MHz)')
plt.xlabel('Time (mins)')
# plt.colorbar() # arbitrary units
else:
plt.pcolormesh(input_x, input_y, dyn, vmin=vmin, vmax=vmax)
if filename is not None:
plt.savefig(filename, dpi=200, papertype='a4', bbox_inches='tight',
pad_inches=0.1)
plt.close()
elif input_dyn is None and display:
plt.show()
def plot_acf(self, method='acf1d', alpha=None, contour=False, filename=None,
input_acf=None, input_t=None, input_f=None, fit=True, mcmc=False,
display=True):
"""
Plot the ACF
"""
if not hasattr(self, 'acf'):
self.calc_acf()
if not hasattr(self, 'tau') and input_acf is None and fit:
self.get_scint_params(method=method, alpha=alpha, mcmc=mcmc)
if input_acf is None:
arr = self.acf
tspan = self.tobs
fspan = self.bw
else:
arr = input_acf
tspan = max(input_t) - min(input_t)
fspan = max(input_f) - min(input_f)
arr = np.fft.ifftshift(arr)
wn = arr[0][0] - arr[0][1] # subtract the white noise spike
arr[0][0] = arr[0][0] - wn # Set the noise spike to zero for plotting
arr = np.fft.fftshift(arr)
t_delays = np.linspace(-tspan/60, tspan/60, np.shape(arr)[1])
f_shifts = np.linspace(-fspan, fspan, np.shape(arr)[0])
if input_acf is None: # Also plot scintillation scales axes
fig, ax1 = plt.subplots()
if contour:
im = ax1.contourf(t_delays, f_shifts, arr)
else:
im = ax1.pcolormesh(t_delays, f_shifts, arr)
ax1.set_ylabel('Frequency lag (MHz)')
ax1.set_xlabel('Time lag (mins)')
miny, maxy = ax1.get_ylim()
if fit:
ax2 = ax1.twinx()
ax2.set_ylim(miny/self.dnu, maxy/self.dnu)
ax2.set_ylabel('Frequency lag / (dnu_d = {0})'.
format(round(self.dnu, 2)))
ax3 = ax1.twiny()
minx, maxx = ax1.get_xlim()
ax3.set_xlim(minx/(self.tau/60), maxx/(self.tau/60))
ax3.set_xlabel('Time lag/(tau_d={0})'.format(round(
self.tau/60, 2)))
fig.colorbar(im, pad=0.15)
else: # just plot acf without scales
if contour:
plt.contourf(t_delays, f_shifts, arr)
else:
plt.pcolormesh(t_delays, f_shifts, arr)
plt.ylabel('Frequency lag (MHz)')
plt.xlabel('Time lag (mins)')
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif input_acf is None and display:
plt.show()
def plot_sspec(self, lamsteps=False, input_sspec=None, filename=None,
input_x=None, input_y=None, trap=False, prewhite=True,
plotarc=False, maxfdop=np.inf, delmax=None, ref_freq=1400,
cutmid=0, startbin=0, display=True, colorbar=True):
"""
Plot the secondary spectrum
"""
if input_sspec is None:
if lamsteps:
if not hasattr(self, 'lamsspec'):
self.calc_sspec(lamsteps=lamsteps, prewhite=prewhite)
sspec = cp(self.lamsspec)
elif trap:
if not hasattr(self, 'trapsspec'):
self.calc_sspec(trap=trap, prewhite=prewhite)
sspec = cp(self.trapsspec)
else:
if not hasattr(self, 'sspec'):
self.calc_sspec(lamsteps=lamsteps, prewhite=prewhite)
sspec = cp(self.sspec)
xplot = cp(self.fdop)
else:
sspec = input_sspec
xplot = input_x
medval = np.median(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
# std = np.std(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
maxval = np.max(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
vmin = medval - 3
vmax = maxval - 3
# Get fdop plotting range
indicies = np.argwhere(np.abs(xplot) < maxfdop)
xplot = xplot[indicies].squeeze()
sspec = sspec[:, indicies].squeeze()
nr, nc = np.shape(sspec)
sspec[:, int(nc/2-np.floor(cutmid/2)):int(nc/2 +
np.ceil(cutmid/2))] = np.nan
sspec[:startbin, :] = np.nan
# Maximum value delay axis (us @ ref_freq)
delmax = np.max(self.tdel) if delmax is None else delmax
delmax = delmax*(ref_freq/self.freq)**2
ind = np.argmin(abs(self.tdel-delmax))
if input_sspec is None:
if lamsteps:
plt.pcolormesh(xplot, self.beta[:ind], sspec[:ind, :],
vmin=vmin, vmax=vmax)
plt.ylabel(r'$f_\lambda$ (m$^{-1}$)')
else:
plt.pcolormesh(xplot, self.tdel[:ind], sspec[:ind, :],
vmin=vmin, vmax=vmax)
plt.ylabel(r'$f_\nu$ ($\mu$s)')
plt.xlabel(r'$f_t$ (mHz)')
bottom, top = plt.ylim()
if plotarc:
if lamsteps:
eta = self.betaeta
else:
eta = self.eta
plt.plot(xplot, eta*np.power(xplot, 2),
'r--', alpha=0.5)
plt.ylim(bottom, top)
if colorbar:
plt.colorbar()
else:
plt.pcolormesh(xplot, input_y, sspec, vmin=vmin, vmax=vmax)
if colorbar:
plt.colorbar()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif input_sspec is None and display:
plt.show()
def plot_scat_im(self, input_scat_im=None, display=True, colorbar=True,
input_sspec=None, input_eta=None, input_fdop=None,
input_tdel=None, lamsteps=False, trap=False,
prewhite=True, use_angle=False, s=None, veff=None):
"""
Plot the scattered image
"""
c = 299792458.0 # m/s
if input_scat_im is None:
if not hasattr(self, 'scat_im'):
scat_im, xyaxes = self.calc_scat_im(input_sspec=input_sspec,
input_eta=input_eta,
input_fdop=input_fdop,
input_tdel=input_tdel,
lamsteps=lamsteps, trap=trap,
prewhite=prewhite)
if use_angle:
thetarad = (input_fdop / (1e3 * self.freq)) * \
(c * s / (veff * 1000))
thetadeg = thetarad * 180 / np.pi
xyaxes = thetadeg
else:
scat_im = input_scat_im
if use_angle:
thetarad = (input_fdop / (1e3 * self.freq)) * (c * s / (veff * 1000))
thetadeg = thetarad * 180 / np.pi
xyaxes = thetadeg
else:
xyaxes = input_fdop
scat_im -= np.min(scat_im)
scat_im += 1e-10
scat_im = 10*np.log10(scat_im)
medval = np.median(scat_im[is_valid(scat_im) * np.array(np.abs(scat_im) > 0)])
# # std = np.std(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
maxval = np.max(scat_im[is_valid(scat_im) * np.array(np.abs(scat_im) > 0)])
vmin = medval-3
vmax = maxval-3
plt.pcolormesh(xyaxes, xyaxes, scat_im, vmin=vmin, vmax=vmax)
plt.title('Scattered image')
if use_angle:
plt.xlabel('Angle parallel to velocity (deg)')
plt.ylabel('Angle perpendicular to velocity (deg)')
else:
plt.xlabel('Angle parallel to velocity')
plt.ylabel('Angle perpendicular to velocity')
if colorbar:
plt.colorbar()
if display:
plt.show()
def plot_all(self, dyn=1, sspec=3, acf=2, norm_sspec=4, colorbar=True,
lamsteps=False, filename=None, display=True):
"""
Plots multiple figures in one
"""
# Dynamic Spectrum
plt.subplot(2, 2, dyn)
self.plot_dyn(lamsteps=lamsteps)
plt.title("Dynamic Spectrum")
# Autocovariance Function
plt.subplot(2, 2, acf)
self.plot_acf(display=False)
plt.title("Autocovariance")
# Secondary Spectrum
plt.subplot(2, 2, sspec)
self.plot_sspec(lamsteps=lamsteps)
plt.title("Secondary Spectrum")
# Normalised Secondary Spectrum
plt.subplot(2, 2, norm_sspec)
self.norm_sspec(plot=True, scrunched=False, lamsteps=lamsteps,
plot_fit=False)
plt.title("Normalised fdop secondary spectrum")
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
def fit_arc(self, method='norm_sspec', asymm=False, plot=False, delmax=None,
numsteps=1e4, startbin=3, cutmid=3, lamsteps=True, etamax=None,
etamin=None, low_power_diff=-3, high_power_diff=-1.5, ref_freq=1400,
constraint=[0, np.inf], nsmooth=5, filename=None, noise_error=True,
display=True, log_parabola=False):
"""
Find the arc curvature with maximum power along it
constraint: Only search for peaks between constraint[0] and
constraint[1]
"""
if not hasattr(self, 'tdel'):
self.calc_sspec()
delmax = np.max(self.tdel) if delmax is None else delmax
delmax = delmax*(ref_freq/self.freq)**2 # adjust for frequency
if lamsteps:
if not hasattr(self, 'lamsspec'):
self.calc_sspec(lamsteps=lamsteps)
sspec = np.array(cp(self.lamsspec))
yaxis = cp(self.beta)
ind = np.argmin(abs(self.tdel-delmax))
ymax = self.beta[ind] # cut beta at equivalent value to delmax
else:
if not hasattr(self, 'sspec'):
self.calc_sspec()
sspec = np.array(cp(self.sspec))
yaxis = cp(self.tdel)
ymax = delmax
nr, nc = np.shape(sspec)
# Estimate noise in secondary spectrum
a = np.array(sspec[int(nr/2):,
int(nc/2 + np.ceil(cutmid/2)):].ravel())
b = np.array(sspec[int(nr/2):, 0:int(nc/2 -
np.floor(cutmid/2))].ravel())
noise = np.std(np.concatenate((a, b)))
# Adjust secondary spectrum
ind = np.argmin(abs(self.tdel-delmax))
sspec[0:startbin, :] = np.nan # mask first N delay bins
# mask middle N Doppler bins
sspec[:, int(nc/2 - np.floor(cutmid/2)):int(nc/2 +
np.ceil(cutmid/2))] = np.nan
sspec = sspec[0:ind, :] # cut at delmax
yaxis = yaxis[0:ind]
# noise of mean out to delmax
noise = np.sqrt(np.sum(np.power(noise, 2)))/np.sqrt(len(yaxis))
if etamax is None:
etamax = ymax/((self.fdop[1]-self.fdop[0])*cutmid)**2
if etamin is None:
etamin = (yaxis[1]-yaxis[0])*startbin/(max(self.fdop))**2
try:
len(etamin)
etamin_array = np.array(etamin).squeeze()
etamax_array = np.array(etamax).squeeze()
except TypeError:
# Force to be arrays for iteration
etamin_array = np.array([etamin])
etamax_array = np.array([etamax])
# At 1mHz for 1400MHz obs, the maximum arc terminates at delmax
max_sqrt_eta = np.sqrt(np.max(etamax_array))
min_sqrt_eta = np.sqrt(np.min(etamin_array))
# Create an array with equal steps in sqrt(curvature)
sqrt_eta_all = np.linspace(min_sqrt_eta, max_sqrt_eta, numsteps)
for iarc in range(0, len(etamin_array)):
if len(etamin_array) == 1:
                pass  # single arc: keep the scalar etamin and etamax as given
else:
etamin = etamin_array.squeeze()[iarc]
etamax = etamax_array.squeeze()[iarc]
if not lamsteps:
c = 299792458.0 # m/s
beta_to_eta = c*1e6/((ref_freq*10**6)**2)
etamax = etamax/(self.freq/ref_freq)**2 # correct for freq
etamax = etamax*beta_to_eta
etamin = etamin/(self.freq/ref_freq)**2
etamin = etamin*beta_to_eta
constraint = constraint/(self.freq/ref_freq)**2
constraint = constraint*beta_to_eta
sqrt_eta = sqrt_eta_all[(sqrt_eta_all <= np.sqrt(etamax)) *
(sqrt_eta_all >= np.sqrt(etamin))]
numsteps_new = len(sqrt_eta)
# initiate
etaArray = []
if method == 'norm_sspec':
# Get the normalised secondary spectrum, set for minimum eta as
# normalisation. Then calculate peak as
self.norm_sspec(eta=etamin, delmax=delmax, plot=False,
startbin=startbin, maxnormfac=1, cutmid=cutmid,
lamsteps=lamsteps, scrunched=True,
plot_fit=False, numsteps=numsteps_new)
norm_sspec = self.normsspecavg.squeeze()
etafrac_array = np.linspace(-1, 1, len(norm_sspec))
ind1 = np.argwhere(etafrac_array > 1/(2*len(norm_sspec)))
ind2 = np.argwhere(etafrac_array < -1/(2*len(norm_sspec)))
norm_sspec_avg = np.add(norm_sspec[ind1],
np.flip(norm_sspec[ind2], axis=0))/2
norm_sspec_avg = norm_sspec_avg.squeeze()
etafrac_array_avg = 1/etafrac_array[ind1].squeeze()
# Make sure is valid
filt_ind = is_valid(norm_sspec_avg)
norm_sspec_avg = np.flip(norm_sspec_avg[filt_ind], axis=0)
etafrac_array_avg = np.flip(etafrac_array_avg[filt_ind],
axis=0)
# Form eta array and cut at maximum
etaArray = etamin*etafrac_array_avg**2
ind = np.argwhere(etaArray < etamax)
etaArray = etaArray[ind].squeeze()
norm_sspec_avg = norm_sspec_avg[ind].squeeze()
# Smooth data
norm_sspec_avg_filt = \
savgol_filter(norm_sspec_avg, nsmooth, 1)
# search for peaks within constraint range
indrange = np.argwhere((etaArray > constraint[0]) *
(etaArray < constraint[1]))
sumpow_inrange = norm_sspec_avg_filt[indrange]
ind = np.argmin(np.abs(norm_sspec_avg_filt -
np.max(sumpow_inrange)))
# Now find eta and estimate error by fitting parabola
# Data from -3db on low curvature side to -1.5db on high side
max_power = norm_sspec_avg_filt[ind]
power = max_power
ind1 = 1
while (power > max_power + low_power_diff and
ind + ind1 < len(norm_sspec_avg_filt)-1): # -3db
ind1 += 1
power = norm_sspec_avg_filt[ind - ind1]
power = max_power
ind2 = 1
while (power > max_power + high_power_diff and
                   ind + ind2 < len(norm_sspec_avg_filt)-1):  # -1.5db power
ind2 += 1
power = norm_sspec_avg_filt[ind + ind2]
# Now select this region of data for fitting
xdata = etaArray[int(ind-ind1):int(ind+ind2)]
ydata = norm_sspec_avg[int(ind-ind1):int(ind+ind2)]
# Do the fit
# yfit, eta, etaerr = fit_parabola(xdata, ydata)
if log_parabola:
yfit, eta, etaerr = fit_log_parabola(xdata, ydata)
else:
yfit, eta, etaerr = fit_parabola(xdata, ydata)
                if np.mean(np.gradient(np.diff(yfit))) > 0:
#!/usr/bin/env python
# coding: utf-8
from getpass import getuser
# Settings of the annotation process
############
# How many frames a single batch spans.
# The very last frame is excluded, since frame counting starts at 0.
batchsize = 100
# Show every tickskip-th frame for annotation.
tickskip = 5
# Skip this many batches after each annotated one. Example: 3 means batch, skip, skip, skip, batch, ...
batchskip = 3
# Everything further away than this is dropped.
# This is because many lasers put NaN to some number, e.g. 29.999
laser_cutoff = 14
# Where to load the laser csv data and images from.
# TODO: Describe better!
basedir = "/work/" + getuser() + "/strands/wheelchair/dumped/"
# Where to save the detections to.
# TODO: Describe better!
savedir = "/media/" + getuser() + "/NSA1/strands/wheelchair/"
# The field-of-view of the laser you're using.
# From https://github.com/lucasb-eyer/strands_karl/blob/5a2dd60/launch/karl_robot.launch#L25
# TODO: change to min/max for supporting non-zero-centered lasers.
laserFoV = 225
# The field-of-view of the supportive camera you're using.
# From https://www.asus.com/3D-Sensor/Xtion_PRO_LIVE/specifications/
# TODO: change to min/max for supporting non-zero-centered cameras.
cameraFoV = 58
# The size of the camera is needed for pre-generating the image-axes in the plot for efficiency.
camsize = (480, 640)
# Radius of the circle around the cursor, in data-units.
# From https://thesegamesiplay.files.wordpress.com/2015/03/wheelchair.jpg
circrad = 1.22/2
# TODO: make the types of labels configurable? Although, does it even matter?
# End of settings
############
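# A rough illustration of how the settings above interact (hedged: the exact
# frame indexing is defined by the batch-building code further below, not by
# this comment). Assuming batches are laid out back-to-back over the recording,
# with batchsize=100, tickskip=5, batchskip=3:
#   - the first annotated batch spans roughly frames 0..100 and shows every
#     5th frame of that window,
#   - the following three 100-frame batches are skipped entirely,
#   - so the next annotated batch starts at roughly frame 400.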
import sys
import os
import json
import time
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.widgets import Cursor, AxesWidget
try:
import cv2
def imread(fname):
img = cv2.imread(fname)
if img is not None:
img = img[:,:,::-1] # BGR to RGB
return img
except ImportError:
# Python 2/3 compatibility
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
from matplotlib.image import imread as mpl_imread
def imread(fname):
try:
return mpl_imread(fname)
except FileNotFoundError:
return None
laserFoV = np.radians(laserFoV)
cameraFoV = np.radians(cameraFoV)
if len(sys.argv) < 2:
print("Usage: {} relative/path/to_file.bag".format(sys.argv[0]))
print()
print("relative to {}".format(basedir))
print("To perform a dry run without saving results use --dry-run or -n.")
print("To change into person annotation mode use --person or -p.")
sys.exit(1)
# Very crude, not robust argv handling.
name = None
dryrun = False
person_mode = False
for arg in sys.argv[1:]:
if arg in ("--dry-run", "-n"):
dryrun = True
elif arg in ("--person", "-p"):
person_mode = True
else:
if name is None:
name = arg
else:
print("Cannot parse arguments, please only specify a single path to the data, and/or flags for dry run or person only annotations.")
sys.exit(1)
def mkdirs(fname):
""" Make directories necessary for creating `fname`. """
dname = os.path.dirname(fname)
if not os.path.isdir(dname):
os.makedirs(dname)
# **TODO**: put into common toolbox repo.
def xy_to_rphi(x, y):
    # Note: axes are rotated by 90 degrees on purpose, so that phi = 0 points to the top.
return np.hypot(x, y), np.arctan2(-x, y)
def rphi_to_xy(r, phi):
return r * -np.sin(phi), r * np.cos(phi)
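# Quick sanity check of the rotated convention above (hedged illustration, not
# part of the original tool): phi = 0 corresponds to "straight ahead" along +y.
#   rphi_to_xy(1.0, 0.0)       -> (-0.0, 1.0)
#   xy_to_rphi(0.0, 1.0)       -> (1.0, 0.0)
#   rphi_to_xy(1.0, np.pi / 2) -> (-1.0, ~0.0), i.e. 90 degrees is to the left.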
def scan_to_xy(scan, thresh=None):
s = np.array(scan, copy=True)
if thresh is not None:
s[s > thresh] = np.nan
angles = np.linspace(-laserFoV/2, laserFoV/2, len(scan))
return rphi_to_xy(scan, angles)
def imload(name, *seqs):
for s in seqs:
fname = "{}{}_dir/{}.jpg".format(basedir, name, int(s))
im = imread(fname)
if im is not None:
return im
print("WARNING: Couldn't find any of " + ' ; '.join(map(str, map(int, seqs))))
return np.zeros(camsize + (3,), dtype=np.uint8)
class Anno1602:
def __init__(self, batches, scans, seqs, laser_thresh=laser_cutoff, circrad=circrad, xlim=None, ylim=None, dryrun=False, person_mode=False):
self.batches = batches
self.scans = scans
self.seqs = seqs
self.b = 0
self.i = 0
self.laser_thresh = laser_thresh
self.circrad = circrad
self.xlim = xlim
self.ylim = ylim
self.dryrun = dryrun
self.person_mode = person_mode
# Build the figure and the axes.
self.fig = plt.figure(figsize=(10,10))
gs = mpl.gridspec.GridSpec(3, 2, width_ratios=(3, 1))
self.axlaser = plt.subplot(gs[:,0])
axprev, axpres, axnext = plt.subplot(gs[0,1]), plt.subplot(gs[1,1]), plt.subplot(gs[2,1])
        self.imprev = axprev.imshow(np.random.randint(255, size=camsize + (3,)))
import cv2
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import torch as t
from skimage import io
from torchvision.utils import make_grid
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
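# Hedged usage sketch (the actual call site is not in this file): this
# DCGAN-style initialisation is usually applied recursively with Module.apply,
# e.g.
#   gen.apply(weights_init)
#   disc.apply(weights_init)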
def save_tensor_images(image_tensor, i, save_dir, prefix, resize_factor=0.5):
with t.no_grad():
if prefix == "rgb":
mean = t.tensor([0.34, 0.33, 0.35]).reshape(1, 3, 1, 1)
std = t.tensor([0.19, 0.18, 0.18]).reshape(1, 3, 1, 1)
elif prefix == "ir":
mean = 0.35
std = 0.18
elif prefix == "pred":
mean = 0.5
std = 0.5
elif prefix == "segment":
image_unflat = (image_tensor.detach().cpu() + 1) / 2
else:
            raise TypeError("Unknown prefix %r; expected 'rgb', 'ir', 'pred' or 'segment'" % (prefix,))
if prefix != "segment":
image_unflat = image_tensor.detach().cpu() * std + mean
image_grid = make_grid(image_unflat, nrow=3)
img = image_grid.permute(1, 2, 0).squeeze().numpy()
img = np.clip(img * 255, a_min=0, a_max=255).astype(np.uint8)
img = cv2.resize(img, (0, 0), fx=resize_factor, fy=resize_factor)
name = prefix + str(i) + r'.jpg'
io.imsave(os.path.join(save_dir, name), img)
def save_all_images(rgb, ir, pred, i, save_dir, segment=None, resize_factor=0.5):
with t.no_grad():
mean_rgb = t.tensor([0.34, 0.33, 0.35]).reshape(1, 3, 1, 1)
std_rgb = t.tensor([0.19, 0.18, 0.18]).reshape(1, 3, 1, 1)
rgb_n = rgb.detach().cpu() * std_rgb + mean_rgb
mean_ir = 0.35
std_ir = 0.18
ir_n = ir.detach().cpu() * std_ir + mean_ir
ir_n = t.cat([ir_n, ir_n, ir_n], dim=1)
pred_n = pred.detach().cpu() * 0.5 + 0.5
pred_n = t.cat([pred_n, pred_n, pred_n], dim=1)
if segment is not None:
segment_n = segment.detach().cpu() * 0.5 + 0.5
image_unflat = t.cat([pred_n, ir_n, rgb_n, segment_n], dim=0)
else:
image_unflat = t.cat([pred_n, ir_n, rgb_n], dim=0)
image_grid = make_grid(image_unflat, nrow=2)
img = image_grid.permute(1, 2, 0).squeeze().numpy()
img = np.clip(img * 255, a_min=0, a_max=255).astype(np.uint8)
img = cv2.resize(img, (0, 0), fx=resize_factor, fy=resize_factor)
io.imsave(os.path.join(save_dir, f"super{i:0>4d}.jpg"), img)
def save_model(disc, gen, cur_epoch, save_dir):
t.save(disc.state_dict(), os.path.join(save_dir, f"e_{cur_epoch:0>3d}_discriminator.pth"))
t.save(gen.state_dict(), os.path.join(save_dir, f"e_{cur_epoch:0>3d}_generator.pth"))
print("Models saved")
# def show_loss(src_dir):
# files_d = glob.glob(os.path.join(src_dir, 'v*_d_loss.npy'))
# sorted_disc_loss = sorted(files_d)
#
# files_g = glob.glob(os.path.join(src_dir, 'v*_g_loss.npy'))
# sorted_gen_loss = sorted(files_g)
#
# print('Reading all saved losses...')
# # print(sorted_disc_loss)
# arr_g = np.array([])
# arr_d = np.array([])
#
# for l in range(len(sorted_gen_loss)):
# arr_g = np.concatenate([arr_g, np.load(sorted_gen_loss[l])])
#
# arr_d = np.concatenate([arr_d, np.load(sorted_disc_loss[l])])
#
# plt.plot(np.arange(arr_d.shape[0]), arr_d, color='orange', label='Discriminator')
# plt.plot(np.arange(arr_g.shape[0]), arr_g, color='blue', label='Generator')
# plt.ylabel("Loss")
# plt.xlabel("Iteration")
# plt.legend()
# plt.show()
def save_loss(d, d1, d2, g, g1, g2, fm1, fm2, p, path, e):
np.save(os.path.join(path, f'v{e:0>3d}_d_loss.npy'), np.array(d))
np.save(os.path.join(path, f'v{e:0>3d}_d1_loss.npy'), np.array(d1))
np.save(os.path.join(path, f'v{e:0>3d}_d2_loss.npy'), np.array(d2))
np.save(os.path.join(path, f'v{e:0>3d}_g_loss.npy'), np.array(g))
np.save(os.path.join(path, f'v{e:0>3d}_g1_loss.npy'), np.array(g1))
np.save(os.path.join(path, f'v{e:0>3d}_g2_loss.npy'), np.array(g2))
np.save(os.path.join(path, f'v{e:0>3d}_fm1_loss.npy'), np.array(fm1))
np.save(os.path.join(path, f'v{e:0>3d}_fm2_loss.npy'), np.array(fm2))
np.save(os.path.join(path, f'v{e:0>3d}_p_loss.npy'), np.array(p))
def lr_lambda(epoch, decay_after=100):
''' Function for scheduling learning '''
return 1. if epoch < decay_after else 1 - float(epoch - decay_after) / (200 - decay_after)
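# Worked example of the schedule above (decay_after=100, 200 epochs total):
#   epoch 0..99 -> 1.0, epoch 100 -> 1.0, epoch 150 -> 0.5, epoch 200 -> 0.0
# Hedged usage sketch with PyTorch (the optimiser variable is an assumption):
#   scheduler = t.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)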
def show_loss(src_dir):
sorted_disc_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_d_loss.npy')))
sorted_gen_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_g_loss.npy')))
sorted_gen1_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_g1_loss.npy')))
sorted_gen2_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_g2_loss.npy')))
sorted_disc1_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_d1_loss.npy')))
sorted_disc2_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_d2_loss.npy')))
sorted_fm1_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_fm1_loss.npy')))
sorted_fm2_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_fm2_loss.npy')))
sorted_p_loss = sorted(glob.glob(os.path.join(src_dir, 'v*_p_loss.npy')))
print('Reading all saved losses...')
print(sorted_disc_loss)
arr_g = np.array([])
arr_d = np.array([])
arr_g1 = np.array([])
arr_g2 = np.array([])
    arr_d1 = np.array([])
#
import ctypes
import numpy
import time
import gateway.metadata as md
class Device:
    @staticmethod
    def downsample(y, size):
        # Split y into `size` equal chunks and average each chunk.
        yReshape = y.reshape(size, int(len(y)/size))
        yDownsamp = yReshape.mean(axis=1)
        return yDownsamp
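    # Worked example (hedged, illustration only):
    #   Device.downsample(numpy.arange(6), 3) -> array([0.5, 2.5, 4.5])
    # i.e. 6 samples are split into 3 equal chunks and each chunk is averaged.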
def __init__(self, sample_rate = 1, samplesPerChan = 1, subSamplesPerChan = 1, minValue = 0, maxValue = 10, IPNumber = "", moduleSlotNumber = 1, moduleChanRange = [0], uniqueChanIndexRange = [0]):
self.sample_rate = sample_rate
self.samplesPerChan = samplesPerChan
self.subSamplesPerChan = subSamplesPerChan
self.minValue = minValue
self.maxValue = maxValue
self.IPNumber = IPNumber
self.moduleSlotNumber = moduleSlotNumber
self.moduleChanRange = numpy.array(moduleChanRange)
        self.uniqueChanIndexRange = numpy.array(uniqueChanIndexRange)
# Mostly based on the code written by <NAME>:
# https://github.com/mrharicot/monodepth/blob/master/utils/evaluate_kitti.py
from __future__ import division
import sys
import cv2
import os
import numpy as np
import argparse
from depth_evaluation_utils import *
parser = argparse.ArgumentParser()
parser.add_argument("--kitti_dir", type=str, help='Path to the KITTI dataset directory')
parser.add_argument("--pred_dir", type=str, help="Path to the prediction directory")
parser.add_argument('--min_depth', type=float, default=1e-3, help="Threshold for minimum depth")
parser.add_argument('--max_depth', type=float, default=80, help="Threshold for maximum depth")
parser.add_argument("--model_name", type=str, help="name of model")
args = parser.parse_args()
def convert_disps_to_depths_stereo(gt_disparities, pred_depths):
gt_depths = []
pred_depths_resized = []
pred_disparities_resized = []
for i in range(len(gt_disparities)):
gt_disp = gt_disparities[i]
height, width = gt_disp.shape
pred_depth = pred_depths[i]
pred_depth = cv2.resize(pred_depth, (width, height), interpolation=cv2.INTER_LINEAR)
pred_disparities_resized.append(1./pred_depth)
        mask = gt_disp > 0
        # depth = focal_length * baseline / disparity; 0.54 m is the KITTI stereo baseline.
        # Adding (1.0 - mask) avoids division by zero where the disparity is 0 (invalid pixels).
        gt_depth = width_to_focal[width] * 0.54 / (gt_disp + (1.0 - mask))
#pred_depth = width_to_focal[width] * 0.54 / pred_disp
gt_depths.append(gt_depth)
pred_depths_resized.append(pred_depth)
return gt_depths, pred_depths_resized, pred_disparities_resized
def main():
dir_names = [dir_name for dir_name in os.listdir(args.pred_dir) \
if os.path.isdir(os.path.join(args.pred_dir, dir_name))]
for dir_name in dir_names:
pred_depths = np.load(os.path.join(args.pred_dir, dir_name) + '/depth/%s.npy' % args.model_name)
print('evaluating ' + args.pred_dir + '...')
file_names = os.listdir(os.path.join(args.kitti_dir, dir_name[:-16], dir_name, 'image_02', 'data'))
test_files = [dir_name[:-16] +'/' + dir_name + '/image_02/' + 'data/' + file_name \
for file_name in file_names]
gt_files, gt_calib, im_sizes, im_files, cams = \
read_file_data(test_files, args.kitti_dir)
num_test = len(im_files)
gt_depths = []
pred_depths_resized = []
for t_id in range(num_test):
camera_id = cams[t_id] # 2 is left, 3 is right
pred_depths_resized.append(
cv2.resize(pred_depths[t_id],
(im_sizes[t_id][1], im_sizes[t_id][0]),
interpolation=cv2.INTER_LINEAR))
depth = generate_depth_map(gt_calib[t_id],
gt_files[t_id],
im_sizes[t_id],
camera_id,
False,
True)
gt_depths.append(depth.astype(np.float32))
pred_depths = pred_depths_resized
rms = np.zeros(num_test, np.float32)
log_rms = np.zeros(num_test, np.float32)
abs_rel = np.zeros(num_test, np.float32)
sq_rel = np.zeros(num_test, np.float32)
d1_all = np.zeros(num_test, np.float32)
a1 = np.zeros(num_test, np.float32)
a2 = np.zeros(num_test, np.float32)
        a3 = np.zeros(num_test, np.float32)
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import gc
import pytest
import numpy
import awkward1
py27 = 2 if sys.version_info[0] < 3 else 1
def test_refcount1():
i = numpy.arange(12, dtype="i4").reshape(3, 4)
assert sys.getrefcount(i) == 2
i2 = awkward1.layout.Identities32(awkward1.layout.Identities32.newref(), [(0, "hey"), (1, "there")], i)
assert (sys.getrefcount(i), sys.getrefcount(i2)) == (3, 2)
tmp = numpy.asarray(i2)
assert tmp.tolist() == [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
assert (sys.getrefcount(i), sys.getrefcount(i2)) == (3, 2 + 1*py27)
del tmp
assert (sys.getrefcount(i), sys.getrefcount(i2)) == (3, 2)
tmp2 = i2.array
assert tmp2.tolist() == [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
assert (sys.getrefcount(i), sys.getrefcount(i2)) == (3, 2 + 1*py27)
del tmp2
assert (sys.getrefcount(i), sys.getrefcount(i2)) == (3, 2)
del i2
assert sys.getrefcount(i) == 2
def test_refcount2():
i = numpy.arange(6, dtype="i4").reshape(3, 2)
i2 = awkward1.layout.Identities32(awkward1.layout.Identities32.newref(), [], i)
x = numpy.arange(12).reshape(3, 4)
x2 = awkward1.layout.NumpyArray(x)
x2.identities = i2
del i
del i2
del x
i3 = x2.identities
del x2
gc.collect()
assert numpy.asarray(i3).tolist() == [[0, 1], [2, 3], [4, 5]]
del i3
gc.collect()
def test_refcount3():
i = numpy.arange(6, dtype="i4").reshape(3, 2)
i2 = awkward1.layout.Identities32(awkward1.layout.Identities32.newref(), [], i)
x = numpy.arange(12).reshape(3, 4)
x2 = awkward1.layout.NumpyArray(x)
x2.identities = i2
del i2
assert sys.getrefcount(i) == 3
x2.identities = None
assert sys.getrefcount(i) == 2
def test_numpyarray_setidentities():
x = numpy.arange(160).reshape(40, 4)
x2 = awkward1.layout.NumpyArray(x)
x2.setidentities()
assert numpy.asarray(x2.identities).tolist() == numpy.arange(40).reshape(40, 1).tolist()
def test_listoffsetarray_setidentities():
content = awkward1.layout.NumpyArray(numpy.arange(10))
offsets = awkward1.layout.Index32(numpy.array([0, 3, 3, 5, 10], dtype="i4"))
jagged = awkward1.layout.ListOffsetArray32(offsets, content)
jagged.setidentities()
    assert numpy.asarray(jagged.identities).tolist() == [[0], [1], [2], [3]]
import unittest
import numpy as np
from libpysal import cg, examples
# dev import structure
from .. import network as spgh
from .. import util
try:
import geopandas
GEOPANDAS_EXTINCT = False
except ImportError:
GEOPANDAS_EXTINCT = True
class TestNetwork(unittest.TestCase):
def setUp(self):
self.path_to_shp = examples.get_path("streets.shp")
# network instantiated from shapefile
self.ntw_from_shp = spgh.Network(
in_data=self.path_to_shp, weightings=True, w_components=True
)
self.n_known_arcs, self.n_known_vertices = 303, 230
def tearDown(self):
pass
def test_network_data_read(self):
# shp test against known
self.assertEqual(len(self.ntw_from_shp.arcs), self.n_known_arcs)
self.assertEqual(len(self.ntw_from_shp.vertices), self.n_known_vertices)
arc_lengths = self.ntw_from_shp.arc_lengths.values()
self.assertAlmostEqual(sum(arc_lengths), 104414.0920159, places=5)
self.assertIn(0, self.ntw_from_shp.adjacencylist[1])
self.assertIn(0, self.ntw_from_shp.adjacencylist[2])
self.assertNotIn(0, self.ntw_from_shp.adjacencylist[3])
@unittest.skipIf(GEOPANDAS_EXTINCT, "Missing Geopandas")
def test_network_from_geopandas(self):
# network instantiated from geodataframe
gdf = geopandas.read_file(self.path_to_shp)
self.ntw_from_gdf = spgh.Network(in_data=gdf, w_components=True)
# gdf test against known
self.assertEqual(len(self.ntw_from_gdf.arcs), self.n_known_arcs)
self.assertEqual(len(self.ntw_from_gdf.vertices), self.n_known_vertices)
# shp against gdf
self.assertEqual(len(self.ntw_from_shp.arcs), len(self.ntw_from_gdf.arcs))
self.assertEqual(
len(self.ntw_from_shp.vertices), len(self.ntw_from_gdf.vertices)
)
def test_contiguity_weights(self):
known_network_histo = [(2, 35), (3, 89), (4, 105), (5, 61), (6, 13)]
observed_network_histo = self.ntw_from_shp.w_network.histogram
self.assertEqual(known_network_histo, observed_network_histo)
known_graph_histo = [(2, 2), (3, 2), (4, 47), (5, 80), (6, 48)]
observed_graph_histo = self.ntw_from_shp.w_graph.histogram
self.assertEqual(observed_graph_histo, known_graph_histo)
def test_components(self):
known_network_arc = (225, 226)
observed_network_arc = self.ntw_from_shp.network_component2arc[0][-1]
self.assertEqual(observed_network_arc, known_network_arc)
known_graph_edge = (207, 208)
observed_graph_edge = self.ntw_from_shp.graph_component2edge[0][-1]
self.assertEqual(observed_graph_edge, known_graph_edge)
def test_distance_band_weights(self):
w = self.ntw_from_shp.distancebandweights(threshold=500)
self.assertEqual(w.n, 230)
self.assertEqual(
w.histogram,
[(1, 22), (2, 58), (3, 63), (4, 40), (5, 36), (6, 3), (7, 5), (8, 3)],
)
def test_split_arcs_200(self):
n200 = self.ntw_from_shp.split_arcs(200.0)
self.assertEqual(len(n200.arcs), 688)
def test_enum_links_vertex(self):
coincident = self.ntw_from_shp.enum_links_vertex(24)
self.assertIn((24, 48), coincident)
@unittest.skipIf(GEOPANDAS_EXTINCT, "Missing Geopandas")
def test_element_as_gdf(self):
vertices, arcs = spgh.element_as_gdf(
self.ntw_from_shp, vertices=True, arcs=True
)
known_vertex_wkt = "POINT (728368.04762 877125.89535)"
obs_vertex = vertices.loc[(vertices["id"] == 0), "geometry"].squeeze()
obs_vertex_wkt = obs_vertex.wkt
self.assertEqual(obs_vertex_wkt, known_vertex_wkt)
known_arc_wkt = (
"LINESTRING (728368.04762 877125.89535, " + "728368.13931 877023.27186)"
)
obs_arc = arcs.loc[(arcs["id"] == (0, 1)), "geometry"].squeeze()
obs_arc_wkt = obs_arc.wkt
self.assertEqual(obs_arc_wkt, known_arc_wkt)
arcs = spgh.element_as_gdf(self.ntw_from_shp, arcs=True)
known_arc_wkt = (
"LINESTRING (728368.04762 877125.89535, " + "728368.13931 877023.27186)"
)
obs_arc = arcs.loc[(arcs["id"] == (0, 1)), "geometry"].squeeze()
obs_arc_wkt = obs_arc.wkt
self.assertEqual(obs_arc_wkt, known_arc_wkt)
def test_round_sig(self):
# round to 2 significant digits test
x_round2, y_round2 = 1200, 1900
self.ntw_from_shp.vertex_sig = 2
obs_xy_round2 = self.ntw_from_shp._round_sig((1215, 1865))
self.assertEqual(obs_xy_round2, (x_round2, y_round2))
# round to no significant digits test
x_roundNone, y_roundNone = 1215, 1865
self.ntw_from_shp.vertex_sig = None
obs_xy_roundNone = self.ntw_from_shp._round_sig((1215, 1865))
self.assertEqual(obs_xy_roundNone, (x_roundNone, y_roundNone))
class TestNetworkPointPattern(unittest.TestCase):
def setUp(self):
path_to_shp = examples.get_path("streets.shp")
self.ntw = spgh.Network(in_data=path_to_shp)
self.pp1_str = "schools"
self.pp2_str = "crimes"
iterator = [(self.pp1_str, "pp1"), (self.pp2_str, "pp2")]
for (obs, idx) in iterator:
path_to_shp = examples.get_path("%s.shp" % obs)
self.ntw.snapobservations(path_to_shp, obs, attribute=True)
setattr(self, idx, self.ntw.pointpatterns[obs])
self.known_pp1_npoints = 8
def tearDown(self):
pass
@unittest.skipIf(GEOPANDAS_EXTINCT, "Missing Geopandas")
def test_pp_from_geopandas(self):
self.gdf_pp1_str = "schools"
self.gdf_pp2_str = "crimes"
iterator = [(self.gdf_pp1_str, "gdf_pp1"), (self.gdf_pp2_str, "gdf_pp2")]
for (obs, idx) in iterator:
path_to_shp = examples.get_path("%s.shp" % obs)
in_data = geopandas.read_file(path_to_shp)
self.ntw.snapobservations(in_data, obs, attribute=True)
setattr(self, idx, self.ntw.pointpatterns[obs])
self.assertEqual(self.pp1.npoints, self.gdf_pp1.npoints)
self.assertEqual(self.pp2.npoints, self.gdf_pp2.npoints)
def test_split_arcs_1000(self):
n1000 = self.ntw.split_arcs(1000.0)
self.assertEqual(len(n1000.arcs), 303)
def test_add_point_pattern(self):
self.assertEqual(self.pp1.npoints, self.known_pp1_npoints)
self.assertIn("properties", self.pp1.points[0])
self.assertIn([1], self.pp1.points[0]["properties"])
def test_count_per_link_network(self):
counts = self.ntw.count_per_link(self.pp1.obs_to_arc, graph=False)
meancounts = sum(counts.values()) / float(len(counts.keys()))
self.assertAlmostEqual(meancounts, 1.0, places=5)
def test_count_per_edge_graph(self):
counts = self.ntw.count_per_link(self.pp1.obs_to_arc, graph=True)
meancounts = sum(counts.values()) / float(len(counts.keys()))
self.assertAlmostEqual(meancounts, 1.0, places=5)
def test_simulate_normal_observations(self):
sim = self.ntw.simulate_observations(self.known_pp1_npoints)
self.assertEqual(self.known_pp1_npoints, sim.npoints)
def test_simulate_poisson_observations(self):
sim = self.ntw.simulate_observations(
self.known_pp1_npoints, distribution="poisson"
)
self.assertEqual(self.known_pp1_npoints, sim.npoints)
def test_all_neighbor_distances(self):
matrix1, tree = self.ntw.allneighbordistances(self.pp1_str, gen_tree=True)
known_mtx_val = 17682.436988
known_tree_val = (173, 64)
self.assertAlmostEqual(np.nansum(matrix1[0]), known_mtx_val, places=4)
self.assertEqual(tree[(6, 7)], known_tree_val)
del self.ntw.alldistances
del self.ntw.distancematrix
matrix2 = self.ntw.allneighbordistances(self.pp1_str, fill_diagonal=0.0)
observed = matrix2.diagonal()
known = np.zeros(matrix2.shape[0])
self.assertEqual(observed.all(), known.all())
del self.ntw.alldistances
del self.ntw.distancematrix
matrix3 = self.ntw.allneighbordistances(self.pp1_str, snap_dist=True)
known_mtx_val = 3218.2597894
observed_mtx_val = matrix3
self.assertAlmostEqual(observed_mtx_val[0, 1], known_mtx_val, places=4)
del self.ntw.alldistances
del self.ntw.distancematrix
matrix4 = self.ntw.allneighbordistances(self.pp1_str, fill_diagonal=0.0)
observed = matrix4.diagonal()
known = np.zeros(matrix4.shape[0])
self.assertEqual(observed.all(), known.all())
del self.ntw.alldistances
del self.ntw.distancematrix
matrix5, tree = self.ntw.allneighbordistances(self.pp2_str, gen_tree=True)
known_mtx_val = 1484112.694526529
known_tree_val = (-0.1, -0.1)
self.assertAlmostEqual(np.nansum(matrix5[0]), known_mtx_val, places=4)
self.assertEqual(tree[(18, 19)], known_tree_val)
del self.ntw.alldistances
del self.ntw.distancematrix
def test_all_neighbor_distances_multiproccessing(self):
matrix1, tree = self.ntw.allneighbordistances(
self.pp1_str, fill_diagonal=0.0, n_processes="all", gen_tree=True
)
known_mtx1_val = 17682.436988
known_tree_val = (173, 64)
observed = matrix1.diagonal()
known = np.zeros(matrix1.shape[0])
self.assertEqual(observed.all(), known.all())
self.assertAlmostEqual(np.nansum(matrix1[0]), known_mtx1_val, places=4)
self.assertEqual(tree[(6, 7)], known_tree_val)
del self.ntw.alldistances
del self.ntw.distancematrix
matrix2 = self.ntw.allneighbordistances(self.pp1_str, n_processes=2)
known_mtx2_val = 17682.436988
self.assertAlmostEqual(np.nansum(matrix2[0]), known_mtx2_val, places=4)
del self.ntw.alldistances
del self.ntw.distancematrix
matrix3, tree = self.ntw.allneighbordistances(
self.pp1_str, fill_diagonal=0.0, n_processes=2, gen_tree=True
)
known_mtx3_val = 17682.436988
known_tree_val = (173, 64)
self.assertAlmostEqual(np.nansum(matrix3[0]), known_mtx3_val, places=4)
self.assertEqual(tree[(6, 7)], known_tree_val)
del self.ntw.alldistances
del self.ntw.distancematrix
def test_nearest_neighbor_distances(self):
# general test
with self.assertRaises(KeyError):
self.ntw.nearestneighbordistances("i_should_not_exist")
nnd1 = self.ntw.nearestneighbordistances(self.pp1_str)
nnd2 = self.ntw.nearestneighbordistances(self.pp1_str, destpattern=self.pp1_str)
nndv1 = np.array(list(nnd1.values()))[:, 1].astype(float)
nndv2 = np.array(list(nnd2.values()))[:, 1].astype(float)
np.testing.assert_array_almost_equal_nulp(nndv1, nndv2)
del self.ntw.alldistances
del self.ntw.distancematrix
# nearest neighbor keeping zero test
known_zero = ([19], 0.0)[0]
nn_c = self.ntw.nearestneighbordistances(self.pp2_str, keep_zero_dist=True)
self.assertEqual(nn_c[18][0], known_zero)
del self.ntw.alldistances
del self.ntw.distancematrix
# nearest neighbor omitting zero test
known_nonzero = ([11], 165.33982412719126)[1]
nn_c = self.ntw.nearestneighbordistances(self.pp2_str, keep_zero_dist=False)
self.assertAlmostEqual(nn_c[18][1], known_nonzero, places=4)
del self.ntw.alldistances
del self.ntw.distancematrix
# nearest neighbor with snap distance
known_neigh = ([3], 402.5219673922477)[1]
nn_c = self.ntw.nearestneighbordistances(
self.pp2_str, keep_zero_dist=True, snap_dist=True
)
self.assertAlmostEqual(nn_c[0][1], known_neigh, places=4)
del self.ntw.alldistances
del self.ntw.distancematrix
@unittest.skipIf(GEOPANDAS_EXTINCT, "Missing Geopandas")
def test_element_as_gdf(self):
obs = spgh.element_as_gdf(self.ntw, pp_name=self.pp1_str)
snap_obs = spgh.element_as_gdf(self.ntw, pp_name=self.pp1_str, snapped=True)
known_dist = 205.65961300587043
observed_point = obs.loc[(obs["id"] == 0), "geometry"].squeeze()
snap_point = snap_obs.loc[(snap_obs["id"] == 0), "geometry"].squeeze()
observed_dist = observed_point.distance(snap_point)
self.assertAlmostEqual(observed_dist, known_dist, places=8)
with self.assertRaises(KeyError):
spgh.element_as_gdf(self.ntw, pp_name="i_should_not_exist")
class TestNetworkAnalysis(unittest.TestCase):
def setUp(self):
path_to_shp = examples.get_path("streets.shp")
self.ntw = spgh.Network(in_data=path_to_shp)
self.pt_str = "schools"
path_to_shp = examples.get_path("%s.shp" % self.pt_str)
self.ntw.snapobservations(path_to_shp, self.pt_str, attribute=True)
npts = self.ntw.pointpatterns[self.pt_str].npoints
self.ntw.simulate_observations(npts)
self.test_permutations = 3
self.test_steps = 5
def tearDown(self):
pass
def test_network_f(self):
obtained = self.ntw.NetworkF(
self.ntw.pointpatterns[self.pt_str],
permutations=self.test_permutations,
nsteps=self.test_steps,
)
self.assertEqual(obtained.lowerenvelope.shape[0], self.test_steps)
def test_network_g(self):
obtained = self.ntw.NetworkG(
self.ntw.pointpatterns[self.pt_str],
permutations=self.test_permutations,
nsteps=self.test_steps,
)
self.assertEqual(obtained.lowerenvelope.shape[0], self.test_steps)
def test_network_k(self):
obtained = self.ntw.NetworkK(
self.ntw.pointpatterns[self.pt_str],
permutations=self.test_permutations,
nsteps=self.test_steps,
)
self.assertEqual(obtained.lowerenvelope.shape[0], self.test_steps)
class TestNetworkUtils(unittest.TestCase):
def setUp(self):
path_to_shp = examples.get_path("streets.shp")
self.ntw = spgh.Network(in_data=path_to_shp)
def tearDown(self):
pass
def test_compute_length(self):
self.point1, self.point2 = (0, 0), (1, 1)
self.length = util.compute_length(self.point1, self.point2)
self.assertAlmostEqual(self.length, 1.4142135623730951, places=4)
def test_get_neighbor_distances(self):
self.known_neighs = {1: 102.62353453439829, 2: 660.000001049743}
self.neighs = util.get_neighbor_distances(self.ntw, 0, self.ntw.arc_lengths)
self.assertAlmostEqual(self.neighs[1], 102.62353453439829, places=4)
self.assertAlmostEqual(self.neighs[2], 660.000001049743, places=4)
def test_generate_tree(self):
self.known_path = [23, 22, 20, 19, 170, 2, 0]
self.distance, self.pred = util.dijkstra(self.ntw, 0)
self.tree = util.generatetree(self.pred)
self.assertEqual(self.tree[3], self.known_path)
def test_dijkstra(self):
self.distance, self.pred = util.dijkstra(self.ntw, 0)
self.assertAlmostEqual(self.distance[196], 5505.668247, places=4)
self.assertEqual(self.pred[196], 133)
def test_dijkstra_mp(self):
self.distance, self.pred = util.dijkstra_mp((self.ntw, 0))
self.assertAlmostEqual(self.distance[196], 5505.668247, places=4)
self.assertEqual(self.pred[196], 133)
def test_squared_distance_point_link(self):
self.point, self.link = (1, 1), ((0, 0), (2, 0))
self.sqrd_nearp = util.squared_distance_point_link(self.point, self.link)
self.assertEqual(self.sqrd_nearp[0], 1.0)
self.assertEqual(self.sqrd_nearp[1].all(), np.array([1.0, 0.0]).all())
def test_snap_points_to_links(self):
self.points = {0: cg.shapes.Point((1, 1))}
self.links = [
cg.shapes.Chain([cg.shapes.Point((0, 0)), cg.shapes.Point((2, 0))])
]
self.snapped = util.snap_points_to_links(self.points, self.links)
self.known_coords = [xy._Point__loc for xy in self.snapped[0][0]]
self.assertEqual(self.known_coords, [(0.0, 0.0), (2.0, 0.0)])
        self.assertEqual(self.snapped[0][1].all(), np.array([1.0, 0.0]).all())
import numpy as np
from PIL import Image
def main():
original_image = Image.open('data/raw/gonzalez.tif')
original_image_array = np.asarray(original_image)
print(original_image_array)
mask_image = Image.open('data/ground_truth/gonzalez/gonzalez_mask.tif')
    mask_image_array = np.asarray(mask_image)
import unittest
from yauber_algo.errors import *
class CategorizeTestCase(unittest.TestCase):
def test_categorize(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import categorize
#
# Function settings
#
algo = 'categorize'
func = categorize
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]),
func,
(
                    np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
import numpy as np
from ..decorator import check_vector
class MAE:
def predict(self, y_predict, y):
result = np.mean(np.abs(y_predict - y))
return result
def __str__(self):
return 'mae'
class MSE:
@check_vector
def predict(self, y_predict, y):
result = np.square(y_predict - y)
result = np.mean(result)
return result
@check_vector
def d(self, y_predict, y):
diff = y_predict - y
shape = diff.shape
result = 1
result = result * np.ones(shape)/np.prod(shape)
result = result * 2 * diff
return result
def __str__(self):
return 'mse'
class BINARY_CROSS_ENTROPY:
@check_vector
def predict(self, y_predict, y):
H = self.get_H(y_predict, y)
result = np.mean(H)
return result
def relu(self, X):
return np.maximum(X,0)
def transform(self, X):
        # input : the value to be transformed
        # output : the transformed (sigmoid) value
        # numerically stable evaluation of the sigmoid function
return np.exp(-self.relu(-X))/(1.0 + np.exp(-np.abs(X)))
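    # Sanity check of the numerically stable sigmoid above (hedged illustration):
    #   transform(0.0) -> exp(0) / (1 + exp(0)) = 0.5
    #   for large negative X: relu(-X) = -X, so the numerator exp(X) -> 0 and the
    #   result -> 0; for large positive X the numerator is 1 and the result -> 1,
    #   matching sigmoid(X) without ever overflowing exp().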
def derv_H_sigmoid(self, logit, z): # z는 참 레이블
return -z + self.transform(logit)
def get_H(self, X, z):
return self.relu(X) - X * z + np.log(1 + np.exp(-np.abs(X)))
@check_vector
def d(self, y_predict, y):
result = 1.0 / np.prod(y_predict.shape)
result = result * self.derv_H_sigmoid(y_predict, y)
return result
def __str__(self):
return 'BINARY_CROSS_ENTROPY'
class ACCURACY:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0.5)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
result = self.safe_div(tp+tn, tp+tn+fp+fn)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
def __str__(self):
return 'ACCURACY'
class PRECISION:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
result = self.safe_div(tp, tp+fp)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
def __str__(self):
return 'PRECISION'
class RECALL:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0)
ans_yes = np.greater(y, 0.5)
        est_no = np.logical_not(est_yes)
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequences to provide input to Keras
"""
__all__ = [
"NodeSequence",
"LinkSequence",
"OnDemandLinkSequence",
"FullBatchSequence",
"SparseFullBatchSequence",
"RelationalFullBatchNodeSequence",
"PaddedGraphSequence",
]
import warnings
import operator
import random
import collections
import numpy as np
import itertools as it
import networkx as nx
import scipy.sparse as sps
from tensorflow.keras import backend as K
from functools import reduce
from tensorflow.keras.utils import Sequence
from ..data.unsupervised_sampler import UnsupervisedSampler
from ..core.utils import is_real_iterable, normalize_adj
from ..random import random_state
from scipy import sparse
from ..core.experimental import experimental
class NodeSequence(Sequence):
"""Keras-compatible data generator to use with the Keras
methods :meth:`keras.Model.fit`, :meth:`keras.Model.evaluate`,
and :meth:`keras.Model.predict`.
    This class generates data samples for node inference models
and should be created using the `.flow(...)` method of
:class:`GraphSAGENodeGenerator` or :class:`DirectedGraphSAGENodeGenerator`
or :class:`HinSAGENodeGenerator` or :class:`Attri2VecNodeGenerator`.
These generator classes are used within the NodeSequence to generate
the required features for downstream ML tasks from the graph.
Args:
sample_function (Callable): A function that returns features for supplied head nodes.
ids (list): A list of the node_ids to be used as head-nodes in the downstream task.
targets (list, optional): A list of targets or labels to be used in the downstream task.
shuffle (bool): If True (default) the ids will be randomly shuffled every epoch.
"""
def __init__(
self, sample_function, batch_size, ids, targets=None, shuffle=True, seed=None
):
# Check that ids is an iterable
if not is_real_iterable(ids):
raise TypeError("IDs must be an iterable or numpy array of graph node IDs")
# Check targets is iterable & has the correct length
if targets is not None:
if not is_real_iterable(targets):
raise TypeError("Targets must be None or an iterable or numpy array ")
if len(ids) != len(targets):
raise ValueError(
"The length of the targets must be the same as the length of the ids"
)
self.targets = np.asanyarray(targets)
else:
self.targets = None
# Store the generator to draw samples from graph
if isinstance(sample_function, collections.abc.Callable):
self._sample_function = sample_function
else:
raise TypeError(
"({}) The sampling function expects a callable function.".format(
type(self).__name__
)
)
self.ids = list(ids)
self.data_size = len(self.ids)
self.shuffle = shuffle
self.batch_size = batch_size
self._rs, _ = random_state(seed)
# Shuffle IDs to start
self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.ceil(self.data_size / self.batch_size))
def __getitem__(self, batch_num):
"""
Generate one batch of data
Args:
batch_num (int): number of a batch
Returns:
batch_feats (list): Node features for nodes and neighbours sampled from a
batch of the supplied IDs
batch_targets (list): Targets/labels for the batch.
"""
start_idx = self.batch_size * batch_num
end_idx = start_idx + self.batch_size
if start_idx >= self.data_size:
raise IndexError("Mapper: batch_num larger than length of data")
# print("Fetching batch {} [{}]".format(batch_num, start_idx))
# The ID indices for this batch
batch_indices = self.indices[start_idx:end_idx]
# Get head (root) nodes
head_ids = [self.ids[ii] for ii in batch_indices]
# Get corresponding targets
batch_targets = None if self.targets is None else self.targets[batch_indices]
# Get features for nodes
batch_feats = self._sample_function(head_ids, batch_num)
return batch_feats, batch_targets
def on_epoch_end(self):
"""
Shuffle all head (root) nodes at the end of each epoch
"""
self.indices = list(range(self.data_size))
if self.shuffle:
self._rs.shuffle(self.indices)
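# Hedged usage sketch for NodeSequence (class and method names taken from the
# docstring above; the graph `G`, the ids and the targets are assumptions):
#   generator = GraphSAGENodeGenerator(G, batch_size=50, num_samples=[10, 5])
#   train_seq = generator.flow(train_ids, train_targets, shuffle=True)
#   model.fit(train_seq, epochs=10)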
class LinkSequence(Sequence):
"""
Keras-compatible data generator to use with Keras methods :meth:`keras.Model.fit`,
:meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`
This class generates data samples for link inference models
and should be created using the :meth:`flow` method of
:class:`GraphSAGELinkGenerator` or :class:`HinSAGELinkGenerator` or :class:`Attri2VecLinkGenerator`.
Args:
sample_function (Callable): A function that returns features for supplied head nodes.
ids (iterable): Link IDs to batch, each link id being a tuple of (src, dst) node ids.
targets (list, optional): A list of targets or labels to be used in the downstream task.
shuffle (bool): If True (default) the ids will be randomly shuffled every epoch.
seed (int, optional): Random seed
"""
def __init__(
self, sample_function, batch_size, ids, targets=None, shuffle=True, seed=None
):
# Check that ids is an iterable
if not is_real_iterable(ids):
raise TypeError("IDs must be an iterable or numpy array of graph node IDs")
# Check targets is iterable & has the correct length
if targets is not None:
if not is_real_iterable(targets):
raise TypeError("Targets must be None or an iterable or numpy array ")
if len(ids) != len(targets):
raise ValueError(
"The length of the targets must be the same as the length of the ids"
)
self.targets = np.asanyarray(targets)
else:
self.targets = None
# Ensure number of labels matches number of ids
if targets is not None and len(ids) != len(targets):
raise ValueError("Length of link ids must match length of link targets")
# Store the generator to draw samples from graph
if isinstance(sample_function, collections.abc.Callable):
self._sample_features = sample_function
else:
raise TypeError(
"({}) The sampling function expects a callable function.".format(
type(self).__name__
)
)
self.batch_size = batch_size
self.ids = list(ids)
self.data_size = len(self.ids)
self.shuffle = shuffle
self._rs, _ = random_state(seed)
# Shuffle the IDs to begin
self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.ceil(self.data_size / self.batch_size))
def __getitem__(self, batch_num):
"""
Generate one batch of data
Args:
batch_num (int): number of a batch
Returns:
batch_feats (list): Node features for nodes and neighbours sampled from a
batch of the supplied IDs
batch_targets (list): Targets/labels for the batch.
"""
start_idx = self.batch_size * batch_num
end_idx = start_idx + self.batch_size
if start_idx >= self.data_size:
raise IndexError("Mapper: batch_num larger than length of data")
# print("Fetching {} batch {} [{}]".format(self.name, batch_num, start_idx))
# The ID indices for this batch
batch_indices = self.indices[start_idx:end_idx]
# Get head (root) nodes for links
head_ids = [self.ids[ii] for ii in batch_indices]
# Get targets for nodes
batch_targets = None if self.targets is None else self.targets[batch_indices]
# Get node features for batch of link ids
batch_feats = self._sample_features(head_ids, batch_num)
return batch_feats, batch_targets
def on_epoch_end(self):
"""
Shuffle all link IDs at the end of each epoch
"""
self.indices = list(range(self.data_size))
if self.shuffle:
self._rs.shuffle(self.indices)
class OnDemandLinkSequence(Sequence):
"""
Keras-compatible data generator to use with Keras methods :meth:`keras.Model.fit`,
:meth:`keras.Model.evaluate`, and :meth:`keras.Model.predict`
This class generates data samples for link inference models
and should be created using the :meth:`flow` method of
:class:`GraphSAGELinkGenerator` or :class:`Attri2VecLinkGenerator`.
Args:
sample_function (Callable): A function that returns features for supplied head nodes.
        sampler (UnsupervisedSampler): An object that encapsulates the neighbourhood sampling of a graph.
The generator method of this class returns a batch of positive and negative samples on demand.
"""
def __init__(self, sample_function, batch_size, walker, shuffle=True):
# Store the generator to draw samples from graph
if isinstance(sample_function, collections.abc.Callable):
self._sample_features = sample_function
else:
raise TypeError(
"({}) The sampling function expects a callable function.".format(
type(self).__name__
)
)
if not isinstance(walker, UnsupervisedSampler):
raise TypeError(
"({}) UnsupervisedSampler is required.".format(type(self).__name__)
)
self.batch_size = batch_size
self.walker = walker
self.shuffle = shuffle
# FIXME(#681): all batches are created at once, so this is no longer "on demand"
self._batches = self._create_batches()
self.length = len(self._batches)
self.data_size = sum(len(batch[0]) for batch in self._batches)
def __getitem__(self, batch_num):
"""
Generate one batch of data.
Args:
batch_num<int>: number of a batch
Returns:
batch_feats<list>: Node features for nodes and neighbours sampled from a
batch of the supplied IDs
batch_targets<list>: Targets/labels for the batch.
"""
if batch_num >= self.__len__():
raise IndexError(
"Mapper: batch_num larger than number of esstaimted batches for this epoch."
)
# print("Fetching {} batch {} [{}]".format(self.name, batch_num, start_idx))
# Get head nodes and labels
head_ids, batch_targets = self._batches[batch_num]
# Obtain features for head ids
batch_feats = self._sample_features(head_ids, batch_num)
return batch_feats, batch_targets
def __len__(self):
"""Denotes the number of batches per epoch"""
return self.length
def _create_batches(self):
return self.walker.run(self.batch_size)
def on_epoch_end(self):
"""
Shuffle all link IDs at the end of each epoch
"""
if self.shuffle:
self._batches = self._create_batches()
def _full_batch_array_and_reshape(array, propagate_none=False):
"""
Args:
array: an array-like object
propagate_none: if True, return None when array is None
Returns:
array as a numpy array with an extra first dimension (batch dimension) equal to 1
"""
# if it's ok, just short-circuit on None (e.g. for target arrays, that may or may not exist)
if propagate_none and array is None:
return None
as_np = np.asanyarray(array)
return np.reshape(as_np, (1,) + as_np.shape)
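# Shape illustration for the helper above (hedged, illustration only):
#   a node-feature matrix of shape (N, F) becomes (1, N, F),
#   a target array of shape (N, C) becomes (1, N, C), and
#   None is passed through unchanged when propagate_none=True.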
class FullBatchSequence(Sequence):
"""
Keras-compatible data generator for for node inference models
that require full-batch training (e.g., GCN, GAT).
Use this class with the Keras methods :meth:`keras.Model.fit`,
:meth:`keras.Model.evaluate`, and
:meth:`keras.Model.predict`,
This class should be created using the `.flow(...)` method of
:class:`FullBatchNodeGenerator`.
Args:
features (np.ndarray): An array of node features of size (N x F),
where N is the number of nodes in the graph, F is the node feature size
A (np.ndarray or sparse matrix): An adjacency matrix of the graph of size (N x N).
targets (np.ndarray, optional): An optional array of node targets of size (N x C),
where C is the target size (e.g., number of classes for one-hot class targets)
indices (np.ndarray, optional): Array of indices to the feature and adjacency matrix
of the targets. Required if targets is not None.
"""
use_sparse = False
def __init__(self, features, A, targets=None, indices=None):
if (targets is not None) and (len(indices) != len(targets)):
raise ValueError(
"When passed together targets and indices should be the same length."
)
# Store features and targets as np.ndarray
self.features = np.asanyarray(features)
self.target_indices = np.asanyarray(indices)
# Convert sparse matrix to dense:
if sps.issparse(A) and hasattr(A, "toarray"):
self.A_dense = _full_batch_array_and_reshape(A.toarray())
elif isinstance(A, (np.ndarray, np.matrix)):
self.A_dense = _full_batch_array_and_reshape(A)
else:
raise TypeError(
"Expected input matrix to be either a Scipy sparse matrix or a Numpy array."
)
# Reshape all inputs to have batch dimension of 1
self.features = _full_batch_array_and_reshape(features)
self.target_indices = _full_batch_array_and_reshape(indices)
self.inputs = [self.features, self.target_indices, self.A_dense]
self.targets = _full_batch_array_and_reshape(targets, propagate_none=True)
def __len__(self):
return 1
def __getitem__(self, index):
return self.inputs, self.targets
class SparseFullBatchSequence(Sequence):
"""
Keras-compatible data generator for for node inference models
that require full-batch training (e.g., GCN, GAT).
Use this class with the Keras methods :meth:`keras.Model.fit`,
:meth:`keras.Model.evaluate`, and
:meth:`keras.Model.predict`,
This class uses sparse matrix representations to send data to the models,
and only works with the Keras tensorflow backend. For any other backends,
use the :class:`FullBatchSequence` class.
This class should be created using the `.flow(...)` method of
:class:`FullBatchNodeGenerator`.
Args:
features (np.ndarray): An array of node features of size (N x F),
where N is the number of nodes in the graph, F is the node feature size
A (sparse matrix): An adjacency matrix of the graph of size (N x N).
targets (np.ndarray, optional): An optional array of node targets of size (N x C),
where C is the target size (e.g., number of classes for one-hot class targets)
indices (np.ndarray, optional): Array of indices to the feature and adjacency matrix
of the targets. Required if targets is not None.
"""
use_sparse = True
def __init__(self, features, A, targets=None, indices=None):
if (targets is not None) and (len(indices) != len(targets)):
raise ValueError(
"When passed together targets and indices should be the same length."
)
# Ensure matrix is in COO format to extract indices
if sps.isspmatrix(A):
A = A.tocoo()
else:
raise ValueError("Adjacency matrix not in expected sparse format")
# Convert matrices to list of indices & values
self.A_indices = np.expand_dims(
np.hstack((A.row[:, None], A.col[:, None])), 0
).astype("int64")
        self.A_values = np.expand_dims(A.data, 0)
from __future__ import division, print_function
import math, sys, warnings, datetime
from operator import itemgetter
import itertools
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.spines as mspines
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def _process_plot_format(fmt):
"""
Process a MATLAB style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
# We need to differentiate grayscale '1.0' from tri_down marker '1'
try:
fmtint = str(int(fmt))
except ValueError:
return linestyle, marker, color # Yes
else:
if fmt != fmtint:
# user definitely doesn't want tri_down marker
return linestyle, marker, color # Yes
else:
# ignore converted color
color = None
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers.
See also: :meth:`~matplotlib.axes.Axes.set_color_cycle`.
.. Note:: Deprecated 2010/01/03.
Set rcParams['axes.color_cycle'] directly.
"""
rcParams['axes.color_cycle'] = clist
warnings.warn("Set rcParams['axes.color_cycle'] directly", mplDeprecation)
class _process_plot_var_args(object):
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self.set_color_cycle()
def __getstate__(self):
# note: it is not possible to pickle a itertools.cycle instance
return {'axes': self.axes, 'command': self.command}
def __setstate__(self, state):
self.__dict__ = state.copy()
self.set_color_cycle()
def set_color_cycle(self, clist=None):
if clist is None:
clist = rcParams['axes.color_cycle']
self.color_cycle = itertools.cycle(clist)
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
if self.axes.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if self.axes.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError('There is no line property "%s"'%key)
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError('There is no patch property "%s"'%key)
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
if self.command!='plot':
# the Line2D class can handle unitized data, with
# support for post hoc unit changes etc. Other mpl
# artists, eg Polygon which _process_plot_var_args
# also serves on calls to fill, cannot. So this is a
# hack to say: if you are not "plot", which is
# creating Line2D, then convert the data now to
# floats. If you are plot, pass the raw data through
# to Line2D which will handle the conversion. So
# polygons will not support post hoc conversions of
# the unit type since they are not storing the orig
# data. Hopefully we can rationalize this at a later
# date - JDH
if bx:
x = self.axes.convert_xunits(x)
if by:
y = self.axes.convert_yunits(y)
x = np.atleast_1d(x) #like asanyarray, but converts scalar to array
y = np.atleast_1d(y)
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must have same first dimension")
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y can be no greater than 2-D")
if x.ndim == 1:
x = x[:,np.newaxis]
if y.ndim == 1:
y = y[:,np.newaxis]
return x, y
def _makeline(self, x, y, kw, kwargs):
kw = kw.copy() # Don't modify the original kw.
if not 'color' in kw and not 'color' in kwargs.keys():
kw['color'] = self.color_cycle.next()
# (can't use setdefault because it always evaluates
# its second argument)
seg = mlines.Line2D(x, y,
axes=self.axes,
**kw
)
self.set_lineprops(seg, **kwargs)
return seg
def _makefill(self, x, y, kw, kwargs):
try:
facecolor = kw['color']
except KeyError:
facecolor = self.color_cycle.next()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=kw['closed']
)
self.set_patchprops(seg, **kwargs)
return seg
def _plot_args(self, tup, kwargs):
ret = []
if len(tup) > 1 and is_string_like(tup[-1]):
linestyle, marker, color = _process_plot_format(tup[-1])
tup = tup[:-1]
elif len(tup) == 3:
raise ValueError('third arg must be a format string')
else:
linestyle, marker, color = None, None, None
kw = {}
for k, v in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if v is not None:
kw[k] = v
y = np.atleast_1d(tup[-1])
if len(tup) == 2:
x = np.atleast_1d(tup[0])
else:
x = np.arange(y.shape[0], dtype=float)
x, y = self._xy_from_xy(x, y)
if self.command == 'plot':
func = self._makeline
else:
kw['closed'] = kwargs.get('closed', True)
func = self._makefill
ncx, ncy = x.shape[1], y.shape[1]
for j in xrange(max(ncx, ncy)):
seg = func(x[:,j%ncx], y[:,j%ncy], kw, kwargs)
ret.append(seg)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0:
return
if len(remaining) <= 3:
for seg in self._plot_args(remaining, kwargs):
yield seg
return
if is_string_like(remaining[2]):
isplit = 3
else:
isplit = 2
for seg in self._plot_args(remaining[:isplit], kwargs):
yield seg
remaining=remaining[isplit:]
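    # Illustrative note (not part of the library): _grab_next_args walks the
    # positional arguments in groups of (x, y) or (x, y, fmt), so a call such
    # as
    #
    #     ax.plot(t1, s1, 'ko', t2, s2, 'r--')
    #
    # is split into the groups (t1, s1, 'ko') and (t2, s2, 'r--'), each of
    # which is handed to _plot_args and turned into one or more Line2D
    # instances.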
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
xscale=None,
yscale=None,
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' | 'box-forced']
*alpha* float: the alpha transparency (can be None)
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
self.set_axes_locator(kwargs.get("axes_locator", None))
self.spines = self._gen_axes_spines()
        # this call may differ for non-separable axes, e.g. polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._rasterization_zorder = None
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if xscale:
self.set_xscale(xscale)
if yscale:
self.set_yscale(yscale)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def __setstate__(self, state):
self.__dict__ = state
# put the _remove_method back on all artists contained within the axes
for container_name in ['lines', 'collections', 'tables', 'patches',
'texts', 'images']:
container = getattr(self, container_name)
for artist in container:
artist._remove_method = container.remove
def get_window_extent(self, *args, **kwargs):
"""
get the axes bounding box in display space; *args* and
*kwargs* are empty
"""
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def set_figure(self, fig):
"""
        Set the :class:`~matplotlib.axes.Axes` figure
        accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
.. note::
This method is primarily used by rectilinear projections
of the :class:`~matplotlib.axes.Axes` class, and is meant
to be overridden by new kinds of projection axes that need
different transformations and limits. (See
:class:`~matplotlib.projections.polar.PolarAxes` for an
            example.)
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor.
# It is assumed that this part will have non-linear components
# (e.g. for a log scale).
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
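    # Illustrative sketch (not part of the library): the transform pipeline
    # built above can be used directly to map between coordinate systems.
    # Assuming an Axes instance `ax`:
    #
    #     disp = ax.transData.transform((2.0, 3.0))       # data -> display (pixels)
    #     frac = ax.transAxes.transform((0.5, 0.5))       # axes fraction -> display
    #     data = ax.transData.inverted().transform(disp)  # display -> data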
def get_xaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._xaxis_transform
elif which=='tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which=='tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self,which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which=='grid':
return self._yaxis_transform
elif which=='tick1':
            # for cartesian projection, this is the left spine
return self.spines['left'].get_spine_transform()
elif which=='tick2':
            # for cartesian projection, this is the right spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
try:
line._transformed_path.invalidate()
except AttributeError:
pass
def get_position(self, original=False):
        'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
"""Make the original position the active position"""
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
set axes_locator
        ACCEPTS: a callable object which takes an axes instance and renderer and
returns a bbox.
"""
self._axes_locator = locator
def get_axes_locator(self):
"""
return axes_locator
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists added to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns a dict whose keys are spine names and values are
Line2D or Patch instances. Each element is used to draw a
spine of the axes.
In the standard axes, this is a single line segment, but in
other projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return {
'left':mspines.Spine.linear_spine(self,'left'),
'right':mspines.Spine.linear_spine(self,'right'),
'bottom':mspines.Spine.linear_spine(self,'bottom'),
'top':mspines.Spine.linear_spine(self,'top'),
}
def cla(self):
"""Clear the current axes."""
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
for name,spine in self.spines.iteritems():
spine.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry()
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharex.xaxis.get_major_formatter()
minf = self._sharex.xaxis.get_minor_formatter()
majl = self._sharex.xaxis.get_major_locator()
minl = self._sharex.xaxis.get_minor_locator()
# This overwrites the current formatter/locator
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
# Reset the formatter/locator
self.xaxis.set_major_formatter(majf)
self.xaxis.set_minor_formatter(minf)
self.xaxis.set_major_locator(majl)
self.xaxis.set_minor_locator(minl)
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False, auto=None)
# Save the current formatter/locator so we don't lose it
majf = self._sharey.yaxis.get_major_formatter()
minf = self._sharey.yaxis.get_minor_formatter()
majl = self._sharey.yaxis.get_major_locator()
minl = self._sharey.yaxis.get_minor_locator()
# This overwrites the current formatter/locator
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
# Reset the formatter/locator
self.yaxis.set_major_formatter(majf)
self.yaxis.set_minor_formatter(minf)
self.yaxis.set_major_locator(majl)
self.yaxis.set_minor_locator(minl)
else:
self.yaxis.set_scale('linear')
self._autoscaleXon = True
self._autoscaleYon = True
self._xmargin = 0
self._ymargin = 0
self._tight = False
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self._current_image = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = [] #
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='baseline',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def get_frame(self):
raise AttributeError('Axes.frame was removed in favor of Axes.spines')
frame = property(get_frame)
def clear(self):
"""clear the axes"""
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
*clist* is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
self._get_patches_for_fill.set_color_cycle(clist)
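    # Usage sketch (illustrative): the cycle applies to subsequent plot/fill
    # calls on this Axes only. Here `x`, `y1`, `y2` are assumed to be existing
    # data arrays.
    #
    #     ax.set_color_cycle(['c', 'm', 'y', 'k'])
    #     ax.plot(x, y1)   # cyan
    #     ax.plot(x, y2)   # magenta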
def ishold(self):
"""return the HOLD status of the axes"""
return self._hold
def hold(self, b=None):
"""
Call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples::
# toggle hold
hold()
# turn hold on
hold(True)
# turn hold off
hold(False)
When hold is *True*, subsequent plot commands will be added to
the current axes. When hold is *False*, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
============ =====================================
value description
============ =====================================
'box' change physical size of axes
'datalim' change xlim or ylim
'box-forced' same as 'box', but axes can be shared
============ =====================================
'box' does not allow axes sharing, as this can cause
        unintended side effects. For cases when sharing axes is
fine, use 'box-forced'.
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
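    # Usage sketch (illustrative, assuming an existing Axes `ax`):
    #
    #     ax.set_aspect('equal', adjustable='datalim')  # equal data scaling; view limits may change
    #     ax.set_aspect(2.0, adjustable='box')          # box resized so circles are twice as tall as wide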
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' | 'box-forced']
"""
if adjustable in ('box', 'datalim', 'box-forced'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def get_data_ratio_log(self):
"""
Returns the aspect ratio of the raw data in log scale.
Will be used when both axis scales are in log.
"""
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
xsize = max(math.fabs(math.log10(xmax)-math.log10(xmin)), 1e-30)
ysize = max(math.fabs(math.log10(ymax)-math.log10(ymin)), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
"""
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
"""
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if self.name != 'polar':
xscale, yscale = self.get_xscale(), self.get_yscale()
if xscale == "linear" and yscale == "linear":
aspect_scale_mode = "linear"
elif xscale == "log" and yscale == "log":
aspect_scale_mode = "log"
elif (xscale == "linear" and yscale == "log") or \
(xscale == "log" and yscale == "linear"):
                if aspect != "auto":
warnings.warn(
'aspect is not supported for Axes with xscale=%s, yscale=%s' \
% (xscale, yscale))
aspect = "auto"
else: # some custom projections have their own scales.
pass
else:
aspect_scale_mode = "linear"
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable in ['box', 'box-forced']:
if aspect_scale_mode == "log":
box_aspect = A * self.get_data_ratio_log()
else:
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
ymin,ymax = self.get_ybound()
if aspect_scale_mode == "log":
xmin, xmax = math.log10(xmin), math.log10(xmax)
ymin, ymax = math.log10(ymin), math.log10(ymax)
xsize = max(math.fabs(xmax-xmin), 1e-30)
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
if aspect_scale_mode == "log":
dL = self.dataLim
dL_width = math.log10(dL.x1) - math.log10(dL.x0)
dL_height = math.log10(dL.y1) - math.log10(dL.y0)
xr = 1.05 * dL_width
yr = 1.05 * dL_height
else:
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
if aspect_scale_mode == "log":
self.set_ybound((10.**y0, 10.**y1))
else:
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
if aspect_scale_mode == "log":
self.set_xbound((10.**x0, 10.**x1))
else:
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
"""
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot. For details, see
:func:`~matplotlib.pyplot.axis`.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
"""
if len(v) == 0 and len(kwargs) == 0:
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view(tight=False)
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by <NAME>
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
emit = kwargs.get('emit', True)
try:
v[0]
except IndexError:
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
auto = False # turn off autoscaling, unless...
if xmin is None and xmax is None:
auto = None # leave autoscaling state alone
xmin, xmax = self.set_xlim(xmin, xmax, emit=emit, auto=auto)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
auto = False # turn off autoscaling, unless...
if ymin is None and ymax is None:
auto = None # leave autoscaling state alone
ymin, ymax = self.set_ylim(ymin, ymax, emit=emit, auto=auto)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]], emit=emit, auto=False)
self.set_ylim([v[2], v[3]], emit=emit, auto=False)
return v
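    # Usage sketch (illustrative, assuming an existing Axes `ax`):
    #
    #     ax.axis([0, 10, -1, 1])              # set xmin, xmax, ymin, ymax explicitly
    #     ax.axis('equal')                     # equal scaling via the data limits
    #     ax.axis('off')                       # hide axis lines, ticks and labels
    #     xmin, xmax, ymin, ymax = ax.axis()   # query the current limits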
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise mplDeprecation('Use get_children instead')
def get_frame(self):
"""Return the axes Rectangle frame"""
warnings.warn('use ax.patch instead', mplDeprecation)
return self.patch
def get_legend(self):
"""Return the legend.Legend instance, or None if no legend is defined"""
return self.legend_
def get_images(self):
"""return a list of Axes images contained by the Axes"""
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
"""Return a list of lines contained by the Axes"""
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
"""Return the XAxis instance"""
return self.xaxis
def get_xgridlines(self):
"""Get the x grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
"""Get the xtick lines as a list of Line2D instances"""
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
"""Return the YAxis instance"""
return self.yaxis
def get_ygridlines(self):
"""Get the y grid lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
"""Get the ytick lines as a list of Line2D instances"""
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def _sci(self, im):
"""
helper for :func:`~matplotlib.pyplot.sci`;
do not use elsewhere.
"""
if isinstance(im, matplotlib.contour.ContourSet):
if im.collections[0] not in self.collections:
raise ValueError(
"ContourSet must be in current Axes")
elif im not in self.images and im not in self.collections:
raise ValueError(
"Argument must be an image, collection, or ContourSet in this Axes")
self._current_image = im
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
return self._current_image
def has_data(self):
"""
Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
anything.
"""
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
"""
Add any :class:`~matplotlib.artist.Artist` to the axes.
Returns the artist.
"""
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
return a
def add_collection(self, collection, autolim=True):
"""
Add a :class:`~matplotlib.collections.Collection` instance
to the axes.
Returns the collection.
"""
label = collection.get_label()
if not label:
collection.set_label('_collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
if collection.get_clip_path() is None:
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
return collection
def add_line(self, line):
"""
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
Returns the line.
"""
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d' % len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
return line
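    # Illustrative sketch (not part of the library): plot() ultimately calls
    # add_line() with Line2D instances, but a line can also be added directly.
    # Assuming an Axes `ax`:
    #
    #     import matplotlib.lines as mlines
    #     ln = mlines.Line2D([0, 1], [0, 1], color='g')
    #     ax.add_line(ln)        # clipped to the axes patch, data limits updated
    #     ax.autoscale_view()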
def _update_line_limits(self, line):
"""Figures out the data limit of the given line, updating self.dataLim."""
path = line.get_path()
if path.vertices.size == 0:
return
line_trans = line.get_transform()
if line_trans == self.transData:
data_path = path
elif any(line_trans.contains_branch_seperately(self.transData)):
# identify the transform to go from line's coordinates
# to data coordinates
trans_to_data = line_trans - self.transData
# if transData is affine we can use the cached non-affine component
# of line's path. (since the non-affine part of line_trans is
# entirely encapsulated in trans_to_data).
if self.transData.is_affine:
line_trans_path = line._get_transformed_path()
na_path, _ = line_trans_path.get_transformed_path_and_affine()
data_path = trans_to_data.transform_path_affine(na_path)
else:
data_path = trans_to_data.transform_path(path)
else:
# for backwards compatibility we update the dataLim with the
# coordinate range of the given path, even though the coordinate
# systems are completely different. This may occur in situations
# such as when ax.transAxes is passed through for absolute
# positioning.
data_path = path
if data_path.vertices.size > 0:
updatex, updatey = line_trans.contains_branch_seperately(
self.transData
)
self.dataLim.update_from_path(data_path,
self.ignore_existing_data_limits,
updatex=updatex,
updatey=updatey)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
Returns the patch.
"""
self._set_artist_props(p)
if p.get_clip_path() is None:
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
return p
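    # Illustrative sketch (assuming an Axes `ax`): patches are added the same
    # way; the data limits are extended to include the patch extents.
    #
    #     import matplotlib.patches as mpatches
    #     rect = mpatches.Rectangle((1, 1), width=2, height=3, facecolor='0.8')
    #     ax.add_patch(rect)
    #     ax.autoscale_view()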
def _update_patch_limits(self, patch):
"""update the data limits for patch *p*"""
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
# cannot check for '==0' since unitized data may not compare to zero
if (isinstance(patch, mpatches.Rectangle) and
((not patch.get_width()) or (not patch.get_height()))):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
patch_to_data = (patch.get_data_transform() -
self.transData)
xys = patch_to_data.transform(xys)
updatex, updatey = patch.get_transform().\
contains_branch_seperately(self.transData)
self.update_datalim(xys, updatex=updatex,
updatey=updatey)
def add_table(self, tab):
"""
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
Returns the table.
"""
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
return tab
def add_container(self, container):
"""
Add a :class:`~matplotlib.container.Container` instance
to the axes.
        Returns the container.
"""
label = container.get_label()
if not label:
container.set_label('_container%d'%len(self.containers))
self.containers.append(container)
container.set_remove_method(lambda h: self.containers.remove(h))
return container
def relim(self):
"""
Recompute the data limits based on current artists.
At present, :class:`~matplotlib.collections.Collection`
instances are not supported.
"""
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
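    # Usage sketch (illustrative): relim() is needed when artist data changes
    # after the artist was added, because dataLim is not tracked automatically.
    # Here `x` and `y` are assumed to be existing numpy arrays.
    #
    #     line, = ax.plot(x, y)
    #     line.set_ydata(y * 10)   # dataLim is now stale
    #     ax.relim()               # recompute from current lines/patches
    #     ax.autoscale_view()      # apply the new limits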
def update_datalim(self, xys, updatex=True, updatey=True):
"""Update the data lim bbox with seq of xy tups or equiv. 2-D array"""
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
"""Update the data lim bbox with seq of xy tups"""
        # if no data is set currently, the bbox will ignore its
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
"""
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
"""
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
"""Look for unit *kwargs* and update the axis instances as necessary"""
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if self.name == 'polar':
xunits = kwargs.pop( 'thetaunits', xunits )
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if self.name == 'polar':
yunits = kwargs.pop( 'runits', yunits )
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
"""
Return *True* if the given *mouseevent* (in display coords)
is in the Axes
"""
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for both axes on plot commands
"""
return self._autoscaleXon and self._autoscaleYon
def get_autoscalex_on(self):
"""
Get whether autoscaling for the x-axis is applied on plot commands
"""
return self._autoscaleXon
def get_autoscaley_on(self):
"""
Get whether autoscaling for the y-axis is applied on plot commands
"""
return self._autoscaleYon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
self._autoscaleYon = b
def set_autoscalex_on(self, b):
"""
Set whether autoscaling for the x-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleXon = b
def set_autoscaley_on(self, b):
"""
Set whether autoscaling for the y-axis is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleYon = b
def set_xmargin(self, m):
"""
Set padding of X data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._xmargin = m
def set_ymargin(self, m):
"""
Set padding of Y data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
"""
if m < 0 or m > 1:
raise ValueError("margin must be in range 0 to 1")
self._ymargin = m
def margins(self, *args, **kw):
"""
Set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin
::
margins(margin)
margins(xmargin, ymargin)
margins(x=xmargin, y=ymargin)
margins(..., tight=False)
All three forms above set the xmargin and ymargin parameters.
All keyword parameters are optional. A single argument
specifies both xmargin and ymargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
"""
if not args and not kw:
return self._xmargin, self._ymargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
if len(args) == 1:
mx = my = args[0]
elif len(args) == 2:
mx, my = args
        elif len(args) > 2:
raise ValueError("more than two arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
scalex = (mx is not None)
scaley = (my is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
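    # Usage sketch (illustrative, assuming an Axes `ax`):
    #
    #     ax.margins(0.05)           # 5% padding on both axes
    #     ax.margins(x=0, y=0.1)     # no x padding, 10% y padding
    #     xm, ym = ax.margins()      # query the current margins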
def set_rasterization_zorder(self, z):
"""
Set zorder value below which artists will be rasterized. Set
to `None` to disable rasterizing of artists below a particular
zorder.
"""
self._rasterization_zorder = z
def get_rasterization_zorder(self):
"""
Get zorder value below which artists will be rasterized
"""
return self._rasterization_zorder
def autoscale(self, enable=True, axis='both', tight=None):
"""
Autoscale the axis view to the data (toggle).
Convenience method for simple axis view autoscaling.
It turns autoscaling on or off, and then,
if autoscaling for either axis is on, it performs
the autoscaling on the specified axis or axes.
*enable*: [True | False | None]
True (default) turns autoscaling on, False turns it off.
None leaves the autoscaling state unchanged.
*axis*: ['x' | 'y' | 'both']
which axis to operate on; default is 'both'
*tight*: [True | False | None]
If True, set view limits to data limits;
if False, let the locator and margins expand the view limits;
if None, use tight scaling if the only artist is an image,
otherwise treat *tight* as False.
The *tight* setting is retained for future autoscaling
until it is explicitly changed.
Returns None.
"""
if enable is None:
scalex = True
scaley = True
else:
scalex = False
scaley = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
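    # Usage sketch (illustrative, assuming an Axes `ax`):
    #
    #     ax.autoscale(enable=True, axis='x', tight=True)  # fit x limits to the data
    #     ax.autoscale(False)                              # freeze both axes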
def autoscale_view(self, tight=None, scalex=True, scaley=True):
"""
Autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
The data limits are not updated automatically when artist
data are changed after the artist has been added to an
Axes instance. In that case, use
:meth:`matplotlib.axes.Axes.relim`
prior to calling autoscale_view.
"""
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
xlocator = self.xaxis.get_major_locator()
try:
# e.g. DateLocator has its own nonsingular()
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
# Default nonsingular for, e.g., MaxNLocator
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
#### Drawing
@allow_rasterization
def draw(self, renderer=None, inframe=False):
"""Draw everything (plot lines, axes, labels)"""
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
artists = []
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.extend(self.spines.itervalues())
dsu = [ (a.zorder, a) for a in artists
if not a.get_animated() ]
        # add images to dsu if the backend supports compositing.
        # otherwise, do the manual compositing without adding images to dsu.
if len(self.images)<=1 or renderer.option_image_nocomposite():
dsu.extend([(im.zorder, im) for im in self.images])
_do_composite = False
else:
_do_composite = True
dsu.sort(key=itemgetter(0))
# rasterize artists with negative zorder
# if the minimum zorder is negative, start rasterization
rasterization_zorder = self._rasterization_zorder
if (rasterization_zorder is not None and
len(dsu) > 0 and dsu[0][0] < rasterization_zorder):
renderer.start_rasterizing()
dsu_rasterized = [l for l in dsu if l[0] < rasterization_zorder]
dsu = [l for l in dsu if l[0] >= rasterization_zorder]
else:
dsu_rasterized = []
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
if _do_composite:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
zorder_images = [(im.zorder, im) for im in self.images \
if im.get_visible()]
zorder_images.sort(key=lambda x: x[0])
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0) for z,im in zorder_images]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(mtransforms.TransformedPath(
self.patch.get_path(),
self.patch.get_transform()))
renderer.draw_image(gc, round(l), round(b), im)
gc.restore()
if dsu_rasterized:
for zorder, a in dsu_rasterized:
a.draw(renderer)
renderer.stop_rasterizing()
for zorder, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
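    # Illustrative sketch (not part of the library): draw_artist is typically
    # used for fast partial redraws ("blitting") with an Agg-based interactive
    # backend. Assuming a figure `fig`, an Axes `ax`, a Line2D `line`, and new
    # data `new_y`:
    #
    #     fig.canvas.draw()                      # full draw caches the renderer
    #     background = fig.canvas.copy_from_bbox(ax.bbox)
    #     line.set_ydata(new_y)
    #     fig.canvas.restore_region(background)
    #     ax.draw_artist(line)                   # redraw just this artist
    #     fig.canvas.blit(ax.bbox)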
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
        # ignore for now; broken (relies on self._lastRenderer and
        # self.animated, which are not set anywhere in this class)
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort(key=lambda x: x[0])
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
@docstring.dedent_interpd
def grid(self, b=None, which='major', axis='both', **kwargs):
"""
Turn the axes grids on or off.
Call signature::
grid(self, b=None, which='major', axis='both', **kwargs)
Set the axes grids on or off; *b* is a boolean. (For MATLAB
compatibility, *b* may also be a string, 'on' or 'off'.)
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*.
*which* can be 'major' (default), 'minor', or 'both' to control
whether major tick grids, minor tick grids, or both are affected.
*axis* can be 'both' (default), 'x', or 'y' to control which
set of gridlines are drawn.
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs):
b = True
b = _string_to_bool(b)
if axis == 'x' or axis == 'both':
self.xaxis.grid(b, which=which, **kwargs)
if axis == 'y' or axis == 'both':
self.yaxis.grid(b, which=which, **kwargs)
def ticklabel_format(self, **kwargs):
"""
Change the `~matplotlib.ticker.ScalarFormatter` used by
default for linear axes.
Optional keyword arguments:
============ =========================================
Keyword Description
============ =========================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*useOffset* [True | False | offset]; if True,
the offset will be calculated as needed;
if False, no offset will be used; if a
numeric offset is specified, it will be
used.
*axis* [ 'x' | 'y' | 'both' ]
*useLocale* If True, format the number according to
the current locale. This affects things
such as the character used for the
decimal separator. If False, use
C-style (English) formatting. The
default setting is controlled by the
axes.formatter.use_locale rcparam.
============ =========================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
useLocale = kwargs.pop('useLocale', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useOffset(useOffset)
if useLocale is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useLocale(useLocale)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useLocale(useLocale)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Control behavior of tick locators.
Keyword arguments:
*axis*
['x' | 'y' | 'both'] Axis on which to operate;
default is 'both'.
*tight*
[True | False | None] Parameter passed to :meth:`autoscale_view`.
Default is None, for no change.
Remaining keyword arguments are passed to directly to the
:meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
Typically one might want to reduce the maximum number
of ticks and use tight bounds when plotting small
subplots, for example::
ax.locator_params(tight=True, nbins=4)
Because the locator is involved in autoscaling,
:meth:`autoscale_view` is called automatically after
the parameters are changed.
This presently works only for the
:class:`~matplotlib.ticker.MaxNLocator` used
by default on linear axes, but it may be generalized.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
def tick_params(self, axis='both', **kwargs):
"""
Change the appearance of ticks and tick labels.
Keyword arguments:
*axis* : ['x' | 'y' | 'both']
Axis on which to operate; default is 'both'.
*reset* : [True | False]
If *True*, set all parameters to defaults
before processing other keyword arguments. Default is
*False*.
*which* : ['major' | 'minor' | 'both']
Default is 'major'; apply arguments to *which* ticks.
*direction* : ['in' | 'out' | 'inout']
Puts ticks inside the axes, outside the axes, or both.
*length*
Tick length in points.
*width*
Tick width in points.
*color*
Tick color; accepts any mpl color spec.
*pad*
Distance in points between tick and label.
*labelsize*
Tick label font size in points or as a string (e.g. 'large').
*labelcolor*
Tick label color; mpl color spec.
*colors*
Changes the tick color and the label color to the same value:
mpl color spec.
*zorder*
Tick and label zorder.
*bottom*, *top*, *left*, *right* : [bool | 'on' | 'off']
controls whether to draw the respective ticks.
*labelbottom*, *labeltop*, *labelleft*, *labelright*
Boolean or ['on' | 'off'], controls whether to draw the
respective tick labels.
Example::
ax.tick_params(direction='out', length=6, width=2, colors='r')
This will make all major ticks be red, pointing out of the box,
and with dimensions 6 points by 2 points. Tick labels will
also be red.
"""
if axis in ['x', 'both']:
xkw = dict(kwargs)
xkw.pop('left', None)
xkw.pop('right', None)
xkw.pop('labelleft', None)
xkw.pop('labelright', None)
self.xaxis.set_tick_params(**xkw)
if axis in ['y', 'both']:
ykw = dict(kwargs)
ykw.pop('top', None)
ykw.pop('bottom', None)
ykw.pop('labeltop', None)
ykw.pop('labelbottom', None)
self.yaxis.set_tick_params(**ykw)
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
"""Return the axis background color"""
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
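For example (a sketch, assuming an existing Axes instance ``ax``)::
    ax.set_axis_bgcolor('0.95')   # light gray background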
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left, auto=None)
def xaxis_inverted(self):
"""Returns *True* if the x-axis is inverted."""
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleXon attribute.
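For example (a sketch, assuming an existing Axes instance ``ax``)::
    ax.set_xbound(0, 10)   # applied even if the x-axis is currently inverted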
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower, auto=None)
else:
self.set_xlim(lower, upper, auto=None)
else:
if lower < upper:
self.set_xlim(lower, upper, auto=None)
else:
self.set_xlim(upper, lower, auto=None)
def get_xlim(self):
"""
Get the x-axis range [*left*, *right*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Call signature::
set_xlim(self, *args, **kwargs):
Set the data limits for the xaxis
Examples::
set_xlim((left, right))
set_xlim(left, right)
set_xlim(left=1) # right unchanged
set_xlim(right=1) # left unchanged
Keyword arguments:
*left*: scalar
The left xlim; *xmin*, the previous name, may still be used
*right*: scalar
The right xlim; *xmax*, the previous name, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *x* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *left* (formerly *xmin*) value may be greater than
the *right* (formerly *xmax*).
For example, suppose *x* is years before present.
Then one might use::
set_ylim(5000, 0)
so 5000 years ago is on the left of the plot and the
present is on the right.
Returns the current xlimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and iterable(left):
left,right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None: left = old_left
if right is None: right = old_right
if left==right:
warnings.warn(('Attempting to set identical left==right results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
def get_xscale(self):
return self.xaxis.get_scale()
get_xscale.__doc__ = "Return the xaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_xscale(self, value, **kwargs):
"""
Call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
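For example (a sketch, assuming an existing Axes instance ``ax``;
the *basex* kwarg here applies to the log scale)::
    ax.set_xscale('log', basex=2)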
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view(scaley=False)
self._update_transScale()
def get_xticks(self, minor=False):
"""Return the x ticks as a list of locations"""
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
"""
Get the xtick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
"""
Get the x minor tick labels as a list of
:class:`matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
"""
Get the x tick labels as a list of :class:`~matplotlib.text.Text`
instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
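For example (a sketch, assuming ``ax`` is an existing Axes whose tick
locations have already been fixed with :meth:`set_xticks`)::
    ax.set_xticks([0.0, 0.5, 1.0])
    ax.set_xticklabels(['low', 'mid', 'high'], rotation=45)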
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def invert_yaxis(self):
"Invert the y-axis."
bottom, top = self.get_ylim()
self.set_ylim(top, bottom, auto=None)
def yaxis_inverted(self):
"""Returns *True* if the y-axis is inverted."""
bottom, top = self.get_ylim()
return top < bottom
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
bottom, top = self.get_ylim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_ybound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the _autoscaleYon attribute.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower, auto=None)
else:
self.set_ylim(lower, upper, auto=None)
else:
if lower < upper:
self.set_ylim(lower, upper, auto=None)
else:
self.set_ylim(upper, lower, auto=None)
def get_ylim(self):
"""
Get the y-axis range [*bottom*, *top*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Call signature::
set_ylim(self, *args, **kwargs):
Set the data limits for the yaxis
Examples::
set_ylim((bottom, top))
set_ylim(bottom, top)
set_ylim(bottom=1) # top unchanged
set_ylim(top=1) # bottom unchanged
Keyword arguments:
*bottom*: scalar
The bottom ylim; the previous name, *ymin*, may still be used
*top*: scalar
The top ylim; the previous name, *ymax*, may still be used
*emit*: [ *True* | *False* ]
Notify observers of limit change
*auto*: [ *True* | *False* | *None* ]
Turn *y* autoscaling on (*True*), off (*False*; default),
or leave unchanged (*None*)
Note, the *bottom* (formerly *ymin*) value may be greater than
the *top* (formerly *ymax*).
For example, suppose *y* is depth in the ocean.
Then one might use::
set_ylim(5000, 0)
so 5000 m depth is at the bottom of the plot and the
surface, 0 m, is at the top.
Returns the current ylimits as a length 2 tuple
ACCEPTS: length 2 sequence of floats
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and iterable(bottom):
bottom,top = bottom
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None: bottom = old_bottom
if top is None: top = old_top
if bottom==top:
warnings.warn(('Attempting to set identical bottom==top results\n'
+ 'in singular transformations; automatically expanding.\n'
+ 'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
def get_yscale(self):
return self.yaxis.get_scale()
get_yscale.__doc__ = "Return the yaxis scale string: %s""" % (
", ".join(mscale.get_scale_names()))
@docstring.dedent_interpd
def set_yscale(self, value, **kwargs):
"""
Call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view(scalex=False)
self._update_transScale()
def get_yticks(self, minor=False):
"""Return the y ticks as a list of locations"""
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ *False* | *True* ]
Sets the minor ticks if *True*
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
"""
Get the major y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
"""
Get the minor y tick labels as a list of
:class:`~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
"""
Get the y tick labels as a list of :class:`~matplotlib.text.Text`
instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
@docstring.dedent_interpd
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the y tick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
def xaxis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
# should be enough to inform the unit conversion interface
# dates are coming in
self.xaxis.axis_date(tz)
def yaxis_date(self, tz=None):
"""
Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
"""
self.yaxis.axis_date(tz)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
"""Return a format string formatting the *x*, *y* coord"""
if x is None:
xs = '???'
else:
xs = self.format_xdata(x)
if y is None:
ys = '???'
else:
ys = self.format_ydata(y)
return 'x=%s y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
"""
return True
def can_pan(self) :
"""
Return *True* if this axes supports any pan/zoom button functionality.
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ *True* | *False* ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = np.array([p.x, p.y])
oldpoints = p.lim.transformed(p.trans)
newpoints = start + alpha * (oldpoints - start)
result = mtransforms.Bbox(newpoints) \
.transformed(p.trans_inverse)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
Return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur.
Register callback functions with the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise mplDeprecation('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
"""disconnect from the Axes event."""
raise mplDeprecation('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
"""return a list of child artists"""
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.extend(self.spines.itervalues())
return children
def contains(self,mouseevent):
"""
Test whether the mouse event occurred in the axes.
Returns *True* / *False*, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def contains_point(self, point):
"""
Returns *True* if the point (tuple of x,y) is inside the axes
(the area defined by its patch). A pixel coordinate is
required.
"""
return self.patch.contains_point(point, radius=1.0)
def pick(self, *args):
"""
Call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args) > 1:
raise mplDeprecation('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self, args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
# Transform.numerix_x_y no longer exists; transform the points directly
xt, yt = a.get_transform().transform(np.column_stack([xdata, ydata])).T
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, **kwargs):
"""
Call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how overriding works and on the optional args
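For example (a sketch, assuming an existing Axes instance ``ax``)::
    ax.set_title('Residuals', fontsize=14, color='0.3')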
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'baseline',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_xlabel(xlabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the xaxis.
*labelpad* is the spacing in points between the label and the x-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how overriding works and on the optional args
"""
if labelpad is not None: self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Call signature::
set_ylabel(ylabel, fontdict=None, labelpad=None, **kwargs)
Set the label for the yaxis
*labelpad* is the spacing in points between the label and the y-axis
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`
for information on how overriding works and on the optional args
"""
if labelpad is not None: self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
@docstring.dedent_interpd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Add text to the axes.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ *False* | *True* ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords;
alternatively, you can specify text in axes coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'baseline',
'horizontalalignment' : 'left',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Create an annotation: a piece of text referring to a data
point.
Call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
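A minimal usage sketch (assuming an existing Axes instance ``ax``
with data already plotted)::
    ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
                arrowprops=dict(facecolor='black', shrink=0.05))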
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line across the axis.
Call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( ydata=y, kwargs=kwargs )
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line across the axes.
Call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info( xdata=x, kwargs=kwargs )
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *y* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines.
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
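A minimal usage sketch (assuming an existing Axes instance ``ax``)::
    ax.hlines([1, 2, 3], 0, [1, 2, 3],
              colors='r', linestyles='dashed')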
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info( [xmin, xmax], y, kwargs=kwargs )
y = self.convert_yunits( y )
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError('xmin and y are unequal sized sequences')
if len(xmax)!=len(y):
raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
if len(y) > 0:
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors* :
A line collection's color args, either a single color
or a ``len(x)`` list of colors
*linestyles* : [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
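A minimal usage sketch (assuming an existing Axes instance ``ax``)::
    ax.vlines([1, 2, 3], 0, [1, 4, 9],
              colors='k', linestyles='dotted')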
"""
if kwargs.get('fmt') is not None:
raise mplDeprecation('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError('ymin and x are unequal sized sequences')
if len(ymax)!=len(x):
raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
if len(x) > 0:
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
By default, each line is assigned a different color specified by a
'color cycle'. To change this behavior, you can edit the
axes.color_cycle rcParam. Alternatively, you can use
:meth:`~matplotlib.axes.Axes.set_default_color_cycle`.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12).
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Plot data that contains dates.
Call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
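A minimal usage sketch (assuming an existing Axes instance ``ax``)::
    import datetime
    import matplotlib.dates as mdates
    days = mdates.drange(datetime.datetime(2020, 1, 1),
                         datetime.datetime(2020, 1, 8),
                         datetime.timedelta(days=1))
    ax.plot_date(days, range(7), 'o-')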
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the *x* and *y* axis.
Call signature::
loglog(*args, **kwargs)
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nonposx*/*nonposy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
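A minimal usage sketch (assuming an existing Axes instance ``ax``
and ``numpy`` imported as ``np``)::
    x = np.logspace(0, 3, 50)
    ax.loglog(x, x**2)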
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the *x* axis.
Call signature::
semilogx(*args, **kwargs)
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nonposx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the *y* axis.
call signature::
semilogy(*args, **kwargs)
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nonposy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of *x*.
Call signature::
acorr(x, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, **kwargs)
If *normed* = *True*, normalize the data by the autocorrelation at
0-th lag. *x* is detrended by the *detrend* callable (default: no
detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
``(2*len(x)-1)`` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
Plot the cross correlation between *x* and *y*.
Call signature::
xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs)
If *normed* = *True*, normalize the data by the cross
correlation at 0-th lag. *x* and *y* are detrended by the
*detrend* callable (default: no detrending). *x* and *y*
must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` is top graph, and
:func:`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
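A minimal usage sketch (assuming an existing Axes instance ``ax``
and ``numpy`` imported as ``np``)::
    x = np.random.randn(1000)
    y = np.roll(x, 5) + 0.1 * np.random.randn(1000)
    lags, c, linecol, baseline = ax.xcorr(x, y, maxlags=20)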
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
def _get_legend_handles(self, legend_handler_map=None):
"return artists that will be used as handles for legend"
handles_original = self.lines + self.patches + \
self.collections + self.containers
# collections
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
handles = []
for h in handles_original:
if h.get_label() == "_nolegend_": #.startswith('_'):
continue
if mlegend.Legend.get_legend_handler(handler_map, h):
handles.append(h)
return handles
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
#if (label is not None and label != '' and not label.startswith('_')):
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Place a legend on the current axes.
Call signature::
legend(*args, **kwargs)
Places legend at location *loc*. Labels are a sequence of
strings and *loc* can be a string or an integer specifying the
legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Users can specify any arbitrary location for the legend using the
*bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance
of BboxBase(or its derivatives) or a tuple of 2 or 4 floats.
For example,
loc = 'upper right', bbox_to_anchor = (0.5, 0.5)
will place the legend so that the upper right corner of the legend is at
the center of the axes.
The legend location can be specified in other coordinates by using the
*bbox_transform* keyword.
The *loc* itself can be a 2-tuple giving x,y of the lower-left corner of
the legend in axes coords (*bbox_to_anchor* is ignored).
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*fontsize*: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points. This
argument is only used if prop is not specified.
*numpoints*: integer
The number of points in the legend for line
*scatterpoints*: integer
The number of points in the legend for scatter plot
*scatteroffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*,
use rc settings.
*frameon*: [ *True* | *False* ]
if *True*, draw a frame around the legend.
The default is set by the rcParam 'legend.frameon'
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*,
use rc settings
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*,
use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*bbox_to_anchor* : an instance of BboxBase or a tuple of 2 or 4 floats
the bbox that the legend will be anchored.
*bbox_transform* : [ an instance of Transform | *None* ]
the transform for the bbox. transAxes if *None*.
*title* : string
the legend title
Padding and spacing between various elements use the following
keyword parameters. These values are measured in font-size
units. E.g., a fontsize of 10 points and a handlelength=5
implies a handlelength of 50 points. Values from rcParams
will be used if None.
================ ==================================================================
Keyword Description
================ ==================================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
.. Note:: Not all kinds of artist are supported by the legend command.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
.. seealso::
:ref:`plotting-guide-legend`.
"""
if len(args)==0:
handles, labels = self.get_legend_handles_labels()
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(self._get_legend_handles(),
labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
# Why do we need to call "flatten" here? -JJL
# handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Make a step plot.
Call signature::
step(x, y, *args, **kwargs)
Additional keyword args to :func:`step` are the same as those
for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1]
If 'post', that interval has level y[i]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
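A minimal usage sketch (assuming an existing Axes instance ``ax``)::
    ax.step([1, 2, 3, 4], [1, 2, 0, 3], where='mid')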
"""
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Call signature::
bar(left, height, width=0.8, bottom=0, **kwargs)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*error_kw* dictionary of kwargs to be passed to
errorbar method. *ecolor* and *capsize*
may be specified here rather than as
independent kwargs.
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0,0,0,0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0,0,0,0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "incompatible sizes: argument 'left' must be length %d or scalar" % nbars
assert len(height)==nbars, ("incompatible sizes: argument 'height' must be length %d or scalar" %
nbars)
assert len(width)==nbars, ("incompatible sizes: argument 'width' must be length %d or scalar" %
nbars)
assert len(bottom)==nbars, ("incompatible sizes: argument 'bottom' must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits( left )
width = self.convert_xunits( width )
if xerr is not None:
xerr = self.convert_xunits( xerr )
if self.yaxis is not None:
bottom = self.convert_yunits( bottom )
height = self.convert_yunits( height )
if yerr is not None:
yerr = self.convert_yunits( yerr )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt=None, **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
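# Usage sketch for bar(), following the (left, height) calling convention
# documented above; plt and the numeric values are assumptions:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.bar([0, 1, 2], [3, 5, 2], width=0.6, yerr=[0.3, 0.5, 0.2],
#          color='c', ecolor='k', align='center')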
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
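# Usage sketch for barh(), which mirrors bar() with the axes swapped;
# plt and the values below are illustrative assumptions:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.barh([0, 1, 2], [4, 7, 3], height=0.5, color='m')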
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
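# Usage sketch: two horizontal spans occupying y in [10, 15); plt and the
# ranges are assumptions:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.broken_barh([(1, 2), (5, 1.5)], (10, 5), facecolors=('black', 'red'))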
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-',
bottom=None, label=None):
"""
Create a stem plot.
Call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using

*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This `document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom,bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
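# Usage sketch for stem() (plt and the sample data are assumptions):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(0.1, 2 * np.pi, 10)
#   fig, ax = plt.subplots()
#   ax.stem(x, np.cos(x), linefmt='b-', markerfmt='bo', basefmt='r-')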
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
The pie chart will probably look best if the figure and axes are
square, e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = 'left' if xt > 0 else 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
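# Usage sketch for pie() on a square axes, as recommended in the docstring;
# plt, the fractions, and the labels are illustrative assumptions:
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure(figsize=(8, 8))
#   ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#   ax.pie([0.3, 0.4, 0.3], labels=['a', 'b', 'c'],
#          autopct='%1.1f%%', shadow=True, startangle=90)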
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot an errorbar graph.
Call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
If a scalar number, len(N) array-like object, or an Nx1
array-like object, errorbars are drawn at +/-value relative
to the data.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
*fmt*: '-'
The plot format symbol. If *fmt* is *None*, only the
errorbars are plotted. This is used for adding
errorbars to a bar plot, for example.
*ecolor*: [ *None* | mpl color ]
A matplotlib color arg which gives the color the errorbar lines;
if *None*, use the marker color.
*elinewidth*: scalar
The linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
The length of the error bar caps in points
*capthick*: scalar
An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
*errorevery*: positive integer
subsamples the errorbars. E.g., if errorevery=5, errorbars for every
5th datapoint will be plotted. The data plot itself still shows
all data points.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
if errorevery < 1:
raise ValueError('errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
holdstate = self._hold
self._hold = True
label = kwargs.pop("label", None)
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
lines_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
lines_kw['zorder'] = kwargs['zorder']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if capthick is not None:
# 'mew' has higher priority, I believe,
# if both 'mew' and 'markeredgewidth' exist.
# So, save capthick to markeredgewidth so that
# explicitly setting mew or markeredgewidth will
# over-write capthick.
plot_kw['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'mew' or 'markeredgewidth' to over-ride capthick.
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if 'alpha' in kwargs:
plot_kw['alpha'] = kwargs['alpha']
if 'zorder' in kwargs:
plot_kw['zorder'] = kwargs['zorder']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
yo, _ = xywhere(y, right, everymask)
lo, ro= xywhere(left, right, everymask)
barcols.append( self.hlines(yo, lo, ro, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims & everymask)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims & everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
leftlo, ylo = xywhere(left, y, everymask)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims & everymask)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims & everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
rightup, yup = xywhere(right, y, everymask)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
xo, _ = xywhere(x, lower, everymask)
lo, uo= xywhere(lower, upper, everymask)
barcols.append( self.vlines(xo, lo, uo, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims & everymask)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims & everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
xlo, lowerlo = xywhere(x, lower, everymask)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims & everymask)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims & everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
xup, upperup = xywhere(x, upper, everymask)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines.color_cycle.next()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines), tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
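# Usage sketch showing symmetric y errors and asymmetric (2xN) x errors;
# plt and the arrays are assumptions:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.arange(5)
#   y = x ** 2
#   fig, ax = plt.subplots()
#   ax.errorbar(x, y, yerr=0.5, xerr=[[0.1] * 5, [0.3] * 5],
#               fmt='o', ecolor='g', capsize=4)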
def boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None):
"""
Make a box and whisker plot.
Call signature::
boxplot(x, notch=False, sym='+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Function Arguments:
*x* :
Array or a sequence of vectors.
*notch* : [ False (default) | True ]
If False (default), produces a rectangular box plot.
If True, will produce a notched box plot
*sym* : [ default 'b+' ]
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
*vert* : [ False | True (default) ]
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
*whis* : [ default 1.5 ]
Defines the length of the whiskers as a function of the inner
quartile range. They extend to the most extreme data point
within ( ``whis*(75%-25%)`` ) data range.
*bootstrap* : [ *None* (default) | integer ]
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see McGill, R.,
Tukey, J.W., and Larsen, W.A., 1978, and Kendall and Stuart,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to determine its 95% confidence intervals.
Values between 1000 and 10000 are recommended.
*usermedians* : [ default None ]
An array or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed directly as normal.
*conf_intervals* : [ default None ]
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (assuming notch is True). When an element of
*conf_intervals* is None, boxplot computes the notches using the method
specified by the other kwargs (e.g., *bootstrap*).
*positions* : [ default 1,2,...,n ]
Sets the horizontal positions of the boxes. The ticks and limits
are automatically set to match the positions.
*widths* : [ default 0.5 ]
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smaller.
*patch_artist* : [ False (default) | True ]
If False produces boxes with the Line2D artist
If True produces boxes with the Patch artist
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(assuming vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizontal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
n-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
def bootstrapMedian(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentile = [2.5,97.5]
estimate = np.zeros(N)
for n in range(N):
bsIndex = np.random.random_integers(0,M-1,M)
bsData = data[bsIndex]
estimate[n] = mlab.prctile(bsData, 50)
CI = mlab.prctile(estimate, percentile)
return CI
def computeConfInterval(data, med, iq, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = bootstrapMedian(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
# Estimate notch locations using Gaussian-based
# asymptotic approximation.
#
# For discussion: McGill, R., Tukey, J. W.,
# and Larsen, W. A. (1978) "Variations of
# Boxplots", The American Statistician, 32:12-16.
N = len(data)
notch_min = med - 1.57*iq/np.sqrt(N)
notch_max = med + 1.57*iq/np.sqrt(N)
return notch_min, notch_max
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# sanitize user-input medians
msg1 = "usermedians must either be a list/tuple or a 1d array"
msg2 = "usermedians' length must be compatible with x"
if usermedians is not None:
if hasattr(usermedians, 'shape'):
if len(usermedians.shape) != 1:
raise ValueError(msg1)
elif usermedians.shape[0] != col:
raise ValueError(msg2)
elif len(usermedians) != col:
raise ValueError(msg2)
#sanitize user-input confidence intervals
msg1 = "conf_intervals must either be a list of tuples or a 2d array"
msg2 = "conf_intervals' length must be compatible with x"
msg3 = "each conf_interval, if specified, must have two values"
if conf_intervals is not None:
if hasattr(conf_intervals, 'shape'):
if len(conf_intervals.shape) != 2:
raise ValueError(msg1)
elif conf_intervals.shape[0] != col:
raise ValueError(msg2)
elif conf_intervals.shape[1] != 2:
raise ValueError(msg3)
else:
if len(conf_intervals) != col:
raise ValueError(msg2)
for ci in conf_intervals:
if ci is not None and len(ci) != 2:
raise ValueError(msg3)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i, pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
if row==0:
# no data, skip this position
continue
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# replace with input medians if available
if usermedians is not None:
if usermedians[i] is not None:
med = usermedians[i]
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'notch' plot
if notch:
# conf. intervals from user, if available
if conf_intervals is not None and conf_intervals[i] is not None:
notch_max = np.max(conf_intervals[i])
notch_min = np.min(conf_intervals[i])
else:
notch_min, notch_max = computeConfInterval(d, med, iq,
bootstrap)
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# calculate 'regular' plot
else:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
def to_vc(xs,ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi,yi in zip(xs,ys):
verts.append( (xi,yi) )
verts.append( (0,0) ) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO]*(len(verts)-2) + \
[mpath.Path.CLOSEPOLY]
return verts,codes
def patch_list(xs,ys):
verts,codes = to_vc(xs,ys)
path = mpath.Path( verts, codes )
patch = mpatches.PathPatch(path)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
def dopatch(xs,ys):
return patch_list(xs,ys)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
def dopatch(xs,ys):
xs,ys = ys,xs # flip X, Y
return patch_list(xs,ys)
if patch_artist:
median_color = 'k'
else:
median_color = 'r'
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
if patch_artist:
boxes.extend(dopatch(box_x, box_y))
else:
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, median_color+'-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
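# Usage sketch: one notched box per column of a 2-D array; plt and the
# random data are assumptions:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   data = np.random.randn(100, 3)
#   fig, ax = plt.subplots()
#   result = ax.boxplot(data, notch=True, sym='g+', bootstrap=1000)
#   # result is a dict with 'boxes', 'medians', 'whiskers', 'caps', 'fliers'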
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
Make a scatter plot.
Call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are
converted to 1-D sequences which must be of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
%(MarkerTable)s
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance or registered
name. If *None*, defaults to rc ``image.cmap``. *cmap* is
only used if *c* is an array of floats.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are *None*, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: ``0 <= scalar <= 1`` or *None*
The alpha value for the patches
*linewidths*: [ *None* | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
The string 'none' to plot faces with no outlines
*facecolors*:
The string 'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = np.asanyarray(c)
if c.size == x.size:
c = np.ma.ravel(c)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
mplDeprecation) # 2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
collection = mcoll.PathCollection(
(path,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = kwargs.pop('transform', self.transData),
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0 :
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0 :
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
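# Usage sketch: color-map the markers by a value array and scale their size;
# plt, the random data, and the colormap name are assumptions:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x, y = np.random.rand(2, 50)
#   fig, ax = plt.subplots()
#   sc = ax.scatter(x, y, s=80, c=y, cmap='jet', edgecolors='none')
#   fig.colorbar(sc)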
@docstring.dedent_interpd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear', extent = None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin* / *vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the returned collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale=='log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]]+=1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]]+=1
# threshold
if mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i,j]<mincnt:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i,j]<mincnt:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals)>mincnt:
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals)>mincnt:
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
offsets[nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:,0] *= sx
offsets[:,1] *= sy
offsets[:,0] += xmin
offsets[:,1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs,:]
accum = accum[good_idxs]
polygon = np.zeros((6, 2), float)
polygon[:,0] = sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:,1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
if edgecolors=='none':
edgecolors = 'face'
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
if isinstance(norm, mcolors.LogNorm):
if (accum==0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins!=None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse)-1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind==i])
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i,val in enumerate(xcoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(thismin, 0), (thismin, 0.05), (thismax, 0.05), (thismax, 0)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i,val in enumerate(ycoarse):
thismin = coarse[i]
if i<len(coarse)-1:
thismax = coarse[i+1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]: continue
verts.append([(0, thismin), (0.0, thismax), (0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
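# Usage sketch: log-color-scaled density of 10000 random points with the
# marginal densities attached; plt and the data are assumptions:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.random.standard_normal(10000)
#   y = 2 * x + np.random.standard_normal(10000)
#   fig, ax = plt.subplots()
#   hb = ax.hexbin(x, y, gridsize=40, bins='log', marginals=True)
#   fig.colorbar(hb)
#   # hb.get_array() holds the (log-transformed) counts per hexagon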
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
Call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*). Uses FancyArrow patch to construct the arrow.
Optional kwargs control the arrow construction and properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
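# Usage sketch (plt assumed): an arrow from (0.2, 0.2) to (0.7, 0.6),
# with illustrative FancyArrow styling kwargs:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.arrow(0.2, 0.2, 0.5, 0.4, head_width=0.03, length_includes_head=True)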
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def stackplot(self, x, *args, **kwargs):
return mstack.stackplot(self, x, *args, **kwargs)
stackplot.__doc__ = mstack.stackplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None):
if not self._hold: self.cla()
stream_container = mstream.streamplot(self, x, y, u, v,
density=density,
linewidth=linewidth,
color=color,
cmap=cmap,
norm=norm,
arrowsize=arrowsize,
arrowstyle=arrowstyle,
minlength=minlength,
transform=transform)
return stream_container
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Plot filled polygons.
Call signature::
fill(*args, **kwargs)
*args* is a variable length argument, allowing for multiple
*x*, *y* pairs with an optional color format string; see
:func:`~matplotlib.pyplot.plot` for details on the argument
parsing. For example, to plot a polygon with vertices at *x*,
*y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
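# Usage sketch (plt and data assumed): fill one blue and one red polygon
# in a single call, as described in the docstring above:
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   t = np.linspace(0, 2 * np.pi, 100)
#   fig, ax = plt.subplots()
#   ax.fill(np.cos(t), np.sin(t), 'b',
#           2 + np.cos(t), np.sin(t), 'r', alpha=0.3)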
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
**kwargs):
"""
Make filled polygons between two curves.
Call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x* :
An N-length array of the x data
*y1* :
An N-length array (or scalar) of the y data
*y2* :
An N-length array (or scalar) of the y data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the filled region will only occur on explicit
values in the *x* array.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x)*y1
if y2.ndim == 0:
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind-1, 0)
x_values = x[im1:ind+1]
diff_values = y1[im1:ind+1] - y2[im1:ind+1]
y1_values = y1[im1:ind+1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
diff_root_y = np.interp(diff_root_x, x_values, y1_values)
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N+1] = end
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
"""
Make filled polygons between two horizontal curves.
Call signature::
fill_betweenx(y, x1, x2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* where
``where==True``
*y* :
An N-length array of the y data
*x1* :
An N-length array (or scalar) of the x data
*x2* :
An N-length array (or scalar) of the x data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is a N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = ma.masked_invalid(self.convert_yunits(y))
x1 = ma.masked_invalid(self.convert_xunits(x1))
x2 = ma.masked_invalid(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = np.ones_like(y)*x1
if x2.ndim == 0:
x2 = np.ones_like(y)*x2
if where is None:
where = np.ones(len(y), np.bool)
else:
where = np.asarray(where, np.bool)
if not (y.shape == x1.shape == x2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2slice[0], yslice[0]
Y[N+1] = x2slice[-1], yslice[-1]
Y[1:N+1,0] = x1slice
Y[1:N+1,1] = yslice
Y[N+2:,0] = x2slice[::-1]
Y[N+2:,1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an image on the axes.
Call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ *None* | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'none', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos'
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
If *interpolation* is ``'none'``, then no interpolation is
performed on the Agg, ps and pdf backends. Other backends
will fall back to 'nearest'.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, default is ``normalization()``. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ *None* | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
or *None*
*origin*: [ *None* | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ *None* | scalars (left, right, bottom, top) ]
Data limits for the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ *None* | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties.
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Create a pseudocolor plot of a 2-D array.
Note: pcolor can be very slow for large arrays; consider
using the similar but much faster
:func:`~matplotlib.pyplot.pcolormesh` instead.
Call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to::
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
Note: the default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
.. seealso::
:func:`~matplotlib.pyplot.pcolormesh`
For an explanation of the differences between
pcolor and pcolormesh.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
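# Illustrative sketch of the grid orientation described in the pcolor docstring
# above (not part of the Axes class; it assumes only the public pyplot API).
# With C indexed as C[x, y], the transpose is what makes rows run along y:
def _demo_pcolor_orientation():
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.arange(5)
    y = np.arange(3)
    X, Y = np.meshgrid(x, y)              # X, Y have shape (3, 5)
    C = np.random.rand(len(x), len(y))    # shape (5, 3), indexed as C[x, y]
    plt.pcolor(X, Y, C.T)                 # C.T so that C is read as C(y, x)
    plt.colorbar()
    plt.show()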
import glob
import copy
import numpy as np
import os
import astropy.table as tbl
from astropy import time, coordinates as coord, units as u
from astropy.stats import LombScargle
from astropy.io import fits
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from scipy.stats import sigmaclip
import scipy.signal
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from matplotlib.patches import ConnectionPatch
#i, TIC_ID, RA, Dec, Year, Mon, Day, HR, Min, g_mags, TESS_mags, distance, SpT, mass, Prot, Ro, Evry_Erg, e_Evry_Erg, TESS_erg, e_TESS_Erg, evr_peakFF, tess_peakFF, n_peaks, tot_BB_data, e_tot_BB_data, tot_BB_data_trap, e_tot_BB_data_trap, E_tot_BB_data_trap, tot_BB_sampl, e_tot_BB_sampl, E_tot_BB_sampl, FWHM_BB_data, e_FWHM_BB_data, FWHM_BB_sampl, e_FWHM_BB_sampl, E_FWHM_BB_sampl, FWHM, impulse
i=[] #0
TIC_ID=[] #1
RA=[] #2
Dec=[] #3
Year=[] #4
Mon=[] #5
Day=[] #6
HR=[] #7
Min=[] #8
g_mags=[] #9
TESS_mags=[] #10
distance=[] #11
SpT=[] #12
mass=[] #13
Prot=[] #14
Ro=[] #15
Evry_Erg=[] #16
e_Evry_Erg=[] #17
TESS_erg=[] #18
e_TESS_Erg=[] #19
evr_peakFF=[] #20
tess_peakFF=[] #21
n_peaks=[] #22
tot_BB_data=[] #23
e_tot_BB_data=[] #24
tot_BB_data_trap=[] #25
e_tot_BB_data_trap=[] #26
E_tot_BB_data_trap=[] #27
tot_BB_sampl=[] #28
e_tot_BB_sampl=[] #29
E_tot_BB_sampl=[] #30
FWHM_BB_data=[] #31
e_FWHM_BB_data=[] #32
FWHM_BB_sampl=[] #33
e_FWHM_BB_sampl=[] #34
E_FWHM_BB_sampl=[] #35
FWHM=[] #36
impulse=[] #37
with open("evryflare_III_table_I.csv","r") as INFILE:
next(INFILE)
for lines in INFILE:
i.append(int(lines.split(",")[0])) #0
TIC_ID.append(int(lines.split(",")[1])) #1
RA.append(float(lines.split(",")[2])) #2
Dec.append(float(lines.split(",")[3])) #3
Year.append(int(lines.split(",")[4])) #4
Mon.append(int(lines.split(",")[5])) #5
Day.append(int(lines.split(",")[6])) #6
HR.append(int(lines.split(",")[7])) #7
Min.append(int(lines.split(",")[8])) #8
g_mags.append(float(lines.split(",")[9])) #9
TESS_mags.append(float(lines.split(",")[10])) #10
distance.append(float(lines.split(",")[11])) #11
SpT.append(str(lines.split(",")[12])) #12
mass.append(float(lines.split(",")[13])) #13
Prot.append(float(lines.split(",")[14])) #14
Ro.append(float(lines.split(",")[15])) #15
Evry_Erg.append(float(lines.split(",")[16])) #16
e_Evry_Erg.append(float(lines.split(",")[17])) #17
TESS_erg.append(float(lines.split(",")[18])) #18
e_TESS_Erg.append(float(lines.split(",")[19])) #19
evr_peakFF.append(float(lines.split(",")[20])) #20
tess_peakFF.append(float(lines.split(",")[21])) #21
n_peaks.append(int(lines.split(",")[22])) #22
tot_BB_data.append(float(lines.split(",")[23])) #23
e_tot_BB_data.append(float(lines.split(",")[24])) #24
tot_BB_data_trap.append(float(lines.split(",")[25])) #25
e_tot_BB_data_trap.append(float(lines.split(",")[26])) #26
E_tot_BB_data_trap.append(float(lines.split(",")[27])) #27
tot_BB_sampl.append(float(lines.split(",")[28])) #28
e_tot_BB_sampl.append(float(lines.split(",")[29])) #29
E_tot_BB_sampl.append(float(lines.split(",")[30])) #30
FWHM_BB_data.append(float(lines.split(",")[31])) #31
e_FWHM_BB_data.append(float(lines.split(",")[32])) #32
FWHM_BB_sampl.append(float(lines.split(",")[33])) #33
e_FWHM_BB_sampl.append(float(lines.split(",")[34])) #34
E_FWHM_BB_sampl.append(float(lines.split(",")[35])) #35
FWHM.append(float(lines.split(",")[36])) #36
impulse.append(float(lines.split(",")[37])) #37
i = np.array(i)
TIC_ID=np.array(TIC_ID)
mass = np.array(mass)
FWHM = np.array(FWHM)
g_mags= np.array(g_mags)
distance= np.array(distance)
Evry_Erg = np.array(Evry_Erg)
TESS_Erg = np.array(TESS_erg)
e_Evry_Erg = np.array(e_Evry_Erg)
Evry_Erg_bol = np.log10((10.0**Evry_Erg)/0.19)
SpT = np.array(SpT)
FWHM_BB_data = np.array(FWHM_BB_data)
tot_BB_data = np.array(tot_BB_data)
e_FWHM_BB_data = np.array(e_FWHM_BB_data)
e_tot_BB_data = np.array(e_tot_BB_data)
FWHM_BB_sampl = np.array(FWHM_BB_sampl)
tot_BB_sampl = np.array(tot_BB_sampl)
tot_BB_data_trap=np.array(tot_BB_data_trap)
e_tot_BB_data_trap=np.array(e_tot_BB_data_trap)
E_tot_BB_data_trap=np.array(E_tot_BB_data_trap)
impulse = np.array(impulse)
evr_peakFF = np.array(evr_peakFF)
tess_peakFF = np.array(tess_peakFF)
color = evr_peakFF - tess_peakFF
n_peaks = np.array(n_peaks).astype(float)
Ro = np.array(Ro)
Prot = np.array(Prot)
e_FWHM_BB_data[e_FWHM_BB_data<300.0]=300.0
e_tot_BB_data[e_tot_BB_data<300.0]=300.0
#e_color = np.absolute(color)*np.sqrt((evry_pk_err/evr_peakFF)**2.0 + (tess_pk_err/tess_peakFF)**2.0)
#e_impulse = np.absolute(impulse)*np.sqrt((evry_pk_err/evr_peakFF)**2.0 + (1.0/FWHM)**2.0)
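# The two commented-out lines above combine fractional uncertainties in
# quadrature. A minimal standalone sketch of that propagation rule
# (illustrative only; evry_pk_err and tess_pk_err are assumed to be per-flare
# peak-flux uncertainties that are not defined in this excerpt):
def _propagate_quadrature_err(value, a, sigma_a, b, sigma_b):
    """Uncertainty of a derived quantity built from a and b, combined in quadrature."""
    return np.absolute(value) * np.sqrt((sigma_a / a)**2.0 + (sigma_b / b)**2.0)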
def compute_1sigma_CI(input_array):
sorted_input_array = np.sort(input_array)
low_ind = int(0.16*len(input_array))
high_ind = int(0.84*len(input_array))
bot_val = (sorted_input_array[:low_ind])[-1]
top_val = (sorted_input_array[high_ind:])[0]
bot_arr_err = abs(np.nanmedian(input_array) - bot_val)
top_arr_err = abs(np.nanmedian(input_array) - top_val)
return (np.nanmedian(input_array), bot_arr_err, top_arr_err)
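# Minimal usage sketch for compute_1sigma_CI (illustrative only): for a
# roughly Gaussian sample, the lower and upper offsets should both come out
# close to the sample standard deviation.
def _demo_compute_1sigma_CI():
    rng = np.random.default_rng(0)
    sample = rng.normal(loc=10.0, scale=2.0, size=10000)
    med, low_err, high_err = compute_1sigma_CI(sample)
    print(med, low_err, high_err)  # expect roughly 10, 2, 2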
def get_temp_data(i):
data_times=[]
data_temps=[]
data_lowerr=[]
data_upperr=[]
data_formal_lowerr=[]
data_formal_upperr=[]
with open(str(i)+"_flaretemp_data_lc.csv","r") as INFILE:
for lines in INFILE:
data_times.append(float(lines.split(",")[0]))
data_temps.append(float(lines.split(",")[1]))
data_lowerr.append(float(lines.split(",")[2]))
data_upperr.append(float(lines.split(",")[3]))
data_formal_lowerr.append(float(lines.split(",")[4]))
data_formal_upperr.append(float(lines.split(",")[5]))
data_times=np.array(data_times)
data_temps=np.array(data_temps)
data_lowerr=np.array(data_lowerr)
data_upperr=np.array(data_upperr)
data_formal_lowerr=np.array(data_formal_lowerr)
data_formal_upperr=np.array(data_formal_upperr)
return (data_times, data_temps, data_lowerr, data_upperr, data_formal_lowerr, data_formal_upperr)
def get_temp_model(i):
model_times=[]
model_temps=[]
model_lowerr=[]
model_upperr=[]
with open(str(i)+"_flaretemp_model_lc.csv","r") as INFILE:
for lines in INFILE:
model_times.append(float(lines.split(",")[0]))
model_temps.append(float(lines.split(",")[1]))
model_lowerr.append(float(lines.split(",")[2]))
model_upperr.append(float(lines.split(",")[3]))
model_times=np.array(model_times)
model_temps=np.array(model_temps)
model_lowerr=np.array(model_lowerr)
model_upperr=np.array(model_upperr)
return (model_times, model_temps, model_lowerr, model_upperr)
def get_flare_fits(i):
fit_times=[]
fit_evry_fracflux=[]
fit_tess_fracflux=[]
with open(str(i)+"_flare_fits_lc.csv","r") as INFILE:
for lines in INFILE:
fit_times.append(float(lines.split(",")[0]))
fit_evry_fracflux.append(float(lines.split(",")[1]))
fit_tess_fracflux.append(float(lines.split(",")[2]))
fit_times=np.array(fit_times)
fit_evry_fracflux=np.array(fit_evry_fracflux)
fit_tess_fracflux=np.array(fit_tess_fracflux)
return (fit_times, fit_evry_fracflux, fit_tess_fracflux)
def get_fracflux(i):
x_tess_and_evry=[]
y_tess_and_evry=[]
y_err_tess_and_evry=[]
flag=[]
with open(str(i)+"_flare_fluxes_lc.csv","r") as INFILE:
for lines in INFILE:
x_tess_and_evry.append(float(lines.split(",")[0]))
y_tess_and_evry.append(float(lines.split(",")[1]))
y_err_tess_and_evry.append(float(lines.split(",")[2]))
flag.append(int(lines.split(",")[3]))
x_tess_and_evry = np.array(x_tess_and_evry)
##########################################################
# @author: pkc/Vincent
# --------------------------------------------------------
# Based on the MATLAB code by <NAME>
# modification of python code by sajid
#
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import itertools
import sys
def diagonal_split(x):
''' pre-processing steps interms of
cropping to enable the diagonal
splitting of the input image
'''
h, w = x.shape
cp_x = x
''' cropping the rows '''
if (np.mod(h, 4)==1):
cp_x = cp_x[:-1]
elif(np.mod(h, 4)==2):
cp_x = cp_x[1:-1]
elif(np.mod(h, 4)==3):
cp_x = cp_x[1:-2]
''' cropping the columns'''
if (np.mod(w, 4)==1):
cp_x = cp_x[:, :-1]
elif(np.mod(w, 4)==2):
cp_x = cp_x[:,1:-1]
elif(np.mod(w, 4)==3):
cp_x = cp_x[:, 1:-2]
x = cp_x
h, w = x.shape
if((np.mod(h, 4)!=0) or (np.mod(w, 4)!=0)):
print('[!] diagonal splitting not possible due to cropping issue')
print('[!] re-check the cropping portion')
sys.exit()
row_indices = np.arange(0, h)
col_indices = np.arange(0, w)
row_split_u = row_indices[::2]
row_split_d = np.asanyarray(list(set(row_indices)-set(row_split_u)))
col_split_l = col_indices[::2]
col_split_r = np.asanyarray(list(set(col_indices)-set(col_split_l)))
''' ordered pair of pre-processing
of the diagonal elements
and sub-sequent splits of the image
'''
op1 = list(itertools.product(row_split_u, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op1]), np.asanyarray([so for _, so in op1])]
s_a1 = x[ind]
s_a1 = s_a1.reshape((len(row_split_u), len(col_split_l)))
op2 = list(itertools.product(row_split_d, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op2]), np.asanyarray([so for _, so in op2])]
s_a2 = x[ind]
s_a2 = s_a2.reshape((len(row_split_d), len(col_split_r)))
op3 = list(itertools.product(row_split_d, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op3]), np.asanyarray([so for _, so in op3])]
s_b1 = x[ind]
s_b1 = s_b1.reshape((len(row_split_d), len(col_split_l)))
op4 = list(itertools.product(row_split_u, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op4]), np.asanyarray([so for _, so in op4])]
s_b2 = x[ind]
s_b2 = s_b2.reshape((len(row_split_u), len(col_split_r)))
return(s_a1, s_a2, s_b1, s_b2)
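# Minimal usage sketch for diagonal_split (illustrative only): the four
# sub-images are built from interleaved rows/columns of the (cropped) input,
# so each has half the linear size and the pairs can serve as independent
# half-images for an FRC comparison.
def _demo_diagonal_split():
    img = np.random.rand(101, 102)        # odd sizes exercise the cropping branches
    s_a1, s_a2, s_b1, s_b2 = diagonal_split(img)
    print(s_a1.shape, s_a2.shape, s_b1.shape, s_b2.shape)  # all (50, 50)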
def get_frc_img(img, frc_img_lx, center=None):
''' Returns a cropped image version of input image "img"
img: input image
center: cropping is performed about this reference point, given as an
offset (in pixels) from the center of the input image "img";
if None, the image center itself is used to measure the
lengths in x and y.
frc_img_lx: length of cropped image in x as well as y. Also
the cropped image is made to be square image for
the FRC calculation
'''
h, w = img.shape
cy = round(min(h, w)/2)
if center is None:
cy = cy
else:
cy = cy + center
ep = cy + round(frc_img_lx/2)
sp = ep - frc_img_lx
frc_img = img[sp:ep, sp:ep]
return frc_img
def ring_indices(x, inscribed_rings=True, plot=False):
print("ring plots is:", plot)
#read the shape and dimensions of the input image
shape = np.shape(x)
dim = np.size(shape)
'''Depending on the dimension of the image 2D/3D,
create an array of integers which increase with
distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = np.floor(nc/2)
r = np.arange(nr)-nrdc
c = np.arange(nc)-ncdc
[R,C] = np.meshgrid(r,c)
index = np.round(np.sqrt(R**2+C**2))
elif dim == 3 :
nr,nc,nz = shape
nrdc = np.floor(nr/2)+1
ncdc = np.floor(nc/2)+1
nzdc = np.floor(nz/2)+1
r = np.arange(nr)-nrdc + 1
c = np.arange(nc)-ncdc + 1
z = np.arange(nc)-nzdc + 1
[R,C,Z] = np.meshgrid(r,c,z)
index = np.round(np.sqrt(R**2+C**2+Z**2))+1
else :
print('input is neither a 2d nor a 3d array')
''' if inscribed_rings is True then the outermost
ring used to evaluate the FRC will be the circle
inscribed in the square input image of size L.
(i.e. FRC_r <= L/2). Else the arcs of the rings
beyond the inscribed circle will also be
considered while determining FRC
(i.e. FRC_r<=sqrt((L/2)^2 + (L/2)^2))
'''
if (inscribed_rings == True):
maxindex = nr/2
else:
maxindex = np.max(index)
#output = np.zeros(int(maxindex),dtype = complex)
''' In the next step the output is generated. The output is an array of length
maxindex. The elements in this array correspond to the sum of all the elements
in the original array corresponding to the integer position of the output array
divided by the number of elements in the index array with the same value as the
integer position.
Depending on the size of the input array, use either the pixel or index method.
By-pixel method for large arrays and by-index method for smaller ones.
'''
print('performed by index method')
indices = []
for i in np.arange(int(maxindex)):
indices.append(np.where(index == i))
if plot is True:
img_plane = np.zeros((nr, nc))
for i in range(int(maxindex)):
if ((i%20)==0):
img_plane[indices[i]]=1.0
plt.imshow(img_plane,cmap="summer")
if inscribed_rings is True:
plt.title(' FRC rings with the max radius as that\
\n of the inscribed circle in the image (spacing of 20 [px] between rings)')
else:
plt.title(' FRC rings extending beyond the radius of\
\n the inscribed circle in the image (spacing of 20 [px] between rings)')
return(indices)
def spinavej(x, inscribed_rings=True):
''' modification of code by sajid and
Based on the MATLAB code by <NAME>
'''
shape = np.shape(x)
dim = np.size(shape)
''' Depending on the dimension of the image 2D/3D, create an array of integers
which increase with distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = np.floor(nc/2)
r = np.arange(nr)-nrdc
c = np.arange(nc)-ncdc
[R,C] = np.meshgrid(r,c)
index = np.round(np.sqrt(R**2+C**2))
indexf = np.floor(np.sqrt(R**2+C**2))
indexC = np.ceil(np.sqrt(R**2+C**2))
print(np.shape(index))
elif dim == 3 :
nr,nc,nz = shape
nrdc = np.floor(nr/2)+1
ncdc = np.floor(nc/2)+1
nzdc = np.floor(nz/2)+1
r = np.arange(nr)-nrdc + 1
c = np.arange(nc)-ncdc + 1
z = np.arange(nc)-nzdc + 1
[R,C,Z] = np.meshgrid(r,c,z)
index = np.round(np.sqrt(R**2+C**2+Z**2))+1
else :
print('input is neither a 2d nor a 3d array')
'''
The index array has integers from 1 to maxindex arranged according to distance
from the center
'''
if (inscribed_rings == True):
maxindex = nr/2
else:
maxindex = np.max(index)
output = np.zeros(int(maxindex),dtype = complex)
''' In the next step output is generated. The output is an array of length
maxindex. The elements in this array correspond to the sum of all the elements
in the original array corresponding to the integer position of the output array
divided by the number of elements in the index array with the same value as the
integer position.
Depending on the size of the input array, use either the pixel or index method.
By-pixel method for large arrays and by-index method for smaller ones.
'''
print('performed by index method')
indices = []
indicesf, indicesC = [], []
for i in np.arange(int(maxindex)):
#indices.append(np.where(index == i+1))
indicesf.append(np.where(indexf == i))
indicesC.append(np.where(indexC == i))
for i in np.arange(int(maxindex)):
#output[i] = sum(x[indices[i]])/len(indices[i][0])
output[i] = (sum(x[indicesf[i]])+sum(x[indicesC[i]]))/2
return output
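# Minimal usage sketch for spinavej (illustrative only): it returns per-ring
# sums (averaged over the floor/ceil ring assignments), not per-ring means,
# which is sufficient for FRC because the ring populations cancel in the ratio.
def _demo_spinavej():
    img = np.random.rand(64, 64)
    F = fft.fftshift(fft.fft2(img))
    ring_power = spinavej(np.abs(F)**2, inscribed_rings=True)
    print(ring_power.shape)  # (32,): one value per ring out to the inscribed circle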
def FRC( i1, i2, thresholding='half-bit', inscribed_rings=True, analytical_arc_based=True, info_split=True):
''' Check whether the dimensions of input image is
square or not
'''
if ( np.shape(i1) != np.shape(i2) ) :
print('\n [!] input images must have the same dimensions\n')
import sys
sys.exit()
if ( np.shape(i1)[0] != np.shape(i1)[1]) :
print('\n [!] input images must be squares\n')
import sys
sys.exit()
''' Performing the fourier transform of input
images to determine the FRC
'''
I1 = fft.fftshift(fft.fft2(i1))
I2 = fft.fftshift(fft.fft2(i2))
C = spinavej(I1*np.conjugate(I2), inscribed_rings=inscribed_rings)
C = np.real(C)
C1 = spinavej(np.abs(I1)**2, inscribed_rings=inscribed_rings)
C2 = spinavej(np.abs(I2)**2, inscribed_rings=inscribed_rings)
C = C.astype(np.float32)
C1 = np.real(C1).astype(np.float32)
C2 = np.real(C2).astype(np.float32)
FSC = abs(C)/np.sqrt(C1*C2)
x_fsc = np.arange(np.shape(C)[0])/(np.shape(i1)[0]/2)
ring_plots=False
if(inscribed_rings==True):
''' for rings with max radius
as L/2
'''
if (analytical_arc_based == True):
''' perimeter of circle based calculation to
determine n in each ring
'''
r = np.arange(np.shape(i1)[0]/2) # array (0:1:L/2-1)
n = 2*np.pi*r # perimeter of r's from above
n[0] = 1
eps = np.finfo(float).eps
#t1 = np.divide(np.ones(np.shape(n)),n+eps)
inv_sqrt_n = np.divide(np.ones(np.shape(n)),np.sqrt(n)) # 1/sqrt(n)
x_T = r/(np.shape(i1)[0]/2)
else:
''' no. of pixels along the border of each circle
is used to determine n in each ring
'''
indices = ring_indices( i1, inscribed_rings=True, plot=ring_plots)
N_ind = len(indices)
n = np.zeros(N_ind)
for i in range(N_ind):
n[i] = len(indices[i][0])
inv_sqrt_n = np.divide(np.ones(np.shape(n)),np.sqrt(n)) # 1/sqrt(n)
x_T = np.arange(N_ind)/(np.shape(i1)[0]/2)
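# Minimal usage sketch for FRC (illustrative only; the remainder of the
# routine, including the half-bit/one-bit threshold branches and the final
# plot/return, is assumed to follow below this excerpt):
def _demo_frc():
    img = np.random.rand(128, 128)
    half_a, _, half_b, _ = diagonal_split(img)   # two interleaved half-images
    FRC(half_a, half_b, thresholding='half-bit', inscribed_rings=True)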
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,h,w) are length of the object sizes
The heading angle is a rotation rad from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: <NAME>
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from sunrgbd_utils import extract_pc_in_box3d
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
DIST_THRESH = 0.1#0.2
VAR_THRESH = 5e-3
CENTER_THRESH = 0.1
LOWER_THRESH = 1e-6
NUM_POINT = 50
NUM_POINT_LINE = 10
LINE_THRESH = 0.1#0.2
MIND_THRESH = 0.1
NUM_POINT_SEM_THRESHOLD = 1
def check_upright(para_points):
return (para_points[0][-1] == para_points[1][-1]) and (para_points[1][-1] == para_points[2][-1]) and (para_points[2][-1] == para_points[3][-1])
def check_z(plane_equ, para_points):
return np.sum(para_points[:,2] + plane_equ[-1]) / 4.0 < LOWER_THRESH
def clockwise2counter(angle):
'''
@Args:
angle: clockwise from x axis, from 0 to 2*pi,
@Returns:
theta: counter clockwise, -pi / 2 ~ pi / 2, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
'''
return -((angle + np.pi / 2) % np.pi) + np.pi / 2;
def point2line_dist(points, a, b):
'''
@Args:
points: (N, 3)
a / b: (3,)
@Returns:
distance: (N,)
'''
x = b - a
t = np.dot(points - a, x) / np.dot(x, x)
c = a + t[:, None] * np.tile(x, (t.shape[0], 1))
return np.linalg.norm(points - c, axis=1)
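# Minimal numeric check for point2line_dist (illustrative only): the point
# (0, 1, 0) is at distance 1 from the line through a=(0,0,0) and b=(1,0,0),
# while (0.5, 0, 0) lies on it.
def _demo_point2line_dist():
    pts = np.array([[0.0, 1.0, 0.0], [0.5, 0.0, 0.0]])
    a = np.array([0.0, 0.0, 0.0])
    b = np.array([1.0, 0.0, 0.0])
    print(point2line_dist(pts, a, b))  # -> [1. 0.]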
def get_linesel(points, corners, direction):
''' corners:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
if direction == 'lower':
sel1 = point2line_dist(points, corners[0], corners[2]) < LINE_THRESH
sel2 = point2line_dist(points, corners[4], corners[6]) < LINE_THRESH
sel3 = point2line_dist(points, corners[0], corners[4]) < LINE_THRESH
sel4 = point2line_dist(points, corners[2], corners[6]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'upper':
sel1 = point2line_dist(points, corners[1], corners[3]) < LINE_THRESH
sel2 = point2line_dist(points, corners[5], corners[7]) < LINE_THRESH
sel3 = point2line_dist(points, corners[1], corners[5]) < LINE_THRESH
sel4 = point2line_dist(points, corners[3], corners[7]) < LINE_THRESH
return sel1, sel2, sel3, sel4
elif direction == 'left':
sel1 = point2line_dist(points, corners[0], corners[1]) < LINE_THRESH
sel2 = point2line_dist(points, corners[2], corners[3]) < LINE_THRESH
return sel1, sel2
elif direction == 'right':
sel1 = point2line_dist(points, corners[4], corners[5]) < LINE_THRESH
sel2 = point2line_dist(points, corners[6], corners[7]) < LINE_THRESH
return sel1, sel2
else:
raise AssertionError('direction must be lower / upper / left / right')
def get_linesel2(points, ymin, ymax, zmin, zmax, axis=0):
#sel3 = sweep(points, axis, ymax, 2, zmin, zmax)
#sel4 = sweep(points, axis, ymax, 2, zmin, zmax)
sel3 = np.abs(points[:,axis] - ymin) < LINE_THRESH
sel4 = np.abs(points[:,axis] - ymax) < LINE_THRESH
return sel3, sel4
''' ATTENTION: SUNRGBD, size_label is only half the actual size
'''
def params2bbox(center, size, angle):
''' from bbox_center, angle and size to bbox
@Args:
center: (3,)
size: (3,)
angle: -pi ~ pi, +x~+y: (0, pi/2), +x~-y: (0, -pi/2)
@Returns:
bbox: 8 x 3, order:
[[xmin, ymin, zmin], [xmin, ymin, zmax], [xmin, ymax, zmin], [xmin, ymax, zmax],
[xmax, ymin, zmin], [xmax, ymin, zmax], [xmax, ymax, zmin], [xmax, ymax, zmax]]
'''
xsize = size[0]
ysize = size[1]
zsize = size[2]
vx = np.array([np.cos(angle), np.sin(angle), 0])
vy = np.array([-np.sin(angle), np.cos(angle), 0])
vx = vx * np.abs(xsize) / 2
vy = vy * np.abs(ysize) / 2
vz = np.array([0, 0, np.abs(zsize) / 2])
bbox = np.array([\
center - vx - vy - vz, center - vx - vy + vz,
center - vx + vy - vz, center - vx + vy + vz,
center + vx - vy - vz, center + vx - vy + vz,
center + vx + vy - vz, center + vx + vy + vz])
return bbox
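# Minimal numeric check for params2bbox (illustrative only): with zero heading
# the eight corners span the axis-aligned box centered on `center` with the
# full edge lengths given in `size`.
def _demo_params2bbox():
    corners = params2bbox(center=np.array([1.0, 2.0, 3.0]),
                          size=np.array([2.0, 4.0, 6.0]),
                          angle=0.0)
    print(corners.min(axis=0))  # -> [0. 0. 0.]
    print(corners.max(axis=0))  # -> [2. 4. 6.]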
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, data_path=None, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(data_path, 'sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
# self.data_path = os.path.join('/scratch/cluster/yanght/Dataset/sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_' + split_set)
else:
AssertionError("v2 data is not prepared")
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_color_sem = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
semantics37 = point_color_sem[:, 6]
semantics10 = np.array([DC.class37_2_class10[k] for k in semantics37])
semantics10_multi = [DC.class37_2_class10_multi[k] for k in semantics37]
if not self.use_color:
point_cloud = point_color_sem[:, 0:3]
else:
point_cloud = point_color_sem[:,0:6]
point_cloud[:,3:6] = (point_color_sem[:,3:6]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
# new items
box3d_angles = np.zeros((MAX_NUM_OBJ,))
point_boundary_mask_z = np.zeros(self.num_points)
point_boundary_mask_xy = np.zeros(self.num_points)
point_boundary_offset_z = np.zeros([self.num_points, 3])
point_boundary_offset_xy = np.zeros([self.num_points, 3])
point_boundary_sem_z = np.zeros([self.num_points, 3+2+1])
point_boundary_sem_xy = np.zeros([self.num_points, 3+1+1])
point_line_mask = np.zeros(self.num_points)
point_line_offset = np.zeros([self.num_points, 3])
point_line_sem = np.zeros([self.num_points, 3+1])
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is the full length of the box edges,
# while sunrgbd_data.py dumps *half* lengths l,w,h, so we have to multiply by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
box3d_angles[i] = bbox[6]
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
'''
Implements the class-conditional HMM where the class label specifies one of C possible words, chosen from some vocabulary.
If the class is a word of length T, it has a deterministic left-to-right state transition matrix A
with T possible states.The t'th state should have an categorical emission distribution which generates t'th letter
in lower case with probability p1, in upper case with probability p1, a blank character "-" with prob p2,
and a random letter with prob p3.
Author : <NAME> (@karalleyna)
'''
import numpy as np
import matplotlib.pyplot as plt
from hmm_lib import HMMDiscrete
class Word:
'''
This class consists of components needed for a class-conditional Hidden Markov Model
with categorical distribution
Parameters
----------
word: str
Class label representing a word
p1: float
The probability of emitting the correct letter of the word (in either
uppercase or lowercase) for the current state
p2: float
The probability of the blank character
p3: float
The probability of each uppercase or lowercase letter other than the correct one
L : int
The number of letters used when constructing words
type_ : str
"all" : Includes both uppercase and lowercase letters
"lower" : Includes only lowercase letters
"upper" : Includes only uppercase letters
'''
def __init__(self, word, p1, p2, p3, L, type_):
self.word, self.T = word, len(word)
self.p1, self.p2, self.p3 = p1, p2, p3
self.type_ = type_
self.L = 2 * L if self.type_ == 'all' else L
self.init_state_dist = np.zeros((self.T + 1,))
self.init_state_dist[0] = 1
self.init_state_transition_matrix()
self.init_emission_probs()
def init_state_transition_matrix(self):
assert self.T > 0
A = np.zeros((self.T + 1, self.T + 1)) # transition-probability matrix
A[:-1, 1:] = np.eye(self.T)
A[-1, 0] = 1
self.A = A
def emission_prob_(self, letter):
ascii_no = ord(letter.upper()) - 65 # 65 :ascii number of A
idx = [ascii_no, ascii_no + self.L // 2] if self.type_ == 'all' else ascii_no
emission_prob = np.full((1, self.L), self.p3)
emission_prob[:, idx] = self.p1
return emission_prob
def init_emission_probs(self):
self.B = np.zeros((self.T, self.L)) # observation likelihoods
for i in range(self.T):
self.B[i] = self.emission_prob_(self.word[i])
self.B = np.c_[self.B, np.full((self.T, 1), self.p2), np.zeros((self.T, 1))]
self.B = np.r_[self.B, np.zeros((1, self.L + 2))]
self.B[-1, -1] = 1
def sample(self, n_word, random_states=None):
'''
n_word: int
The number of times sampled a word by HMMDiscrete
random_states: List[int]
The random states each of which is given to HMMDiscrete.sample as a parameter
'''
random_states = np.random.randint(0, 2 * n_word, n_word) if random_states is None or n_word > len(
random_states) else random_states
self.hmm_discrete = HMMDiscrete(self.A, self.B, self.init_state_dist)
return np.r_[[self.hmm_discrete.sample(self.T + 1, rand_stat)[1] for rand_stat in random_states]]
def likelihood(self, word, cls_prob):
'''
Calculates the class conditional likelihood for a particular word with
given class probability by using p(word|c) p(c) where c is the class itself
'''
return cls_prob * np.exp(self.hmm_discrete.forwards(word)[1])
def loglikelihood(self, X):
return np.array([self.hmm_discrete.forwards(x)[1] for x in X])
# converts the list of integers into a word
def decode(idx, L, type_):
if idx==L+2:
return ' '
elif idx==L:
return '-'
elif type_=="all" and idx >= L // 2:
return chr(idx - L // 2 + 65)
elif type_=="upper":
return chr(idx + 65)
return chr(idx + 97)
# encodes the word into list of integers
def encode(word, L , type_):
arr = np.array([], dtype=int)
for c in word:
if c=='-':
arr = np.append(arr, [2*L if type_=='all' else L])
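# Minimal usage sketch (illustrative only; encode() above is cut off in this
# excerpt and is assumed to end with `return arr`, and HMMDiscrete from
# hmm_lib is assumed to behave as it is used in Word.sample). The emission
# probabilities are chosen so each row sums to 1: 2*p1 + p2 + (2*L-2)*p3.
def _demo_word_hmm():
    model = Word('cat', p1=0.4, p2=0.1, p3=0.002, L=26, type_='all')
    samples = model.sample(n_word=3, random_states=[0, 1, 2])
    print(samples.shape)  # (3, 4): len('cat') + 1 emitted symbol indices per word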
"""
Centrality pipes for quantifying node-based topology measurements
Created by: <NAME>
Change Log
----------
2016/03/10 - Implemented DegrCentral, EvecCentral, SyncCentral pipes
"""
from __future__ import division
import numpy as np
from ..base import NodeTopoPipe
class DegrCentral(NodeTopoPipe):
"""
DegrCentral class for computing weighted degree centrality of the nodes
"""
def __init__(self):
self = self
def _pipe_as_flow(self, signal_packet):
# Get signal_packet details
hkey = signal_packet.keys()[0]
adj = signal_packet[hkey]['data']
centrality = np.sum(adj, axis=0).reshape(-1, 1)
# Dump into signal_packet
new_packet = {}
new_packet[hkey] = {
'data': centrality,
'meta': {
'ax_0': signal_packet[hkey]['meta']['ax_0'],
'time': signal_packet[hkey]['meta']['time']
}
}
return new_packet
class EvecCentral(NodeTopoPipe):
"""
EvecCentral class for computing eigenvector centrality of the nodes
"""
def __init__(self):
self = self
def _pipe_as_flow(self, signal_packet):
# Get signal_packet details
hkey = signal_packet.keys()[0]
adj = signal_packet[hkey]['data']
# Add 1s along the diagonal to make positive definite
adj[np.diag_indices_from(adj)] = 1
# Compute eigenvalues and eigenvectors, ensure they are real
eigval, eigvec = np.linalg.eig(adj)
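# Minimal numpy-only sketch of the two measures above (illustrative only; the
# remainder of EvecCentral is assumed to keep the leading eigenvector of the
# adjacency matrix as the centrality score):
def _demo_centrality(adj):
    degree = np.sum(adj, axis=0).reshape(-1, 1)       # weighted degree centrality
    adj = adj.copy()
    adj[np.diag_indices_from(adj)] = 1                # as in EvecCentral above
    eigval, eigvec = np.linalg.eig(adj)
    leading = np.abs(np.real(eigvec[:, np.argmax(np.real(eigval))]))
    return degree, leading.reshape(-1, 1)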
from pmd_beamphysics.units import dimension, dimension_name, SI_symbol, pg_units, c_light
from pmd_beamphysics.interfaces.astra import write_astra
from pmd_beamphysics.interfaces.bmad import write_bmad
from pmd_beamphysics.interfaces.genesis import write_genesis4_distribution, genesis2_beam_data, write_genesis2_beam_file
from pmd_beamphysics.interfaces.gpt import write_gpt
from pmd_beamphysics.interfaces.impact import write_impact
from pmd_beamphysics.interfaces.litrack import write_litrack
from pmd_beamphysics.interfaces.lucretia import write_lucretia
from pmd_beamphysics.interfaces.opal import write_opal
from pmd_beamphysics.interfaces.elegant import write_elegant
from pmd_beamphysics.plot import density_plot, marginal_plot, slice_plot
from pmd_beamphysics.readers import particle_array, particle_paths
from pmd_beamphysics.species import charge_of, mass_of
from pmd_beamphysics.statistics import norm_emit_calc, normalized_particle_coordinate, particle_amplitude, particle_twiss_dispersion, matched_particles
from pmd_beamphysics.writers import write_pmd_bunch, pmd_init
from h5py import File
import numpy as np
from copy import deepcopy
import os
#-----------------------------------------
# Classes
class ParticleGroup:
"""
Particle Group class
Initialized on on openPMD beamphysics particle group:
h5 = open h5 handle, or str that is a file
data = raw data
The required bunch data is stored in .data with keys
np.array: x, px, y, py, z, pz, t, status, weight
str: species
where:
x, y, z are positions in units of [m]
px, py, pz are momenta in units of [eV/c]
t is time in [s]
weight is the macro-charge weight in [C], used for all statistical calculations.
species is a proper species name: 'electron', etc.
Optional data:
np.array: id
where:
id is a list of unique integers that identify the particles.
Derived data can be computed as attributes:
.gamma, .beta, .beta_x, .beta_y, .beta_z: relativistic factors [1].
.r, .theta: cylindrical coordinates [m], [1]
.pr, .ptheta: momenta in the radial and angular coordinate directions [eV/c]
.Lz: angular momentum about the z axis [m*eV/c]
.energy : total energy [eV]
.kinetic_energy: total energy - mc^2 in [eV].
.higher_order_energy: total energy with quadratic fit in z or t subtracted [eV]
.p: total momentum in [eV/c]
.mass: rest mass in [eV]
.xp, .yp: Slopes x' = dx/dz = dpx/dpz and y' = dy/dz = dpy/dpz [1].
Normalized transvere coordinates can also be calculated as attributes:
.x_bar, .px_bar, .y_bar, .py_bar in [sqrt(m)]
The normalization is automatically calculated from the covariance matrix.
See functions in .statistics for more advanced usage.
Their corresponding amplitudes are:
.Jx, .Jy [m]
where Jx = (x_bar^2 + px_bar^2 )/2,
The momenta are normalized by the mass, so that:
<Jx> = norm_emit_x
and similar for y.
Statistics of any of these are calculated with:
.min(X)
.max(X)
.ptp(X)
.avg(X)
.std(X)
.cov(X, Y, ...)
.histogramdd(X, Y, ..., bins=10, range=None)
with a string X as the name any of the properties above.
Useful beam physics quantities are given as attributes:
.norm_emit_x
.norm_emit_y
.norm_emit_4d
.higher_order_energy_spread
.average_current
Twiss parameters, including dispersion, for the 'x' or 'y' plane:
.twiss(plane='x', fraction=0.95, p0C=None)
For convenience, plane='xy' will calculate twiss for both planes.
Twiss matched particles, using a simple linear transformation:
.twiss_match(self, beta=None, alpha=None, plane='x', p0c=None, inplace=False)
The weight is required and must sum to > 0. The sum of the weights is:
.charge
This can also be set:
.charge = 1.234 # C, will rescale the .weight array
All attributes can be accessed with brackets:
[key]
Additional keys are allowed for convenience:
['min_prop'] will return .min('prop')
['max_prop'] will return .max('prop')
['ptp_prop'] will return .ptp('prop')
['mean_prop'] will return .avg('prop')
['sigma_prop'] will return .std('prop')
['cov_prop1__prop2'] will return .cov('prop1', 'prop2')[0,1]
Units for all attributes can be accessed by:
.units(key)
Particles are often stored at the same time (i.e. from a t-based code),
or with the same z position (i.e. from an s-based code.)
Routines:
drift_to_z(z0)
drift_to_t(t0)
help to convert these. If no argument is given, particles will be drifted to the mean.
Related properties are:
.in_t_coordinates returns True if all particles have the same t coordinate
.in_z_coordinates returns True if all particles have the same z coordinate
Convenient plotting is provided with:
.plot(...)
.slice_plot(...)
Use help(ParticleGroup.plot), etc. for usage.
"""
def __init__(self, h5=None, data=None):
if h5 and data:
# TODO:
# Allow merging or changing some array with extra data
raise NotImplementedError('Cannot init on both h5 and data')
if h5:
# Allow filename
if isinstance(h5, str):
fname = os.path.expandvars(h5)
assert os.path.exists(fname), f'File does not exist: {fname}'
with File(fname, 'r') as hh5:
pp = particle_paths(hh5)
assert len(pp) == 1, f'Number of particle paths in {h5}: {len(pp)}'
data = load_bunch_data(hh5[pp[0]])
else:
# Try dict
data = load_bunch_data(h5)
else:
# Fill out data. Exclude species.
data = full_data(data)
species = list(set(data['species']))
# Allow for empty data (len=0). Otherwise, check species.
if len(species) >= 1:
assert len(species) == 1, f'mixed species are not allowed: {species}'
data['species'] = species[0]
self._settable_array_keys = ['x', 'px', 'y', 'py', 'z', 'pz', 't', 'status', 'weight']
# Optional data
for k in ['id']:
if k in data:
self._settable_array_keys.append(k)
self._settable_scalar_keys = ['species']
self._settable_keys = self._settable_array_keys + self._settable_scalar_keys
# Internal data. Only allow settable keys
self._data = {k:data[k] for k in self._settable_keys}
#-------------------------------------------------
# Access to intrinsic coordinates
@property
def x(self):
"""
x coordinate in [m]
"""
return self._data['x']
@x.setter
def x(self, val):
self._data['x'] = full_array(len(self), val)
@property
def y(self):
"""
y coordinate in [m]
"""
return self._data['y']
@y.setter
def y(self, val):
self._data['y'] = full_array(len(self), val)
@property
def z(self):
"""
z coordinate in [m]
"""
return self._data['z']
@z.setter
def z(self, val):
self._data['z'] = full_array(len(self), val)
@property
def px(self):
"""
px coordinate in [eV/c]
"""
return self._data['px']
@px.setter
def px(self, val):
self._data['px'] = full_array(len(self), val)
@property
def py(self):
"""
py coordinate in [eV/c]
"""
return self._data['py']
@py.setter
def py(self, val):
self._data['py'] = full_array(len(self), val)
@property
def pz(self):
"""
pz coordinate in [eV/c]
"""
return self._data['pz']
@pz.setter
def pz(self, val):
self._data['pz'] = full_array(len(self), val)
@property
def t(self):
"""
t coordinate in [s]
"""
return self._data['t']
@t.setter
def t(self, val):
self._data['t'] = full_array(len(self), val)
@property
def status(self):
"""
status coordinate in [1]
"""
return self._data['status']
@status.setter
def status(self, val):
self._data['status'] = full_array(len(self), val)
@property
def weight(self):
"""
weight coordinate in [C]
"""
return self._data['weight']
@weight.setter
def weight(self, val):
self._data['weight'] = full_array(len(self), val)
@property
def id(self):
"""
id integer
"""
if 'id' not in self._data:
self.assign_id()
return self._data['id']
@id.setter
def id(self, val):
self._data['id'] = full_array(len(self), val)
@property
def species(self):
"""
species string
"""
return self._data['species']
@species.setter
def species(self, val):
self._data['species'] = val
@property
def data(self):
"""
Internal data dict
"""
return self._data
#-------------------------------------------------
# Derived data
def assign_id(self):
"""
Assigns unique ids, integers from 1 to n_particle
"""
if 'id' not in self._settable_array_keys:
self._settable_array_keys.append('id')
self.id = np.arange(1, self['n_particle']+1)
@property
def n_particle(self):
"""Total number of particles. Same as len """
return len(self)
@property
def n_alive(self):
"""Number of alive particles, defined by status == 1"""
return len(np.where(self.status==1)[0])
@property
def n_dead(self):
"""Number of alive particles, defined by status != 1"""
return self.n_particle - self.n_alive
def units(self, key):
"""Returns the units of any key"""
return pg_units(key)
@property
def mass(self):
"""Rest mass in eV"""
return mass_of(self.species)
@property
def species_charge(self):
"""Species charge in C"""
return charge_of(self.species)
@property
def charge(self):
"""Total charge in C"""
return np.sum(self.weight)
@charge.setter
def charge(self, val):
"""Rescale weight array so that it sum to this value"""
assert val >0, 'charge must be >0. This is used to weight the particles.'
self.weight *= val/self.charge
# Relativistic properties
@property
def p(self):
"""Total momemtum in eV/c"""
return np.sqrt(self.px**2 + self.py**2 + self.pz**2)
@property
def energy(self):
"""Total energy in eV"""
return np.sqrt(self.px**2 + self.py**2 + self.pz**2 + self.mass**2)
@property
def kinetic_energy(self):
"""Kinetic energy in eV"""
return self.energy - self.mass
# Slopes. Note that these are relative to pz
@property
def xp(self):
"""x slope px/pz (dimensionless)"""
return self.px/self.pz
@property
def yp(self):
"""y slope py/pz (dimensionless)"""
return self.py/self.pz
@property
def higher_order_energy(self):
"""
Fits a quadratic (order=2) to the Energy vs. time, and returns the energy with this subtracted.
"""
return self.higher_order_energy_calc(order=2)
@property
def higher_order_energy_spread(self):
"""
Legacy syntax to compute the standard deviation of higher_order_energy.
"""
return self.std('higher_order_energy')
def higher_order_energy_calc(self, order=2):
"""
        Fits a polynomial of order `order` to the energy vs. time, and returns the energy with this fit subtracted.
"""
#order=2
if self.std('z') < 1e-12:
# must be at a screen. Use t
t = self.t
else:
# All particles at the same time. Use z to calc t
t = self.z/c_light
energy = self.energy
best_fit_coeffs = np.polynomial.polynomial.polyfit(t, energy, order)
best_fit = np.polynomial.polynomial.polyval(t, best_fit_coeffs)
return energy - best_fit
    # Polar coordinates. Note that these are centered at x=0, y=0, and not an average center.
@property
def r(self):
"""Radius in the xy plane: r = sqrt(x^2 + y^2) in m"""
return np.hypot(self.x, self.y)
@property
def theta(self):
"""Angle in xy plane: theta = arctan2(y, x) in radians"""
return np.arctan2(self.y, self.x)
@property
def pr(self):
"""
Momentum in the radial direction in eV/c
r_hat = cos(theta) xhat + sin(theta) yhat
pr = p dot r_hat
"""
theta = self.theta
return self.px * np.cos(theta) + self.py * np.sin(theta)
@property
def ptheta(self):
"""
Momentum in the polar theta direction.
theta_hat = -sin(theta) xhat + cos(theta) yhat
ptheta = p dot theta_hat
Note that Lz = r*ptheta
"""
theta = self.theta
return -self.px * np.sin(theta) + self.py * np.cos(theta)
@property
def Lz(self):
"""
Angular momentum around the z axis in m*eV/c
Lz = x * py - y * px
"""
return self.x*self.py - self.y*self.px
# Relativistic quantities
@property
def gamma(self):
"""Relativistic gamma"""
return self.energy/self.mass
@gamma.setter
def gamma(self, val):
beta_x = self.beta_x
beta_y = self.beta_y
beta_z = self.beta_z
beta = self.beta
gamma_new = full_array(len(self), val)
energy_new = gamma_new * self.mass
beta_new = np.sqrt(gamma_new**2 - 1)/gamma_new
self._data['px'] = energy_new * beta_new * beta_x / beta
self._data['py'] = energy_new * beta_new * beta_y / beta
self._data['pz'] = energy_new * beta_new * beta_z / beta
@property
def beta(self):
"""Relativistic beta"""
return self.p/self.energy
@property
def beta_x(self):
"""Relativistic beta, x component"""
return self.px/self.energy
@beta_x.setter
def beta_x(self, val):
self._data['px'] = full_array(len(self), val)*self.energy
@property
def beta_y(self):
"""Relativistic beta, y component"""
return self.py/self.energy
@beta_y.setter
def beta_y(self, val):
self._data['py'] = full_array(len(self), val)*self.energy
@property
def beta_z(self):
"""Relativistic beta, z component"""
return self.pz/self.energy
@beta_z.setter
def beta_z(self, val):
self._data['pz'] = full_array(len(self), val)*self.energy
# Normalized coordinates for x and y
@property
def x_bar(self):
"""Normalized x in units of sqrt(m)"""
return normalized_particle_coordinate(self, 'x')
@property
def px_bar(self):
"""Normalized px in units of sqrt(m)"""
return normalized_particle_coordinate(self, 'px')
@property
def Jx(self):
"""Normalized amplitude J in the x-px plane"""
return particle_amplitude(self, 'x')
@property
def y_bar(self):
"""Normalized y in units of sqrt(m)"""
return normalized_particle_coordinate(self, 'y')
@property
def py_bar(self):
"""Normalized py in units of sqrt(m)"""
return normalized_particle_coordinate(self, 'py')
@property
def Jy(self):
"""Normalized amplitude J in the y-py plane"""
return particle_amplitude(self, 'y')
def delta(self, key):
"""Attribute (array) relative to its mean"""
return getattr(self, key) - self.avg(key)
# Statistical property functions
def min(self, key):
"""Minimum of any key"""
return np.min(getattr(self, key))
def max(self, key):
"""Maximum of any key"""
return np.max(getattr(self, key))
def ptp(self, key):
"""Peak-to-Peak = max - min of any key"""
return np.ptp(getattr(self, key))
def avg(self, key):
"""Statistical average"""
dat = getattr(self, key) # equivalent to self.key for accessing properties above
if np.isscalar(dat):
return dat
return np.average(dat, weights=self.weight)
def std(self, key):
"""Standard deviation (actually sample)"""
dat = getattr(self, key)
if np.isscalar(dat):
return 0
avg_dat = self.avg(key)
return np.sqrt(np.average( (dat - avg_dat)**2, weights=self.weight))
def cov(self, *keys):
"""
Covariance matrix from any properties
Example:
P = ParticleGroup(h5)
P.cov('x', 'px', 'y', 'py')
"""
dats = np.array([ getattr(self, key) for key in keys ])
return np.cov(dats, aweights=self.weight)
def histogramdd(self, *keys, bins=10, range=None):
"""
Wrapper for numpy.histogramdd, but accepts property names as keys.
Computes the multidimensional histogram of keys. Internally uses weights.
Example:
P.histogramdd('x', 'y', bins=50)
Returns:
np.array with shape 50x50, edge list
"""
H, edges = np.histogramdd(np.array([self[k] for k in list(keys)]).T, weights=self.weight, bins=bins, range=range)
return H, edges
# Beam statistics
@property
def norm_emit_x(self):
"""Normalized emittance in the x plane"""
return norm_emit_calc(self, planes=['x'])
@property
def norm_emit_y(self):
"""Normalized emittance in the x plane"""
return norm_emit_calc(self, planes=['y'])
@property
def norm_emit_4d(self):
"""Normalized emittance in the xy planes (4D)"""
return norm_emit_calc(self, planes=['x', 'y'])
def twiss(self, plane='x', fraction=1, p0c=None):
"""
Returns Twiss and Dispersion dict.
plane can be:
'x', 'y', 'xy'
        Optionally a fraction of the particles, based on amplitude, can be specified.
"""
d = {}
for p in plane:
d.update(particle_twiss_dispersion(self, plane=p, fraction=fraction, p0c=p0c))
return d
def twiss_match(self, beta=None, alpha=None, plane='x', p0c=None, inplace=False):
"""
Returns a ParticleGroup with requested Twiss parameters.
See: statistics.matched_particles
"""
        return matched_particles(self, beta=beta, alpha=alpha, plane=plane, p0c=p0c, inplace=inplace)
@property
def in_z_coordinates(self):
"""
Returns True if all particles have the same z coordinate
"""
# Check that z are all the same
return len(np.unique(self.z)) == 1
@property
def in_t_coordinates(self):
"""
Returns True if all particles have the same t coordinate
"""
# Check that t are all the same
return len(np.unique(self.t)) == 1
@property
def average_current(self):
"""
        Simple average current in A: charge / dt, with dt = (max_t - min_t)
        If all particles are at the same t, dt = (max_z - min_z) / (c_light * <beta_z>) is used instead
"""
dt = self.t.ptp() # ptp 'peak to peak' is max - min
if dt == 0:
            # all particles are at the same t; compute dt from the z extent instead
dt = self.z.ptp() / (self.avg('beta_z')*c_light)
return self.charge / dt
def __getitem__(self, key):
"""
Returns a property or statistical quantity that can be computed:
P['x'] returns the x array
        P['sigma_x'] returns the std(x) scalar
P['norm_emit_x'] returns the norm_emit_x scalar
Parts can also be given. Example: P[0:10] returns a new ParticleGroup with the first 10 elements.
"""
# Allow for non-string operations:
if not isinstance(key, str):
return particle_parts(self, key)
if key.startswith('cov_'):
subkeys = key[4:].split('__')
assert len(subkeys) == 2, f'Too many properties in covariance request: {key}'
return self.cov(*subkeys)[0,1]
elif key.startswith('delta_'):
return self.delta(key[6:])
elif key.startswith('sigma_'):
return self.std(key[6:])
elif key.startswith('mean_'):
return self.avg(key[5:])
elif key.startswith('min_'):
return self.min(key[4:])
elif key.startswith('max_'):
return self.max(key[4:])
elif key.startswith('ptp_'):
return self.ptp(key[4:])
else:
return getattr(self, key)
def where(self, x):
return self[np.where(x)]
# TODO: should the user be allowed to do this?
#def __setitem__(self, key, value):
# assert key in self._settable_keyes, 'Error: you cannot set:'+str(key)
#
# if key in self._settable_array_keys:
# assert len(value) == self.n_particle
# self.__dict__[key] = value
# elif key ==
# print()
# Simple 'tracking'
def drift(self, delta_t):
"""
Drifts particles by time delta_t
"""
self.x = self.x + self.beta_x * c_light * delta_t
self.y = self.y + self.beta_y * c_light * delta_t
self.z = self.z + self.beta_z * c_light * delta_t
self.t = self.t + delta_t
    def drift_to_z(self, z=None):
        """
        Drifts all particles to the same z
        If no z is given, particles will be drifted to the average z
        """
        if z is None:
            z = self.avg('z')
dt = (z - self.z) / (self.beta_z * c_light)
self.drift(dt)
# Fix z to be exactly this value
self.z = np.full(self.n_particle, z)
    def drift_to_t(self, t=None):
        """
        Drifts all particles to the same t
        If no t is given, particles will be drifted to the average t
        """
        if t is None:
            t = self.avg('t')
dt = t - self.t
self.drift(dt)
# Fix t to be exactly this value
self.t = np.full(self.n_particle, t)
# Writers
def write_astra(self, filePath, verbose=False):
write_astra(self, filePath, verbose=verbose)
def write_bmad(self, filePath, p0c=None, t_ref=0, verbose=False):
write_bmad(self, filePath, p0c=p0c, t_ref=t_ref, verbose=verbose)
def write_elegant(self, filePath, verbose=False):
write_elegant(self, filePath, verbose=verbose)
def write_genesis2_beam_file(self, filePath, n_slice=None, verbose=False):
# Get beam columns
beam_columns = genesis2_beam_data(self, n_slice=n_slice)
# Actually write the file
write_genesis2_beam_file(filePath, beam_columns, verbose=verbose)
def write_genesis4_distribution(self, filePath, verbose=False):
write_genesis4_distribution(self, filePath, verbose=verbose)
def write_gpt(self, filePath, asci2gdf_bin=None, verbose=False):
write_gpt(self, filePath, asci2gdf_bin=asci2gdf_bin, verbose=verbose)
def write_impact(self, filePath, cathode_kinetic_energy_ref=None, include_header=True, verbose=False):
return write_impact(self, filePath, cathode_kinetic_energy_ref=cathode_kinetic_energy_ref,
include_header=include_header, verbose=verbose)
def write_litrack(self, filePath, p0c=None, verbose=False):
return write_litrack(self, outfile=filePath, p0c=p0c, verbose=verbose)
def write_lucretia(self, filePath, ele_name='BEGINNING', t_ref=0, stop_ix=None, verbose=False):
return write_lucretia(self, filePath, ele_name=ele_name, t_ref=t_ref, stop_ix=stop_ix)
def write_opal(self, filePath, verbose=False, dist_type='emitted'):
return write_opal(self, filePath, verbose=verbose, dist_type=dist_type)
# openPMD
def write(self, h5, name=None):
"""
Writes to an open h5 handle, or new file if h5 is a str.
"""
if isinstance(h5, str):
fname = os.path.expandvars(h5)
g = File(fname, 'w')
pmd_init(g, basePath='/', particlesPath='.' )
else:
g = h5
write_pmd_bunch(g, self, name=name)
# Plotting
# --------
# TODO: more general plotting
def plot(self, key1='x', key2=None, bins=None, return_figure=False,
tex=True, **kwargs):
"""
1d or 2d density plot.
"""
if not key2:
fig = density_plot(self, key=key1, bins=bins, tex=tex, **kwargs)
else:
fig = marginal_plot(self, key1=key1, key2=key2, bins=bins, tex=tex, **kwargs)
if return_figure:
return fig
def slice_plot(self, key='sigma_x',
n_slice=100,
slice_key=None,
tex=True,
return_figure=False,
**kwargs):
"""
Slice statistics plot.
"""
if not slice_key:
if self.in_t_coordinates:
slice_key = 'z'
else:
slice_key = 't'
        fig = slice_plot(self, stat_key=key, n_slice=n_slice, slice_key=slice_key, tex=tex, **kwargs)
if return_figure:
return fig
# New constructors
def split(self, n_chunks = 100, key='z'):
return split_particles(self, n_chunks=n_chunks, key=key)
def copy(self):
"""Returns a deep copy"""
return deepcopy(self)
# Operator overloading
# Resample
def resample(self, n):
"""
Resamples n particles.
"""
return resample(self, n)
# Internal sorting
def _sort(self, key):
"""Sorts internal arrays by key"""
ixlist = np.argsort(self[key])
for k in self._settable_array_keys:
self._data[k] = self[k][ixlist]
# Operator overloading
def __add__(self, other):
"""
Overloads the + operator to join particle groups.
Simply calls join_particle_groups
"""
return join_particle_groups(self, other)
#
def __contains__(self, item):
"""Checks internal data"""
return True if item in self._data else False
def __len__(self):
return len(self[self._settable_array_keys[0]])
def __str__(self):
s = f'ParticleGroup with {self.n_particle} particles with total charge {self.charge} C'
return s
def __repr__(self):
memloc = hex(id(self))
return f'<ParticleGroup with {self.n_particle} particles at {memloc}>'
#-----------------------------------------
# helper functions for ParticleGroup class
def single_particle(x=0,
px=0,
y=0,
py=0,
z=0,
pz=0,
t=0,
weight=1,
status=1,
species='electron'):
"""
Convenience function to make ParticleGroup with a single particle.
Units:
x, y, z: m
px, py, pz: eV/c
t: s
weight: C
status=1 => live particle
"""
data = dict(x=x, px=px, y=y, py=py, z=z, pz=pz, t=t, weight=weight, status=status, species=species)
return ParticleGroup(data=data)
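# Illustrative usage sketch (not part of the library; assumes the ParticleGroup
# class and helpers above are importable). It shows how single_particle composes
# with the property and statistics accessors defined on the class.
def _example_single_particle_usage():
    """Build a 1 pC electron with pz = 1 MeV/c and query derived quantities."""
    P = single_particle(pz=1e6, weight=1e-12)
    print(P.gamma, P.beta_z)            # relativistic factors from the properties above
    print(P['mean_z'], P['sigma_x'])    # string-keyed statistics via __getitem__
    return P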
def load_bunch_data(h5):
"""
Load particles into structured numpy array.
"""
n = len(h5['position/x'])
attrs = dict(h5.attrs)
data = {}
data['species'] = attrs['speciesType'].decode('utf-8') # String
n_particle = int(attrs['numParticles'])
data['total_charge'] = attrs['totalCharge']*attrs['chargeUnitSI']
    for key in ['x', 'px', 'y', 'py', 'z', 'pz', 't']:  # phase-space coordinate arrays
data[key] = particle_array(h5, key)
if 'particleStatus' in h5:
data['status'] = particle_array(h5, 'particleStatus')
else:
data['status'] = np.full(n_particle, 1)
# Make sure weight is populated
if 'weight' in h5:
weight = particle_array(h5, 'weight')
if len(weight) == 1:
weight = np.full(n_particle, weight[0])
else:
weight = np.full(n_particle, data['total_charge']/n_particle)
data['weight'] = weight
# id should be a unique integer, no units
# optional
if 'id' in h5:
data['id'] = h5['id'][:]
return data
def full_array(n, val):
"""
Casts a value into a full array of length n
"""
if np.isscalar(val):
return np.full(n, val)
n_here = len(val)
if n_here == 1:
return np.full(n, val[0])
elif n_here != n:
raise ValueError(f'Length mismatch: len(val)={n_here}, but requested n={n}')
# Cast to array
return np.array(val)
def full_data(data, exclude=None):
"""
Expands keyed data into np arrays, assuring that the lengths of all items are the same.
Allows for some keys to be scalars or length 1, and fills them out with np.full.
"""
full_data = {}
scalars = {}
for k, v in data.items():
if np.isscalar(v):
scalars[k] = v
elif len(v) == 1:
scalars[k] = v[0]
else:
# must be array
full_data[k] = np.array(v)
# Check for single particle
if len(full_data) == 0:
return {k:np.array([v]) for k, v in scalars.items()}
# Array data should all have the same length
nlist = [len(v) for _, v in full_data.items()]
assert len(set(nlist)) == 1, f'arrays must have the same length. Found len: { {k:len(v) for k, v in full_data.items()} }'
for k, v in scalars.items():
        full_data[k] = np.full(nlist[0], v)
    return full_data
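# Minimal sketch of full_data behavior (illustrative, not part of the library):
# scalar entries are broadcast to match the length of the array entries.
def _example_full_data():
    d = full_data({'x': [1.0, 2.0, 3.0], 'weight': 1e-12, 'status': 1})
    print(d['weight'])   # -> array([1.e-12, 1.e-12, 1.e-12])
    return d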
"""
Module for PypeIt extraction code
.. include:: ../include/links.rst
"""
import copy
import numpy as np
import scipy
from matplotlib import pyplot as plt
from IPython import embed
from astropy import stats
from pypeit import msgs
from pypeit import utils
from pypeit import specobj
from pypeit import specobjs
from pypeit import tracepca
from pypeit import bspline
from pypeit.display import display
from pypeit.core import pydl
from pypeit.core import pixels
from pypeit.core import arc
from pypeit.core import fitting
from pypeit.core import procimg
from pypeit.core.trace import fit_trace
from pypeit.core.moment import moment1d
def extract_optimal(sciimg, ivar, mask, waveimg, skyimg, thismask, oprof, box_radius,
spec, min_frac_use=0.05, base_var=None, count_scale=None, noise_floor=None):
"""
    Perform optimal (profile-weighted) extraction of a single object.
    The SpecObj is changed in place, with its optimal-extraction attributes
    (OPT_*) being filled with the extraction results.
Parameters
----------
sciimg : float `numpy.ndarray`_, shape (nspec, nspat)
Science frame
ivar : float `numpy.ndarray`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
image itself.
mask : boolean `numpy.ndarray`_, shape (nspec, nspat)
        Good-pixel mask, indicating which pixels should or should not be
        used. Good pixels = True, bad pixels = False
waveimg : float `numpy.ndarray`_, shape (nspec, nspat)
Wavelength image.
skyimg : float `numpy.ndarray`_, shape (nspec, nspat)
Image containing our model of the sky
thismask : boolean `numpy.ndarray`_, shape (nspec, nspat)
Image indicating which pixels are on the slit/order in question.
True=Good.
oprof : float `numpy.ndarray`_, shape (nspec, nspat)
Image containing the profile of the object that we are extracting.
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
min_frac_use : :obj:`float`, optional
        If the sum of the object profile across the spatial direction is less than
        this value, the optimal extraction of this spectral pixel is masked
        because the majority of the object profile has been masked.
base_var : `numpy.ndarray`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the image processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `numpy.ndarray`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science image. For example, if the image has been flat-field
corrected, this is the inverse of the flat-field counts. If None, set
to 1. If a single float, assumed to be constant across the full image.
If an array, the shape must match ``base_var``. The variance will be 0
wherever :math:`s \leq 0`, modulo the provided ``adderr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
added.
"""
# Setup
imgminsky = sciimg - skyimg
nspat = imgminsky.shape[1]
nspec = imgminsky.shape[0]
spec_vec = np.arange(nspec)
spat_vec = np.arange(nspat)
# TODO This makes no sense for difference imaging? Not sure we need NIVAR anyway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
ispec, ispat = np.where(oprof > 0.0)
# Exit gracefully if we have no positive object profiles, since that means something was wrong with object fitting
if not np.any(oprof > 0.0):
msgs.warn('Object profile is zero everywhere. This aperture is junk.')
return
mincol = np.min(ispat)
maxcol = np.max(ispat) + 1
nsub = maxcol - mincol
mask_sub = mask[:,mincol:maxcol]
thismask_sub = thismask[:, mincol:maxcol]
wave_sub = waveimg[:,mincol:maxcol]
ivar_sub = np.fmax(ivar[:,mincol:maxcol],0.0) # enforce positivity since these are used as weights
vno_sub = None if var_no is None else np.fmax(var_no[:,mincol:maxcol],0.0)
base_sub = None if base_var is None else base_var[:,mincol:maxcol]
img_sub = imgminsky[:,mincol:maxcol]
sky_sub = skyimg[:,mincol:maxcol]
oprof_sub = oprof[:,mincol:maxcol]
# enforce normalization and positivity of object profiles
norm = np.nansum(oprof_sub,axis = 1)
norm_oprof = np.outer(norm, np.ones(nsub))
oprof_sub = np.fmax(oprof_sub/norm_oprof, 0.0)
ivar_denom = np.nansum(mask_sub*oprof_sub, axis=1)
mivar_num = np.nansum(mask_sub*ivar_sub*oprof_sub**2, axis=1)
mivar_opt = mivar_num/(ivar_denom + (ivar_denom == 0.0))
flux_opt = np.nansum(mask_sub*ivar_sub*img_sub*oprof_sub, axis=1)/(mivar_num + (mivar_num == 0.0))
# Optimally extracted noise variance (sky + read noise) only. Since
# this variance is not the same as that used for the weights, we
# don't get the usual cancellation. Additional denom factor is the
# analog of the numerator in Horne's variance formula. Note that we
# are only weighting by the profile (ivar_sub=1) because
# otherwise the result depends on the signal (bad).
nivar_num = np.nansum(mask_sub*oprof_sub**2, axis=1) # Uses unit weights
if vno_sub is None:
nivar_opt = None
else:
nvar_opt = ivar_denom * np.nansum(mask_sub * vno_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
nivar_opt = 1.0/(nvar_opt + (nvar_opt == 0.0))
# Optimally extract sky and (read noise)**2 in a similar way
sky_opt = ivar_denom*(np.nansum(mask_sub*sky_sub*oprof_sub**2, axis=1))/(nivar_num**2 + (nivar_num**2 == 0.0))
if base_var is None:
base_opt = None
else:
base_opt = ivar_denom * np.nansum(mask_sub * base_sub * oprof_sub**2, axis=1) \
/ (nivar_num**2 + (nivar_num**2 == 0.0))
base_opt = np.sqrt(base_opt)
base_opt[np.isnan(base_opt)]=0.0
tot_weight = np.nansum(mask_sub*ivar_sub*oprof_sub, axis=1)
prof_norm = np.nansum(oprof_sub, axis=1)
frac_use = (prof_norm > 0.0)*np.nansum((mask_sub*ivar_sub > 0.0)*oprof_sub, axis=1)/(prof_norm + (prof_norm == 0.0))
    # Use the same weights = oprof^2*mivar for the wavelengths as for the flux.
# Note that for the flux, one of the oprof factors cancels which does
# not for the wavelengths.
wave_opt = np.nansum(mask_sub*ivar_sub*wave_sub*oprof_sub**2, axis=1)/(mivar_num + (mivar_num == 0.0))
mask_opt = (tot_weight > 0.0) & (frac_use > min_frac_use) & (mivar_num > 0.0) & (ivar_denom > 0.0) & \
np.isfinite(wave_opt) & (wave_opt > 0.0)
# Interpolate wavelengths over masked pixels
badwvs = (mivar_num <= 0) | np.invert(np.isfinite(wave_opt)) | (wave_opt <= 0.0)
if badwvs.any():
oprof_smash = np.nansum(thismask_sub*oprof_sub**2, axis=1)
# Can we use the profile average wavelengths instead?
oprof_good = badwvs & (oprof_smash > 0.0)
if oprof_good.any():
wave_opt[oprof_good] = np.nansum(
wave_sub[oprof_good,:]*thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)/\
np.nansum(thismask_sub[oprof_good,:]*oprof_sub[oprof_good,:]**2, axis=1)
oprof_bad = badwvs & ((oprof_smash <= 0.0) | (np.isfinite(oprof_smash) == False) | (wave_opt <= 0.0) | (np.isfinite(wave_opt) == False))
if oprof_bad.any():
# For pixels with completely bad profile values, interpolate from trace.
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec,spat_vec, waveimg*thismask)
wave_opt[oprof_bad] = f_wave(spec.trace_spec[oprof_bad], spec.TRACE_SPAT[oprof_bad],
grid=False)
flux_model = np.outer(flux_opt,np.ones(nsub))*oprof_sub
chi2_num = np.nansum((img_sub - flux_model)**2*ivar_sub*mask_sub,axis=1)
chi2_denom = np.fmax(np.nansum(ivar_sub*mask_sub > 0.0, axis=1) - 1.0, 1.0)
chi2 = chi2_num/chi2_denom
# Fill in the optimally extraction tags
spec.OPT_WAVE = wave_opt # Optimally extracted wavelengths
spec.OPT_COUNTS = flux_opt # Optimally extracted flux
spec.OPT_COUNTS_IVAR = mivar_opt # Inverse variance of optimally extracted flux using modelivar image
spec.OPT_COUNTS_SIG = np.sqrt(utils.inverse(mivar_opt))
spec.OPT_COUNTS_NIVAR = nivar_opt # Optimally extracted noise variance (sky + read noise) only
spec.OPT_MASK = mask_opt # Mask for optimally extracted flux
spec.OPT_COUNTS_SKY = sky_opt # Optimally extracted sky
spec.OPT_COUNTS_SIG_DET = base_opt # Square root of optimally extracted read noise squared
spec.OPT_FRAC_USE = frac_use # Fraction of pixels in the object profile subimage used for this extraction
spec.OPT_CHI2 = chi2 # Reduced chi2 of the model fit for this spectral pixel
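# A compact summary of the profile-weighted (Horne-style) estimator implemented
# above, written with the same symbols as the code (sums run over spatial pixels j
# at each spectral row i):
#
#   flux_opt[i]  = sum_j(mask*ivar*img*oprof)_ij / sum_j(mask*ivar*oprof**2)_ij
#   mivar_opt[i] = sum_j(mask*ivar*oprof**2)_ij  / sum_j(mask*oprof)_ij
#
# Pixels where the mask or profile is zero drop out of both sums; rows where too
# little of the profile is usable are rejected via min_frac_use.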
def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, box_radius, spec, base_var=None,
count_scale=None, noise_floor=None):
"""
Perform boxcar extraction for a single SpecObj
SpecObj is filled in place
Parameters
----------
sciimg : float `numpy.ndarray`_, shape (nspec, nspat)
Science frame
ivar : float `numpy.ndarray`_, shape (nspec, nspat)
Inverse variance of science frame. Can be a model or deduced from the
image itself.
mask : boolean `numpy.ndarray`_, shape (nspec, nspat)
        Good-pixel mask, indicating which pixels should or should not be
        used. Good pixels = True, bad pixels = False
waveimg : float `numpy.ndarray`_, shape (nspec, nspat)
Wavelength image.
skyimg : float `numpy.ndarray`_, shape (nspec, nspat)
Image containing our model of the sky
box_radius : :obj:`float`
Size of boxcar window in floating point pixels in the spatial direction.
spec : :class:`~pypeit.specobj.SpecObj`
This is the container that holds object, trace, and extraction
information for the object in question. This routine operates one object
at a time. **This object is altered in place!**
base_var : `numpy.ndarray`_, shape is (nspec, nspat), optional
The "base-level" variance in the data set by the detector properties and
the image processing steps. See
:func:`~pypeit.core.procimg.base_variance`.
count_scale : :obj:`float`, `numpy.ndarray`_, optional
A scale factor, :math:`s`, that *has already been applied* to the
provided science image. For example, if the image has been flat-field
corrected, this is the inverse of the flat-field counts. If None, set
to 1. If a single float, assumed to be constant across the full image.
If an array, the shape must match ``base_var``. The variance will be 0
wherever :math:`s \leq 0`, modulo the provided ``adderr``. This is one
of the components needed to construct the model variance; see
``model_noise``.
noise_floor : :obj:`float`, optional
A fraction of the counts to add to the variance, which has the effect of
ensuring that the S/N is never greater than ``1/noise_floor``; see
:func:`~pypeit.core.procimg.variance_model`. If None, no noise floor is
added.
"""
# Setup
imgminsky = sciimg - skyimg
nspat = imgminsky.shape[1]
nspec = imgminsky.shape[0]
spec_vec = np.arange(nspec)
spat_vec = np.arange(nspat)
if spec.trace_spec is None:
spec.trace_spec = spec_vec
# TODO This makes no sense for difference imaging? Not sure we need NIVAR anyway
var_no = None if base_var is None \
else procimg.variance_model(base_var, counts=skyimg, count_scale=count_scale,
noise_floor=noise_floor)
# Fill in the boxcar extraction tags
flux_box = moment1d(imgminsky*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# Denom is computed in case the trace goes off the edge of the image
box_denom = moment1d(waveimg*mask > 0.0, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0]
wave_box = moment1d(waveimg*mask, spec.TRACE_SPAT, 2*box_radius,
row=spec.trace_spec)[0] / (box_denom + (box_denom == 0.0))
varimg = 1.0/(ivar + (ivar == 0.0))
var_box = moment1d(varimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
nvar_box = None if var_no is None \
else moment1d(var_no*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
sky_box = moment1d(skyimg*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
if base_var is None:
base_box = None
else:
_base_box = moment1d(base_var*mask, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
base_posind = (_base_box > 0.0)
base_box = np.zeros(_base_box.shape, dtype=float)
base_box[base_posind] = np.sqrt(_base_box[base_posind])
pixtot = moment1d(ivar*0 + 1.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
pixmsk = moment1d(ivar*mask == 0.0, spec.TRACE_SPAT, 2*box_radius, row=spec.trace_spec)[0]
# If every pixel is masked then mask the boxcar extraction
mask_box = (pixmsk != pixtot) & np.isfinite(wave_box) & (wave_box > 0.0)
bad_box = (wave_box <= 0.0) | np.invert(np.isfinite(wave_box)) | (box_denom == 0.0)
# interpolate bad wavelengths over masked pixels
if bad_box.any():
f_wave = scipy.interpolate.RectBivariateSpline(spec_vec, spat_vec, waveimg)
wave_box[bad_box] = f_wave(spec.trace_spec[bad_box], spec.TRACE_SPAT[bad_box], grid=False)
ivar_box = 1.0/(var_box + (var_box == 0.0))
nivar_box = None if nvar_box is None else 1.0/(nvar_box + (nvar_box == 0.0))
# Fill em up!
spec.BOX_WAVE = wave_box
spec.BOX_COUNTS = flux_box*mask_box
spec.BOX_COUNTS_IVAR = ivar_box*mask_box
spec.BOX_COUNTS_SIG = np.sqrt(utils.inverse(ivar_box*mask_box))
spec.BOX_COUNTS_NIVAR = None if nivar_box is None else nivar_box*mask_box
spec.BOX_MASK = mask_box
spec.BOX_COUNTS_SKY = sky_box
spec.BOX_COUNTS_SIG_DET = base_box
spec.BOX_RADIUS = box_radius
# TODO - Confirm this should be float, not int
spec.BOX_NPIX = pixtot-pixmsk
def findfwhm(model, sig_x):
""" Calculate the spatial FWHM from an object profile. Utitlit routine for fit_profile
Parameters
----------
    model : numpy float 1-d array
        Object profile values evaluated at sig_x
    sig_x : numpy float 1-d array
        Profile argument (x - x0)/sigma at which the model is evaluated
Returns
-------
peak : Peak value of the profile model
peak_x: sig_x location where the peak value is obtained
lwhm: Value of sig_x at the left width at half maximum
rwhm: Value of sig_x at the right width at half maximum
Notes
-----
Revision History
- 11-Mar-2005 Written by <NAME> and <NAME>, Princeton.
- 28-May-2018 Ported to python by <NAME>
"""
peak = (model*(np.abs(sig_x) < 1.)).max()
peak_x = sig_x[(model*(np.abs(sig_x) < 1.)).argmax()]
lrev = ((sig_x < peak_x) & (model < 0.5*peak))[::-1]
lind, = np.where(lrev)
if(lind.size > 0):
lh = lind.min()
lwhm = (sig_x[::-1])[lh]
else:
lwhm = -0.5*2.3548
rind, = np.where((sig_x > peak_x) & (model < 0.5*peak))
if(rind.size > 0):
rh = rind.min()
rwhm = sig_x[rh]
else:
rwhm = 0.5 * 2.3548
return (peak, peak_x, lwhm, rwhm)
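# Quick numerical sanity check for findfwhm (illustrative only): for a unit
# Gaussian profile the recovered half-maximum points should sit near
# +/- FWHM/2 = +/- 1.1774, so rwhm - lwhm ~ 2.3548.
def _example_findfwhm():
    sig_x = np.linspace(-5.0, 5.0, 2001)
    model = np.exp(-0.5 * sig_x**2)
    peak, peak_x, lwhm, rwhm = findfwhm(model, sig_x)
    print(peak, peak_x, rwhm - lwhm)   # ~1.0, ~0.0, ~2.3548
    return peak, peak_x, lwhm, rwhm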
def qa_fit_profile(x_tot,y_tot, model_tot, l_limit = None, r_limit = None, ind = None,
title =' ', xtrunc = 1e6, xlim = None, ylim = None, qafile = None):
# Plotting pre-amble
plt.close("all")
#plt.clf()
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
width = 10.0 # Golden ratio 1.618
fig, ax = plt.subplots(1, figsize=(width, width/1.618))
if ind is None:
        indx = np.arange(x_tot.size)
else:
if len(ind) == 0:
            indx = np.arange(x_tot.size)
title = title + ': no good pixels, showing all'
else:
indx = ind
x = x_tot.flat[indx]
y = y_tot.flat[indx]
model = model_tot.flat[indx]
ax.plot(x,y,color='k',marker='o',markersize= 0.3, mfc='k',fillstyle='full',linestyle='None')
max_model = np.fmin(model.max(), 2.0)
if xlim is None:
goodpix = model > 0.001*max_model
if goodpix.any():
minx = np.fmax(1.1*(x[goodpix]).min(), -xtrunc)
maxx = np.fmin(1.1*(x[goodpix]).max(), xtrunc)
else:
minx = -5.0
maxx = 5.0
else:
minx = -xlim
maxx = xlim
xlimit = (minx, maxx)
nsamp = 150
half_bin = (maxx - minx)/nsamp/2.0
if ylim is None:
ymax = np.fmax(1.5*model.max(), 0.1)
ymin = np.fmin(-0.1*ymax, -0.05)
ylim = (ymin, ymax)
plot_mid = (np.arange(nsamp) + 0.5)/nsamp*(maxx - minx) + minx
y20 = np.zeros(nsamp)
y80 = np.zeros(nsamp)
y50 = np.zeros(nsamp)
model_samp = np.zeros(nsamp)
nbin = np.zeros(nsamp)
for i in range(nsamp):
dist = np.abs(x - plot_mid[i])
close = dist < half_bin
yclose = y[close]
nclose = close.sum()
nbin[i] = nclose
if close.any():
closest = (dist[close]).argmin()
model_samp[i] = (model[close])[closest]
if nclose > 3:
s = yclose.argsort()
y50[i] = yclose[s[int(np.rint((nclose - 1)*0.5))]]
y80[i] = yclose[s[int(np.rint((nclose - 1)*0.8))]]
y20[i] = yclose[s[int(np.rint((nclose - 1)*0.2))]]
ax.plot([plot_mid[i],plot_mid[i]], [y20[i],y80[i]], linewidth=1.2, color='orange')
icl = nbin > 3
if icl.any():
ax.plot(plot_mid[icl],y50[icl],marker = 'o', color='lime', markersize=2, fillstyle='full', linestyle='None')
else:
ax.plot(plot_mid, y50, marker='o', color='lime', markersize=2, fillstyle = 'full', linestyle='None')
isort = x.argsort()
ax.plot(x[isort], model[isort], color='red', linewidth=1.0)
if l_limit is not None:
ax.axvline(x =l_limit, color='cornflowerblue',linewidth=2.0)
if r_limit is not None:
ax.axvline(x=r_limit, color='cornflowerblue',linewidth=2.0)
ax.set_xlim(xlimit)
ax.set_ylim(ylim)
ax.set_title(title)
ax.set_xlabel(r'$x/\sigma$')
ax.set_ylabel('Normalized Profile')
fig.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
plt.show()
return
def return_gaussian(sigma_x, norm_obj, fwhm, med_sn2, obj_string, show_profile,
ind = None, l_limit = None, r_limit=None, xlim = None, xtrunc = 1e6):
"""
Utility function to return Gaussian object profile in the case of low S/N ratio or too many rejected pixels.
Args:
        sigma_x: ndarray (nspec, nspat)
            Profile argument (x - x0)/sigma of the Gaussian
norm_obj: ndarray (nspec, nspat)
Normalized 2-d spectrum.
fwhm: float
FWHM parameter for Gaussian profile
med_sn2: float
Median (S/N)^2 used only for QA
obj_string: str
String identifying object. Used only for QA.
show_profile: bool
            If set, the QA plot will be shown on screen
ind: array, int
Good indices in object profile. Used by QA routine.
l_limit: float
Left limit of profile fit where derivative is evaluated for Gaussian apodization. Used by QA routine.
r_limit: float
Right limit of profile fit where derivative is evaluated for Gaussian apodization. Used by QA routine.
xlim: float
Spatial location to trim object profile for plotting in QA routine.
xtrunc: float
Spatial nsigma to truncate object profile for plotting in QA routine.
    Returns:
        profile_model: ndarray (nspec, nspat)
            Gaussian profile model evaluated at sigma_x (zeroed where sigma_x**2 >= 25)
    """
profile_model = np.exp(-0.5*sigma_x**2)/np.sqrt(2.0 * np.pi)*(sigma_x ** 2 < 25.)
info_string = "FWHM=" + "{:6.2f}".format(fwhm) + ", S/N=" + "{:8.3f}".format(np.sqrt(med_sn2))
title_string = obj_string + ', ' + info_string
msgs.info(title_string)
inf = np.isfinite(profile_model) == False
ninf = np.sum(inf)
if (ninf != 0):
msgs.warn("Nan pixel values in object profile... setting them to zero")
profile_model[inf] = 0.0
# JFH Not sure we need this normalization.
#nspat = profile_model.shape[1]
#norm_spec = np.sum(profile_model, 1)
#norm = np.outer(norm_spec, np.ones(nspat))
#if np.sum(norm) > 0.0:
# profile_model = profile_model*(norm > 0.0)/(norm + (norm <= 0.0))
if show_profile:
qa_fit_profile(sigma_x, norm_obj, profile_model, title = title_string, l_limit = l_limit, r_limit = r_limit,
ind=ind, xlim = xlim, xtrunc=xtrunc)
return profile_model
def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, flux, fluxivar,
inmask=None, thisfwhm=4.0, max_trace_corr=2.0, sn_gauss=4.0, percentile_sn2=70.0,
prof_nsigma=None, no_deriv=False, gauss=False, obj_string='',
show_profile=False):
"""
    Fit a non-parametric object profile to an object spectrum, unless
    the S/N ratio is low (< sn_gauss), in which case a simple Gaussian is fit.
    Port of IDL LOWREDUX long_gprofile.pro
Parameters
----------
image : numpy float 2-d array (nspec, nspat)
sky-subtracted image
ivar : numpy float 2-d array (nspec, nspat)
inverse variance of sky-subtracted image
    waveimg : numpy float 2-d array (nspec, nspat)
        2-d wavelength map
    thismask : boolean ndarray (nspec, nspat)
        Image indicating which pixels are on the slit/order in question. True=Good.
spat_img: float ndarray, shape (nspec, nspat)
Image containing the spatial location of pixels. If not input,
it will be computed via spat_img = np.outer(np.ones(nspec), np.arange(nspat))
trace_in : numpy 1-d array (nspec,)
object trace
wave : numpy 1-d array (nspec,)
extracted wavelength of spectrum
flux : numpy 1-d array (nspec,)
extracted flux of spectrum
fluxivar : numpy 1-d array (nspec,)
inverse variance of extracted flux spectrum
thisfwhm : float, optional
fwhm of the object trace
max_trace_corr : float [default = 2.0], optional
maximum trace correction to apply
    sn_gauss : float [default = 4.0], optional
S/N ratio below which code just uses a Gaussian
percentile_sn2: float [default = 70.0], optional
        Estimates the S/N of an object from pixels above the percentile_sn2 percentile of the (S/N)^2 values.
        For example, if percentile_sn2 = 70.0 then the upper 30% of spectral pixels are used.
This ensures the code can still fit line only objects and/or high redshift quasars which might only have
signal in reddest part of a spectrum.
maskwidth : float [default = None], optional
object maskwidth determined from object finding algorithm. If = None,
code defaults to use 3.0*(np.max(thisfwhm) + 1.0)
THIS PARAMETER IS NOT USED IN THIS METHOD
prof_nsigma : float [default = None], optional
Number of sigma to include in the profile fitting. This option is only needed for bright objects that are not
point sources, which allows the profile fitting to fit the high S/N wings (rather than the default behavior
which truncates exponentially). This allows for extracting all the flux and results in better sky-subtraction
for bright extended objects.
no_deriv : boolean [default = False]
disables determination of derivatives and exponential apodization
    Returns
    -------
    profile_model : numpy float 2-d array (nspec, nspat)
        Model image of the object profile
    trace_new : numpy 1-d array (nspec,)
        Object trace (possibly shifted by the fitted trace correction)
    fwhmfit : numpy 1-d array (nspec,)
        Fitted spatial FWHM as a function of spectral pixel
    med_sn2 : float
        Median (S/N)^2 of the object used in the fit
"""
if inmask is None:
inmask = (ivar > 0.0) & thismask
totmask = inmask & (ivar > 0.0) & thismask
if prof_nsigma is not None:
no_deriv = True
thisfwhm = np.fmax(thisfwhm,1.0) # require the FWHM to be greater than 1 pixel
nspat = image.shape[1]
nspec = image.shape[0]
# dspat is the spatial position along the image centered on the object trace
dspat = spat_img - np.outer(trace_in, np.ones(nspat))
# create some images we will need
sn2_img = np.zeros((nspec,nspat))
spline_img = np.zeros((nspec,nspat))
flux_sm = scipy.ndimage.filters.median_filter(flux, size=5, mode = 'reflect')
fluxivar_sm0 = scipy.ndimage.filters.median_filter(fluxivar, size = 5, mode = 'reflect')
fluxivar_sm0 = fluxivar_sm0*(fluxivar > 0.0)
wave_min = waveimg[thismask].min()
wave_max = waveimg[thismask].max()
# This adds an error floor to the fluxivar_sm, preventing too much rejection at high-S/N (i.e. standard stars)
# TODO implement the ivar_cap function here in utils.
sn_cap = 100.0
fluxivar_sm = utils.clip_ivar(flux_sm, fluxivar_sm0, sn_cap)
indsp = (wave >= wave_min) & (wave <= wave_max) & \
np.isfinite(flux_sm) & \
(flux_sm > -1000.0) & (fluxivar_sm > 0.0)
b_answer, bmask = fitting.iterfit(wave[indsp], flux_sm[indsp], invvar = fluxivar_sm[indsp],
kwargs_bspline={'everyn': 1.5}, kwargs_reject={'groupbadpix':True,'maxrej':1})
b_answer, bmask2 = fitting.iterfit(wave[indsp], flux_sm[indsp], invvar = fluxivar_sm[indsp]*bmask,
kwargs_bspline={'everyn': 1.5}, kwargs_reject={'groupbadpix':True,'maxrej':1})
c_answer, cmask = fitting.iterfit(wave[indsp], flux_sm[indsp], invvar = fluxivar_sm[indsp]*bmask2,
kwargs_bspline={'everyn': 30}, kwargs_reject={'groupbadpix':True,'maxrej':1})
spline_flux, _ = b_answer.value(wave[indsp])
try:
cont_flux, _ = c_answer.value(wave[indsp])
except:
msgs.error("Bad Fitting in extract:fit_profile..")
sn2 = (np.fmax(spline_flux*(np.sqrt(np.fmax(fluxivar_sm[indsp], 0))*bmask2),0))**2
ind_nonzero = (sn2 > 0)
nonzero = np.sum(ind_nonzero)
if(nonzero >0):
        ## Select the top (100 - percentile_sn2)% of the data for estimating med_sn2. This ensures the code can still
        ## fit line-only objects and/or high-redshift quasars which might only have signal in part of the spectrum.
sn2_percentile = np.percentile(sn2,percentile_sn2)
mean, med_sn2, stddev = stats.sigma_clipped_stats(sn2[sn2>sn2_percentile],sigma_lower=3.0,sigma_upper=5.0)
else:
med_sn2 = 0.0
#min_wave = np.min(wave[indsp])
#max_wave = np.max(wave[indsp])
# TODO -- JFH document this
spline_flux1 = np.zeros(nspec)
cont_flux1 = np.zeros(nspec)
sn2_1 = np.zeros(nspec)
ispline = (wave >= wave_min) & (wave <= wave_max)
spline_tmp, _ = b_answer.value(wave[ispline])
spline_flux1[ispline] = spline_tmp
cont_tmp, _ = c_answer.value(wave[ispline])
cont_flux1[ispline] = cont_tmp
isrt = np.argsort(wave[indsp])
s2_1_interp = scipy.interpolate.interp1d(wave[indsp][isrt], sn2[isrt],assume_sorted=False, bounds_error=False,fill_value = 0.0)
sn2_1[ispline] = s2_1_interp(wave[ispline])
bmask = np.zeros(nspec,dtype='bool')
bmask[indsp] = bmask2
spline_flux1 = pydl.djs_maskinterp(spline_flux1,(bmask == False))
cmask2 = np.zeros(nspec,dtype='bool')
cmask2[indsp] = cmask
cont_flux1 = pydl.djs_maskinterp(cont_flux1,(cmask2 == False))
(_, _, sigma1) = stats.sigma_clipped_stats(flux[indsp],sigma_lower=3.0,sigma_upper=5.0)
sn2_med_filt = scipy.ndimage.filters.median_filter(sn2, size=9, mode='reflect')
if np.any(totmask):
sn2_interp = scipy.interpolate.interp1d(wave[indsp][isrt],sn2_med_filt[isrt],assume_sorted=False,
bounds_error=False,fill_value = 'extrapolate')
sn2_img[totmask] = sn2_interp(waveimg[totmask])
else:
msgs.warn('All pixels are masked')
msgs.info('sqrt(med(S/N)^2) = ' + "{:5.2f}".format(np.sqrt(med_sn2)))
# TODO -- JFH document this
if(med_sn2 <= 2.0):
spline_img[totmask]= np.fmax(sigma1,0)
else:
if((med_sn2 <=5.0) and (med_sn2 > 2.0)):
spline_flux1 = cont_flux1
# Interp over points <= 0 in boxcar flux or masked points using cont model
badpix = (spline_flux1 <= 0.5) | (bmask == False)
goodval = (cont_flux1 > 0.0) & (cont_flux1 < 5e5)
indbad1 = badpix & goodval
nbad1 = np.sum(indbad1)
if(nbad1 > 0):
spline_flux1[indbad1] = cont_flux1[indbad1]
indbad2 = badpix & np.invert(goodval)
nbad2 = np.sum(indbad2)
ngood0 = np.sum(np.invert(badpix))
if((nbad2 > 0) or (ngood0 > 0)):
spline_flux1[indbad2] = np.median(spline_flux1[np.invert(badpix)])
# take a 5-pixel median to filter out some hot pixels
spline_flux1 = scipy.ndimage.filters.median_filter(spline_flux1,size=5,mode ='reflect')
# Create the normalized object image
if np.any(totmask):
igd = (wave >= wave_min) & (wave <= wave_max)
isrt1 = np.argsort(wave[igd])
#plt.plot(wave[igd][isrt1], spline_flux1[igd][isrt1])
#plt.show()
spline_img_interp = scipy.interpolate.interp1d(wave[igd][isrt1],spline_flux1[igd][isrt1],assume_sorted=False,
bounds_error=False,fill_value = 'extrapolate')
spline_img[totmask] = spline_img_interp(waveimg[totmask])
else:
spline_img[totmask] = np.fmax(sigma1, 0)
# TODO -- JFH document this
norm_obj = (spline_img != 0.0)*image/(spline_img + (spline_img == 0.0))
norm_ivar = ivar*spline_img**2
# Cap very large inverse variances
ivar_mask = (norm_obj > -0.2) & (norm_obj < 0.7) & totmask & np.isfinite(norm_obj) & np.isfinite(norm_ivar)
norm_ivar = norm_ivar*ivar_mask
good = (norm_ivar.flatten() > 0.0)
ngood = np.sum(good)
xtemp = (np.cumsum(np.outer(4.0 + np.sqrt(np.fmax(sn2_1, 0.0)),np.ones(nspat)))).reshape((nspec,nspat))
xtemp = xtemp/xtemp.max()
sigma = np.full(nspec, thisfwhm/2.3548)
fwhmfit = sigma*2.3548
trace_corr = np.zeros(nspec)
msgs.info("Gaussian vs b-spline of width " + "{:6.2f}".format(thisfwhm) + " pixels")
area = 1.0
# sigma_x represents the profile argument, i.e. (x-x0)/sigma
sigma_x = dspat/np.outer(sigma, np.ones(nspat)) - np.outer(trace_corr, np.ones(nspat))
# If we have too few pixels to fit a profile or S/N is too low, just use a Gaussian profile
if((ngood < 10) or (med_sn2 < sn_gauss**2) or (gauss is True)):
msgs.info("Too few good pixels or S/N <" + "{:5.1f}".format(sn_gauss) + " or gauss flag set")
msgs.info("Returning Gaussian profile")
profile_model = return_gaussian(sigma_x, norm_obj, thisfwhm, med_sn2, obj_string,show_profile,ind=good,xtrunc=7.0)
return (profile_model, trace_in, fwhmfit, med_sn2)
mask = np.full(nspec*nspat, False, dtype=bool)
# The following lines set the limits for the b-spline fit
limit = scipy.special.erfcinv(0.1/np.sqrt(med_sn2))*np.sqrt(2.0)
if(prof_nsigma is None):
sinh_space = 0.25*np.log10(np.fmax((1000./np.sqrt(med_sn2)),10.))
abs_sigma = np.fmin((np.abs(sigma_x.flat[good])).max(),2.0*limit)
min_sigma = np.fmax(sigma_x.flat[good].min(), (-abs_sigma))
max_sigma = np.fmin(sigma_x.flat[good].max(), (abs_sigma))
nb = (np.arcsinh(abs_sigma)/sinh_space).astype(int) + 1
else:
msgs.info("Using prof_nsigma= " + "{:6.2f}".format(prof_nsigma) + " for extended/bright objects")
        nb = np.round(prof_nsigma > 10)
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from pyiron_base._tests import PyironTestCase
from pyiron_continuum.mesh import (
RectMesh,
callable_to_array,
takes_scalar_field,
takes_vector_field,
has_default_accuracy
)
import numpy as np
import pyiron_continuum.mesh as mesh_mod
class TestDecorators(PyironTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mesh = RectMesh([1, 2, 3], [30, 20, 10])
@staticmethod
def give_vector(mesh):
return np.ones(mesh.shape)
@staticmethod
def give_scalar(mesh):
return np.ones(mesh.divisions)
def test_callable_to_array(self):
scalar_field = self.give_scalar(self.mesh)
@callable_to_array
def method(mesh, callable_or_array, some_kwarg=1):
return callable_or_array + some_kwarg
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, self.give_scalar)), msg="Accept functions")
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, scalar_field)), msg="Accept arrays")
self.assertTrue(np.allclose(scalar_field + 2, method(self.mesh, self.give_scalar, some_kwarg=2)),
msg="Pass kwargs")
def test_takes_scalar_field(self):
scalar_field = self.give_scalar(self.mesh)
@takes_scalar_field
def method(mesh, scalar_field, some_kwarg=1):
return some_kwarg
self.assertEqual(1, method(self.mesh, scalar_field), msg="Accept arrays")
self.assertEqual(2, method(self.mesh, scalar_field, some_kwarg=2), msg="Pass kwargs")
self.assertEqual(1, method(self.mesh, scalar_field.tolist()), msg="Should work with listlike stuff too")
        self.assertRaises(TypeError, method, self.mesh, np.ones(2))
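    def test_takes_vector_field(self):
        # A sketch, assuming takes_vector_field mirrors takes_scalar_field but
        # validates fields shaped like mesh.shape (give_vector above produces
        # such a field); adjust if the real decorator behaves differently.
        vector_field = self.give_vector(self.mesh)

        @takes_vector_field
        def method(mesh, vector_field, some_kwarg=1):
            return some_kwarg

        self.assertEqual(1, method(self.mesh, vector_field), msg="Accept arrays")
        self.assertEqual(2, method(self.mesh, vector_field, some_kwarg=2), msg="Pass kwargs")
        self.assertRaises(TypeError, method, self.mesh, np.ones(2))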
import os, io
import numpy as np
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D, BatchNormalization
from tensorflow.keras.layers import Activation
import cv2
from sklearn.model_selection import train_test_split
import time
from PIL import Image
def load_train_data(way_x, way_y):
train_x, train_y = [], []
for j in sorted(os.listdir(way_x)):
flore_x = os.path.join(way_x, j)
flore_y = os.path.join(way_y, j)
for i in sorted(os.listdir(flore_x)):
image = cv2.imread(os.path.join(flore_x, i))
image = cv2.resize(image, (455, 256))
image = np.asarray(image)
if 'lost_image' in locals():
frame_to_frame = np.concatenate([lost_image, image], axis=2)
lost_image = image
train_x.append(frame_to_frame)
else:
lost_image = image
if os.path.isfile(os.path.join(flore_y, i)):
print(i)
image = cv2.imread(os.path.join(flore_y, i))
image = cv2.resize(image, (455, 256))
image = np.asarray(image)
train_y.append(image)
del lost_image
train_x = np.asarray(train_x)
train_y = np.asarray(train_y)
train_x = train_x / 255
train_y = train_y / 255
return train_x, train_y
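# Shape note (illustrative): with frames resized to 455x256, load_train_data
# returns train_x with shape (N, 256, 455, 6) -- two consecutive BGR frames
# stacked along the channel axis -- and train_y with shape (N, 256, 455, 3),
# both scaled to [0, 1]. This matches the (256, 455, 6) input expected by
# model_init below.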
def model_init(input_shape):
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=input_shape, padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(UpSampling2D(size=(2, 2)))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(32, (2, 2), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(UpSampling2D(size=(2, 2)))
model.add(ZeroPadding2D(padding=((0, 0), (1, 2)))) # Zero padding for fitting output layers shape
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(16, (2, 2), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(16, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(BatchNormalization(batch_size=16))
model.add(Conv2D(3, (1, 1)))
model.add(Activation('sigmoid'))
model.compile(
optimizer="adam",
loss=tensorflow.losses.binary_crossentropy,
metrics=["accuracy"])
return model
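# Usage sketch (assumes TensorFlow/Keras is installed): two stacked 256x455 BGR
# frames go in, one 256x455 BGR frame comes out. The ZeroPadding2D(((0, 0), (1, 2)))
# layer restores the 455-pixel width lost to the two 2x poolings
# (455 -> 227 -> 113 -> 226 -> 452 -> padded back to 455).
def _example_model_shapes():
    m = model_init((256, 455, 6))
    print(m.output_shape)   # expected: (None, 256, 455, 3)
    return m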
def train(model, train_x, train_y, test_x, test_y, epochs=20, batch_size=16):
model.fit(
train_x, train_y,
epochs=epochs,
batch_size=batch_size,
validation_data=(test_x, test_y)
)
model.save_weights('1.h5')
return model
def write_video(file_path):
model.load_weights(input("Путь к файлу весов: "))
cap = cv2.VideoCapture(file_path)
h, w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
fps = int(cap.get(cv2.CAP_PROP_FPS))
buf = []
images = []
lost_time = time.time()
all_time = time.time()
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
count = 0
agr_time = 0
avr_img = 0
step_x = 10
step_y = 10
ret, control_image = cap.read()
if ret:
control_image = cv2.resize(control_image, (455, 256))
control_image = np.asarray(control_image)
control_image = control_image / 255
file = open(input("Путь к запакованному видеофайлу: "), "wb")
file.write(w.to_bytes(2, "little"))
file.write(h.to_bytes(2, "little"))
file.write(fps.to_bytes(2, "little"))
file.write(step_x.to_bytes(2, "little"))
file.write(step_y.to_bytes(2, "little"))
while 1:
ret, image = cap.read()
if ret:
images.append(image)
image = cv2.resize(image, (455, 256))
image = np.asarray(image)
image = image / 255
            frame_to_frame = np.concatenate([image, control_image], axis=2)
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import SGD
import numpy as np
import matplotlib.pyplot as plt
import keyboard as kb
from keras.datasets import fashion_mnist
def show_samples(rows, columns):
fig, images = plt.subplots(rows, columns)
fig.tight_layout()
for image in images.ravel():
r = np.random.randint(0, 50000)
image.imshow(Xtrain[r], cmap='Greys')
image.axis('off')
image.set_title(clothing[Ytrain[r]])
def load_data():
(XtrainMat, Ytrain), (XtestMat, Ytest) = fashion_mnist.load_data()
n_train = len(Ytrain)
n_test = len(Ytest)
p_train = np.random.permutation(n_train)
p_test = np.random.permutation(n_test)
XtrainMat, Ytrain = XtrainMat[p_train] / 255, Ytrain[p_train]
XtestMat, Ytest = XtestMat[p_test] / 255, Ytest[p_test]
Xtrain = np.array([image.flatten() for image in XtrainMat])
Xtest = np.array([image.flatten() for image in XtestMat])
    Xtrain = np.concatenate((np.ones((n_train, 1)), Xtrain), axis=1)
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
def generate():
# Generate random data
base = np.linspace(0, 5, 11)
# base = np.random.randint(0, 10, 5)
    outliers = np.random.randint(10, 20, 2)
"""
An example of running autofaiss by pyspark to produce N indices.
You need to install pyspark before using the following example.
"""
from typing import Dict
import faiss
import numpy as np
from autofaiss import build_index
# You'd better create a spark session before calling build_index,
# otherwise, a spark session would be created by autofaiss with the least configuration.
_, index_path2_metric_infos = build_index(
embeddings="hdfs://root/path/to/your/embeddings/folder",
distributed="pyspark",
file_format="parquet",
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
current_memory_available="10G",
max_index_memory_usage="100G",
nb_indices_to_keep=10,
)
index_paths = sorted(index_path2_metric_infos.keys())
###########################################
# Use case 1: merging 10 indices into one #
###########################################
merged = faiss.read_index(index_paths[0])
for rest_index_file in index_paths[1:]:
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
with open("merged-knn.index", "wb") as f:
faiss.write_index(merged, faiss.PyCallbackIOWriter(f.write))
########################################
# Use case 2: searching from N indices #
########################################
K, DIM, all_distances, all_ids, NB_QUERIES = 5, 512, [], [], 2
queries = faiss.rand((NB_QUERIES, DIM))
for rest_index_file in index_paths:
index = faiss.read_index(rest_index_file)
distances, ids = index.search(queries, k=K)
all_distances.append(distances)
all_ids.append(ids)
dists_arr = np.stack(all_distances, axis=1).reshape(NB_QUERIES, -1)
knn_ids_arr = np.stack(all_ids, axis=1).reshape(NB_QUERIES, -1)
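# Combining the per-index results into a global top-K (a sketch; it assumes larger
# scores mean "more similar", as for inner-product indices -- flip the sign for L2):
sorted_k_indices = np.argsort(-dists_arr)[:, :K]
sorted_k_dists = np.take_along_axis(dists_arr, sorted_k_indices, axis=1)
sorted_k_ids = np.take_along_axis(knn_ids_arr, sorted_k_indices, axis=1)
print(sorted_k_dists.shape, sorted_k_ids.shape)   # (NB_QUERIES, K) each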
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import warnings
import numpy as np
from numpy.testing import assert_allclose, \
assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \
assert_raises, verbose, assert_equal, assert_array_equal
from numpy import array, finfo, argsort, dot, round, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy.linalg import svd, hilbert
from scipy.lib._gcutils import assert_deallocated
# eigs() and eigsh() are called many times, so apply a filter for the warnings
# they generate here.
_eigs_warn_msg = "Single-precision types in `eigs` and `eighs`"
def setup_module():
warnings.filterwarnings("ignore", message=_eigs_warn_msg)
def teardown_module():
warnings.filterwarnings("default", message=_eigs_warn_msg)
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval_a = exact_eval
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval_a = eval
eval = eval[ind]
evec = evec[:,ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eval * np.dot(b, evec)
else:
RHS = eval * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
yield (eval_evec, symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((2, 2)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# XXX: this test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
                  [0, 0, 1]], float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
                  [0, 0, 1, 0]], float)
z = csc_matrix(x)
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
                  [0, 0, 1]], complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
                  [0, 0, 1, 0]], complex)
z = csc_matrix(x)
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
ss = svds(x, 2, which=which, return_singular_vectors=False)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
u, s, vh = svds(x, 1)
u2, s2, vh2 = svds(x, 1, v0=u[:,0])
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
    # Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
U, s, VH = svds(A, k)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
def test_svd_linop():
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:,j], s[j], VH[j,:]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
v0 = np.ones(min(A.shape))
U1, s1, VH1 = reorder(svds(A, k, v0=v0))
U2, s2, VH2 = reorder(svds(L, k, v0=v0))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="SM"))
U2, s2, VH2 = reorder(svds(L, k, which="SM"))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
U1, s1, VH1 = reorder(svds(A, k, which="LM"))
U2, s2, VH2 = reorder(svds(L, k, which="LM"))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
                assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections as mc
import copy
action2vect = {0: np.array([0, -1]),
1: np.array([0, +1]),
2: np.array([-1, 0]),
3: np.array([+1, 0])
}
a2m = {0:'up', 1:'down', 2:'left', 3:'right'}
def random_initialize(Maze):
floor_labels = np.arange(len(Maze.floors))
start_floor_label = np.random.choice(floor_labels)
goal_floor_label = np.random.choice(floor_labels)
#Maze.set_start(Maze.floors[start_floor_label].tolist())
Maze.set_goal(Maze.floors[goal_floor_label].tolist())
return Maze
def get_fig_ax(size=(8, 5)):
fig = plt.figure(figsize=size)
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
return fig, ax
class MazeEnv():
def __init__(self, lx, ly, threshold=0.9, figsize=5):
self.lx = lx
self.ly = ly
self.create_maze_by_normal_distribution(threshold=threshold)
self = random_initialize(self)
self.action_space = [0,1,2,3]
self.status = 'Initialized'
self.figsize = figsize
def reset(self, coordinate=[None, None]):
"""
put the state at the start.
"""
if coordinate[0]!=None:
self.state = np.array(coordinate)
else:
#
floor_labels = np.arange(len(self.floors))
start_floor_label = np.random.choice(floor_labels)
self.state = self.floors[start_floor_label]
#
#self.state = np.array(self.start)
self.status = 'Reset'
self.t = 0
return self.get_state()
def is_solved(self):
"""
if the state is at the goal, returns True.
"""
return self.goal==self.state.tolist()
def get_state(self):
"""
returns (x, y) coordinate of the state
"""
return copy.deepcopy(self.state)#, copy.deepcopy(self.state[1])
def step0(self, state, action):
add_vector_np = action2vect[action]
if (state+add_vector_np).tolist() in self.floors.tolist():
next_state = state+add_vector_np
self.status = 'Moved'
else:
next_state = state
self.status = 'Move failed'
self.t += 1
return next_state
def step1(self, state, action, state_p):
if state_p.tolist()==self.goal:
reward = 1
elif False:
reward = 0.1
else:
reward = 0
return reward
def step(self, action):
state = self.get_state()
next_state = self.step0(state, action)
reward = self.step1(state, action, next_state)
# self.state update
self.state = next_state
return self.get_state(), reward, self.is_solved(), {}
def create_maze_by_normal_distribution(self, threshold):
"""
        Create a random maze by thresholding samples from a standard normal
        distribution. A higher threshold creates an easier maze; a value
        around threshold=1 is recommended.
"""
x = np.random.randn(self.lx*self.ly).reshape(self.lx, self.ly)
y = (x < threshold)*(x > -threshold)
self.tile = y
self.load_tile()
def load_tile(self):
self.floors = np.array(list(np.where(self.tile==True))).T # (#white tiles, 2), 2 means (x,y) coordinate
        self.holes = np.array(list(np.where(self.tile==False))).T # (#black tiles, 2)
def flip(self, coordinate=[None, None]):
self.tile[coordinate[0], coordinate[1]] = not self.tile[coordinate[0], coordinate[1]]
self.load_tile()
def render_tile(self, ax, cmap='gray'):
ax.imshow(self.tile.T, interpolation="none", cmap=cmap)
return ax
def render_arrows(self, ax, values_table):
lx, ly, _ = values_table.shape
vmaxs = np.max(values_table, axis=2).reshape(lx, ly, 1)
vt = np.transpose(values_table*self.tile.reshape(lx, ly, 1)/vmaxs, (1,0,2))
width = 0.5
X, Y= np.meshgrid(np.arange(0, lx, 1), np.arange(0, ly, 1))
ones = .5*np.ones(lx*ly).reshape(lx, ly)
zeros= np.zeros(lx*ly).reshape(lx, ly)
# up
ax.quiver(X, Y, zeros, ones, vt[:,:,0], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# down
ax.quiver(X, Y, zeros, -ones, vt[:,:,1], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# left
ax.quiver(X, Y, -ones, zeros, vt[:,:,2], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
# right
ax.quiver(X, Y, ones, zeros, vt[:,:,3], alpha=0.8,
cmap='Reds', scale_units='xy', scale=1)
return ax
def render(self, fig=None, ax=None, lines=None, values_table=None):
canvas = False
if ax!=None:
canvas = True
ax.clear()
else:
fig = plt.figure(figsize=(self.figsize, self.figsize))
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
####
ax = self.render_tile(ax)
if values_table is not None:
ax = self.render_arrows(ax, values_table)
####
try:
ax.scatter(self.start[0], self.start[1], marker='x', s=100, color='blue',
alpha=0.8, label='start')
except AttributeError:
pass
try:
ax.scatter(self.goal[0], self.goal[1], marker='d', s=100, color='red',
alpha=0.8, label='goal')
except AttributeError:
pass
try:
ax.scatter(self.state[0], self.state[1], marker='o', s=100, color='black',
alpha=0.8, label='agent')
except AttributeError:
pass
if lines is not None:
lc = mc.LineCollection(lines, linewidths=2, color='black', alpha=0.5)
ax.add_collection(lc)
else:
pass
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
scatterpoints=1)
if canvas:
#pass
fig.canvas.draw()
else:
plt.show()
def set_start(self, coordinate=[None, None]):
if coordinate in self.floors.tolist():
self.start = coordinate
else:
print('Set the start on a white tile.')
def set_goal(self, coordinate=[None, None]):
if coordinate in self.floors.tolist():
self.goal = coordinate
else:
print('Set the goal on a white tile.')
def play(self, Agent, show=True):
lines = []
while not self.is_solved():
state0 = self.get_state()
action = Agent.play()
self.step(action)
state1 = self.get_state()
lines.append([state0, state1])
if show:
self.render(lines=lines)
def play_interactive(self, Agent):
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
self.render(fig=fig, ax=ax)
lines = []
while not self.is_solved():
state0 = self.get_state()
action = Agent.play()
self.step(action)
state1 = self.get_state()
lines.append([state0, state1])
self.render(fig=fig, ax=ax, lines=lines)
#fig.canvas.draw()
self.render(fig=fig, ax=ax, lines=lines)
plt.show()
print("solved!")
class CliffEnv(MazeEnv):
def __init__(self, lx, ly, threshold=0.9, figsize=5):
self.lx = lx
self.ly = ly
self.create_cliff()
self.start = [0, ly-1]
self.goal = [lx-1, ly-1]
self.action_space = [0,1,2,3]
self.status = 'Initialized'
self.figsize = figsize
def reset(self, coordinate=[None, None]):
"""
put the state at the start.
"""
if coordinate[0]!=None:
self.state = np.array(coordinate)
else:
            self.state = np.array(self.start)
# Copyright (c) 2020, <NAME>
from __future__ import print_function
import numpy as np
import pickle
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.model_selection import StratifiedShuffleSplit
class Dataset_from_matrix(Dataset):
"""Face Landmarks dataset."""
def __init__(self, data_matrix):
"""
        Args:
            data_matrix: dict of arrays with keys 'x' (features), 't' (treatment),
                'd' (dosage) and 'y' (outcome), each with n rows.
"""
self.data_matrix = data_matrix
self.num_data = data_matrix['x'].shape[0]
def __len__(self):
return self.num_data
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return (self.data_matrix['x'][idx], self.data_matrix['t'][idx], self.data_matrix['d'][idx], self.data_matrix['y'][idx])
def get_iter(data_matrix, batch_size, shuffle=True):
dataset = Dataset_from_matrix(data_matrix)
iterator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
return iterator
def softmax(x):
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability; softmax is shift-invariant
return e_x / e_x.sum(axis=0)
def compute_beta(alpha, optimal_dosage):
if (optimal_dosage <= 0.001 or optimal_dosage >= 1.0):
beta = 1.0
else:
beta = (alpha - 1.0) / float(optimal_dosage) + (2.0 - alpha)
return beta
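# Worked example (hypothetical numbers): compute_beta picks b so that the mode
# (a-1)/(a+b-2) of Beta(a, b) sits at the optimal dosage.  With alpha = 2 and
# an optimal dosage of 0.25 it returns b = (2-1)/0.25 + (2-2) = 4, whose mode
# is (2-1)/(2+4-2) = 0.25.
def _example_compute_beta():
    alpha, optimal_dosage = 2.0, 0.25
    beta = compute_beta(alpha, optimal_dosage)    # -> 4.0
    mode = (alpha - 1.0) / (alpha + beta - 2.0)   # -> 0.25
    return beta, mode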
def generate_patient(x, v, num_treatments, treatment_selection_bias, dosage_selection_bias,
scaling_parameter, noise_std):
outcomes = []
dosages = []
for treatment in range(num_treatments):
if (treatment == 0):
b = 0.75 * np.dot(x, v[treatment][1]) / (np.dot(x, v[treatment][2]))
if (b >= 0.75):
optimal_dosage = b / 3.0
else:
optimal_dosage = 1.0
#optimal_dosage = np.dot(x, v[treatment][1]) / (np.dot(x, v[treatment][2]))
alpha = dosage_selection_bias
dosage = np.random.beta(alpha, compute_beta(alpha, optimal_dosage))
y = get_patient_outcome(x, v, treatment, dosage, scaling_parameter)
elif (treatment == 1):
optimal_dosage = np.dot(x, v[treatment][2]) / (2.0 * np.dot(x, v[treatment][1]))
alpha = dosage_selection_bias
dosage = np.random.beta(alpha, compute_beta(alpha, optimal_dosage))
if (optimal_dosage <= 0.001):
dosage = 1 - dosage
y = get_patient_outcome(x, v, treatment, dosage, scaling_parameter)
elif (treatment == 2):
optimal_dosage = np.dot(x, v[treatment][1]) / (2.0 * np.dot(x, v[treatment][2]))
alpha = dosage_selection_bias
dosage = np.random.beta(alpha, compute_beta(alpha, optimal_dosage))
if (optimal_dosage <= 0.001):
dosage = 1 - dosage
y = get_patient_outcome(x, v, treatment, dosage, scaling_parameter)
outcomes.append(y)
dosages.append(dosage)
treatment_coeff = [treatment_selection_bias * (outcomes[i] / np.max(outcomes)) for i in range(num_treatments)]
treatment = np.random.choice(num_treatments, p=softmax(treatment_coeff))
return treatment, dosages[treatment], outcomes[treatment] + np.random.normal(0, noise_std)
def get_patient_outcome(x, v, treatment, dosage, scaling_parameter=10):
if (treatment == 0):
#y = float(scaling_parameter) * (np.dot(x, v[treatment][0]) + 12.0 * dosage * (dosage - ( np.dot(x, v[treatment][1]) / np.dot(x, v[treatment][2]))) ** 2)
y = float(scaling_parameter) * (np.dot(x, v[treatment][0]) + 12.0 * dosage * (dosage - 0.75 * ( np.dot(x, v[treatment][1]) / np.dot(x, v[treatment][2]))) ** 2)
elif (treatment == 1):
y = float(scaling_parameter) * (np.dot(x, v[treatment][0]) + np.sin(
np.pi * (np.dot(x, v[treatment][1]) / np.dot(x, v[treatment][2])) * dosage))
elif (treatment == 2):
y = float(scaling_parameter) * (np.dot(x, v[treatment][0]) + 12.0 * (np.dot(x, v[treatment][
1]) * dosage - np.dot(x, v[treatment][2]) * dosage ** 2))
return y
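# Sketch (not part of the original pipeline): evaluate the treatment-0
# dose-response curve of get_patient_outcome on a dosage grid for one synthetic
# patient.  Feature dimension and weights are made up here; shapes follow the
# conventions above (v has shape (num_treatments, num_weights, num_features)).
def _example_dose_response(num_features=10):
    rng = np.random.RandomState(0)
    x = rng.uniform(0, 1, size=num_features)
    x = x / np.linalg.norm(x)
    v = rng.uniform(0, 10, size=(3, 3, num_features))
    for i in range(3):
        for j in range(3):
            v[i][j] = v[i][j] / np.linalg.norm(v[i][j])
    dosages = np.linspace(0, 1, 11)
    return [get_patient_outcome(x, v, treatment=0, dosage=d) for d in dosages]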
def get_dataset_splits(dataset):
dataset_keys = ['x', 't', 'd', 'y', 'y_normalized']
train_index = dataset['metadata']['train_index']
val_index = dataset['metadata']['val_index']
test_index = dataset['metadata']['test_index']
dataset_train = dict()
dataset_val = dict()
dataset_test = dict()
for key in dataset_keys:
dataset_train[key] = dataset[key][train_index]
dataset_val[key] = dataset[key][val_index]
dataset_test[key] = dataset[key][test_index]
dataset_train['metadata'] = dataset['metadata']
dataset_val['metadata'] = dataset['metadata']
dataset_test['metadata'] = dataset['metadata']
return dataset_train, dataset_val, dataset_test
def get_split_indices(num_patients, patients, treatments, validation_fraction, test_fraction):
num_validation_patients = int(np.floor(num_patients * validation_fraction))
num_test_patients = int(np.floor(num_patients * test_fraction))
test_sss = StratifiedShuffleSplit(n_splits=1, test_size=num_test_patients, random_state=0)
rest_indices, test_indices = next(test_sss.split(patients, treatments))
val_sss = StratifiedShuffleSplit(n_splits=1, test_size=num_validation_patients, random_state=0)
train_indices, val_indices = next(val_sss.split(patients[rest_indices], treatments[rest_indices]))
return train_indices, val_indices, test_indices
class TCGA_Data():
def __init__(self, args):
np.random.seed(3)
self.num_treatments = args['num_treatments']
self.treatment_selection_bias = args['treatment_selection_bias']
self.dosage_selection_bias = args['dosage_selection_bias']
self.validation_fraction = args['validation_fraction']
self.test_fraction = args['test_fraction']
self.tcga_data = pickle.load(open('datasets/tcga.p', 'rb'))
self.patients = self.normalize_data(self.tcga_data['rnaseq'])
self.scaling_parameteter = 10
self.noise_std = 0.2
self.num_weights = 3
self.v = np.zeros(shape=(self.num_treatments, self.num_weights, self.patients.shape[1]))
for i in range(self.num_treatments):
for j in range(self.num_weights):
self.v[i][j] = np.random.uniform(0, 10, size=(self.patients.shape[1]))
self.v[i][j] = self.v[i][j] / np.linalg.norm(self.v[i][j])
self.dataset = self.generate_dataset(self.patients, self.num_treatments)
def normalize_data(self, patient_features):
x = (patient_features - np.min(patient_features, axis=0)) / (
np.max(patient_features, axis=0) - np.min(patient_features, axis=0))
for i in range(x.shape[0]):
x[i] = x[i] / np.linalg.norm(x[i])
return x
def generate_dataset(self, patient_features, num_treatments):
tcga_dataset = dict()
tcga_dataset['x'] = []
tcga_dataset['y'] = []
tcga_dataset['t'] = []
tcga_dataset['d'] = []
tcga_dataset['metadata'] = dict()
tcga_dataset['metadata']['v'] = self.v
tcga_dataset['metadata']['treatment_selection_bias'] = self.treatment_selection_bias
tcga_dataset['metadata']['dosage_selection_bias'] = self.dosage_selection_bias
tcga_dataset['metadata']['noise_std'] = self.noise_std
tcga_dataset['metadata']['scaling_parameter'] = self.scaling_parameteter
for patient in patient_features:
t, dosage, y = generate_patient(x=patient, v=self.v, num_treatments=num_treatments,
treatment_selection_bias=self.treatment_selection_bias,
dosage_selection_bias=self.dosage_selection_bias,
scaling_parameter=self.scaling_parameteter,
noise_std=self.noise_std)
tcga_dataset['x'].append(patient)
tcga_dataset['t'].append(t)
tcga_dataset['d'].append(dosage)
tcga_dataset['y'].append(y)
for key in ['x', 't', 'd', 'y']:
tcga_dataset[key] = np.array(tcga_dataset[key])
tcga_dataset['metadata']['y_min'] = np.min(tcga_dataset['y'])
tcga_dataset['metadata']['y_max'] = np.max(tcga_dataset['y'])
tcga_dataset['y_normalized'] = (tcga_dataset['y'] - np.min(tcga_dataset['y'])) / (
                np.max(tcga_dataset['y']) - np.min(tcga_dataset['y']))
import numpy as np
from scipy.optimize import minimize
import scipy.stats
import pickle, os, random, time
import matplotlib.pyplot as plt
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.metrics import mean_squared_error
import logging
def set_cons(a_max_n_boundary=[0.1, 2.5], desired_V_n_boundary=[1, 40], a_comf_n_boundary=[0.1, 5],
S_jam_boundary=[0.1, 10], desired_T_n_boundary=[0.1, 5], beta_boundary=[4, 4]):
# constraints: eq or ineq
a_max_n_boundary = a_max_n_boundary
desired_V_n_boundary = desired_V_n_boundary
a_comf_n_boundary = a_comf_n_boundary
S_jam_boundary = S_jam_boundary
desired_T_n_boundary = desired_T_n_boundary
beta_boundary = beta_boundary
cons = ({'type': 'ineq', 'fun': lambda x: x[0] - a_max_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[0] + a_max_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[1] - desired_V_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[1] + desired_V_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[2] - a_comf_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[2] + a_comf_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[3] - S_jam_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[3] + S_jam_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[4] - desired_T_n_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[4] + desired_T_n_boundary[1]}, \
{'type': 'ineq', 'fun': lambda x: x[5] - beta_boundary[0]}, \
{'type': 'ineq', 'fun': lambda x: -x[5] + beta_boundary[1]})
return cons
def initialize(a_max_n_boundary=[0.1, 2.5], desired_V_n_boundary=[1, 40], a_comf_n_boundary=[0.1, 5],
S_jam_boundary=[0.1, 10], \
desired_T_n_boundary=[0.1, 5], beta_boundary=[4, 4]):
# a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta
x0 = (random.uniform(a_max_n_boundary[0], a_max_n_boundary[1]),
random.uniform(desired_V_n_boundary[0], desired_V_n_boundary[1]), \
random.uniform(a_comf_n_boundary[0], a_comf_n_boundary[1]),
random.uniform(S_jam_boundary[0], S_jam_boundary[1]), \
random.uniform(desired_T_n_boundary[0], desired_T_n_boundary[1]), 4)
return x0
def IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
# if a_max_n * a_comf_n <= 0:
# print("a_max_n", a_max_n, "a_comf_n", a_comf_n)
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
a_n_t = []
for i in range(len(delta_V_n_t)):
desired_S_n = desired_space_hw(S_jam_n, V_n_t[i], desired_T_n, delta_V_n_t[i], a_max_n, a_comf_n)
a_n_t.append(a_max_n * (1 - (V_n_t[i] / desired_V_n) ** beta - (desired_S_n / S_n_t[i]) ** 2))
return np.array(a_n_t)
def tv_IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
# if a_max_n * a_comf_n <= 0:
# print("a_max_n", a_max_n, "a_comf_n", a_comf_n)
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
a_n_t = []
for i in range(len(delta_V_n_t)):
desired_S_n = desired_space_hw(S_jam_n[i], V_n_t[i], desired_T_n[i], delta_V_n_t[i], a_max_n[i], a_comf_n[i])
a_n_t.append(a_max_n[i] * (1 - (V_n_t[i] / desired_V_n[i]) ** beta - (desired_S_n / S_n_t[i]) ** 2))
return np.array(a_n_t)
def IDM_cf_model_for_p(delta_V_n_t, S_n_t, V_n_t, a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta):
def desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n):
item1 = S_jam_n
item2 = V_n_t * desired_T_n
item3 = (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# if V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n)) > 0:
# item2 = V_n_t * desired_T_n - (V_n_t * delta_V_n_t) / (2 * np.sqrt(a_max_n * a_comf_n))
# else:
# item2 = 0
return item1 + max(0, item2 + item3)
desired_S_n = desired_space_hw(S_jam_n, V_n_t, desired_T_n, delta_V_n_t, a_max_n, a_comf_n)
a_n_t = a_max_n * (1 - (V_n_t / desired_V_n) ** beta - (desired_S_n / S_n_t) ** 2)
return a_n_t
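# Single-step illustration (hypothetical parameter values, not fitted ones):
# predicted IDM acceleration for a follower at 10 m/s closing at 1 m/s on a
# 20 m gap; the result is a mild deceleration of roughly -0.12 m/s^2.
def _example_idm_single_step():
    return IDM_cf_model_for_p(delta_V_n_t=1.0, S_n_t=20.0, V_n_t=10.0,
                              a_max_n=1.0, desired_V_n=30.0, a_comf_n=1.5,
                              S_jam_n=2.0, desired_T_n=1.5, beta=4)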
def obj_func(args):
a, delta_V_n_t, S_n_t, V_n_t = args
# x[0:6]: a_max_n, desired_V_n, a_comf_n, S_jam_n, desired_T_n, beta
# err = lambda x: np.sqrt( np.sum( ( (a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) / a ) ** 2) / len(a) )
# err = lambda x: np.sqrt( np.sum((a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) ** 2) / np.sum(a**2))
err = lambda x: np.sqrt(
np.sum((a - IDM_cf_model(delta_V_n_t, S_n_t, V_n_t, x[0], x[1], x[2], x[3], x[4], x[5])) ** 2) / len(a))
return err
a_max_n_boundary = [0.1, 2.5]
desired_V_n_boundary = [1, 40]
a_comf_n_boundary = [0.1, 5]
S_jam_n_boundary = [0.1, 10]
desired_T_n_boundary = [0.1, 5]
boundary = [a_max_n_boundary, desired_V_n_boundary, a_comf_n_boundary, S_jam_n_boundary, desired_T_n_boundary]
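# Calibration sketch (assumption: not the original fitting script): fit the six
# IDM parameters for one driver by minimizing the RMSE objective above with
# SLSQP, reusing obj_func, initialize and set_cons.  The four arrays are
# placeholders for one vehicle's observed car-following data.
def _example_idm_calibration(delta_V_n_t, S_n_t, V_n_t, a):
    err = obj_func((a, delta_V_n_t, S_n_t, V_n_t))
    res = minimize(err, initialize(), method='SLSQP', constraints=set_cons())
    # res.x holds (a_max, desired_V, a_comf, S_jam, desired_T, beta), res.fun the RMSE
    return res.x, res.fun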
def save_pkl_file(file, var):
pkl_file = open(file, 'wb')
pickle.dump(var, pkl_file)
pkl_file.close()
def read_pkl_file(file):
pkl_file = open(file, 'rb')
var = pickle.load(pkl_file)
pkl_file.close()
return var
def context_target_split(b_x, b_y, num_context, num_extra_target):
"""Given inputs x and their value y, return random subsets of points for
context and target. Note that following conventions from "Empirical
Evaluation of Neural Process Objectives" the context points are chosen as a
subset of the target points.
Parameters
----------
    b_x : list of np.ndarray
        Each element has shape (num_points, x_dim).
    b_y : list of np.ndarray
        Each element has shape (num_points,) and is reshaped to (num_points, 1).
num_context : int
Number of context points.
num_extra_target : int
Number of additional target points.
"""
x_context = []
y_context = []
x_target = []
y_target = []
for i in range(len(b_x)):
x = np.array(b_x[i])
y = np.array(b_y[i]).reshape(len(b_y[i]), 1)
# print(x.shape, y.shape)
num_points = x.shape[0]
# Sample locations of context and target points
# print(num_points, num_context, num_extra_target, num_context + num_extra_target)
if num_context + num_extra_target < num_points:
locations = np.random.choice(num_points,
size=num_context + num_extra_target,
replace=False)
else:
locations = np.random.choice(num_points,
size=num_context + num_extra_target,
replace=True)
for j in range(len(locations)):
if locations[j] > num_points:
while True:
new_loc = np.random.choice(locations, size=1)
if new_loc < num_points:
locations[j] = new_loc
break
x_context.append(x[locations[:num_context], :])
y_context.append(y[locations[:num_context], :])
x_target.append(x[locations, :])
y_target.append(y[locations, :])
x_context = np.array(x_context)
y_context = np.array(y_context)
x_target = np.array(x_target)
y_target = np.array(y_target)
# print(x_context.shape, y_context.shape, x_target.shape, y_target.shape)
return x_context, y_context, x_target, y_target
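# Shape-check sketch (illustrative only): with a batch of two sequences of 50
# six-dimensional points, num_context=10 and num_extra_target=5 give context
# arrays of shape (2, 10, 6)/(2, 10, 1) and target arrays of shape
# (2, 15, 6)/(2, 15, 1).
def _example_context_target_split():
    rng = np.random.RandomState(0)
    b_x = [rng.rand(50, 6) for _ in range(2)]
    b_y = [rng.rand(50) for _ in range(2)]
    return context_target_split(b_x, b_y, num_context=10, num_extra_target=5)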
import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions import Normal
from random import randint
from torch.distributions.kl import kl_divergence
class DeterministicEncoder(nn.Module):
"""Maps an (x_i, y_i) pair to a representation r_i.
Parameters
----------
x_dim : int
Dimension of x values.
y_dim : int
Dimension of y values.
h_dim : int
Dimension of hidden layer.
r_dim : int
Dimension of output representation r.
"""
def __init__(self, x_dim, y_dim, h_dim, r_dim):
super(DeterministicEncoder, self).__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.h_dim = h_dim
self.r_dim = r_dim
layers = [nn.Linear(x_dim + y_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, r_dim)]
self.input_to_hidden = nn.Sequential(*layers)
def forward(self, x, y):
"""
x : torch.Tensor
Shape (batch_size, x_dim)
y : torch.Tensor
Shape (batch_size, y_dim)
"""
input_pairs = torch.cat((x, y), dim=1)
return self.input_to_hidden(input_pairs)
class LatentEncoder(nn.Module):
"""
    Maps batches of (x_i, y_i) pairs to the mu and sigma that define the normal
    distribution from which the latent variable z is sampled.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, x_dim, y_dim, r_dim, z_dim):
super(LatentEncoder, self).__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.r_dim = r_dim
self.z_dim = z_dim
self.xy_to_hidden = nn.Linear(x_dim + y_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, x, y, batch_size, num_points):
"""
x : torch.Tensor
Shape (batch_size, x_dim)
y : torch.Tensor
Shape (batch_size, y_dim)
"""
input_pairs = torch.cat((x, y), dim=1)
hidden = torch.relu(self.xy_to_hidden(input_pairs))
hidden = hidden.view(batch_size, num_points, self.r_dim)
hidden = torch.mean(hidden, dim=1)
mu = torch.relu(self.hidden_to_mu(hidden))
# Define sigma following convention in "Empirical Evaluation of Neural
# Process Objectives" and "Attentive Neural Processes"
sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
return mu, sigma
# constrained output
class activation(nn.Module):
def __init__(self, a_max_n_boundary = [-5.0, 5.0]):
super().__init__()
self.a_max_n_boundary = a_max_n_boundary
def forward(self, inputs):
for i in range(len(inputs)):
if inputs[i] < self.a_max_n_boundary[0]:
inputs[i] = self.a_max_n_boundary[0]
elif inputs[i] > self.a_max_n_boundary[1]:
inputs[i] = self.a_max_n_boundary[1]
return inputs
class Decoder(nn.Module):
"""
Maps target input x_target and samples z (encoding information about the
context points) to predictions y_target.
Parameters
----------
x_dim : int
Dimension of x values.
z_dim : int
Dimension of latent variable z.
h_dim : int
Dimension of hidden layer.
y_dim : int
Dimension of y values.
r_dim : int
Dimension of output representation r.
"""
def __init__(self, x_dim, z_dim, h_dim, y_dim, r_dim):
super(Decoder, self).__init__()
self.x_dim = x_dim
self.z_dim = z_dim
self.h_dim = h_dim
self.y_dim = y_dim
self.r_dim = r_dim
layers = [nn.Linear(x_dim + z_dim + r_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, h_dim),
nn.ReLU(inplace=True),
nn.Linear(h_dim, h_dim),
nn.ReLU(inplace=True)]
self.xz_to_hidden = nn.Sequential(*layers)
self.hidden_to_mu = nn.Linear(h_dim, y_dim)
self.hidden_to_sigma = nn.Linear(h_dim, y_dim)
self.constrain_output = activation()
def forward(self, x, z, r):
"""
x : torch.Tensor
Shape (batch_size, num_points, x_dim)
z : torch.Tensor
Shape (batch_size, z_dim)
r : torch.Tensor
Shape (batch_size, r_dim)
Returns
-------
Returns mu and sigma for output distribution. Both have shape
(batch_size, num_points, y_dim).
"""
batch_size, num_points, _ = x.size()
# Repeat z, so it can be concatenated with every x. This changes shape
# from (batch_size, z_dim) to (batch_size, num_points, z_dim)
z = z.unsqueeze(1).repeat(1, num_points, 1)
r = r.unsqueeze(1).repeat(1, num_points, 1)
# Flatten x, z, and r to fit with linear layer
x_flat = x.view(batch_size * num_points, self.x_dim)
z_flat = z.view(batch_size * num_points, self.z_dim)
r_flat = r.view(batch_size * num_points, self.r_dim)
# print(x_flat.size(), z_flat.size(), r_flat.size())
# Input is concatenation of z with every row of x
input_pairs = torch.cat((x_flat, z_flat, r_flat), dim=1)
hidden = self.xz_to_hidden(input_pairs)
mu = self.hidden_to_mu(hidden)
mu = self.constrain_output(mu)
pre_sigma = self.hidden_to_sigma(hidden)
# Reshape output into expected shape
mu = mu.view(batch_size, num_points, self.y_dim)
pre_sigma = pre_sigma.view(batch_size, num_points, self.y_dim)
# Define sigma following convention in "Empirical Evaluation of Neural
# Process Objectives" and "Attentive Neural Processes"
sigma = 0.1 + 0.9 * F.softplus(pre_sigma)
return mu, sigma
class NeuralProcess(nn.Module):
"""
Implements Neural Process for functions of arbitrary dimensions.
Parameters
----------
x_dim : int
Dimension of x values.
y_dim : int
Dimension of y values.
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
h_dim : int
Dimension of hidden layer in encoder and decoder.
"""
def __init__(self, x_dim, y_dim, r_dim, z_dim, h_dim):
super(NeuralProcess, self).__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.r_dim = r_dim
self.z_dim = z_dim
self.h_dim = h_dim
# self.training = training
# Initialize networks
self.deterministic_encoder = DeterministicEncoder(x_dim, y_dim, h_dim, r_dim)
self.latent_encoder = LatentEncoder(x_dim, y_dim, r_dim, z_dim)
self.decoder = Decoder(x_dim, z_dim, h_dim, y_dim, r_dim)
def aggregate(self, r_i):
"""
Aggregates representations for every (x_i, y_i) pair into a single
representation.
Parameters
----------
r_i : torch.Tensor
Shape (batch_size, num_points, r_dim)
"""
return torch.mean(r_i, dim=1)
def deterministic_rep(self, x, y):
"""
        Maps (x, y) pairs into a single deterministic representation r by
        encoding each pair and aggregating over points.
Parameters
----------
x : torch.Tensor
Shape (batch_size, num_points, x_dim)
y : torch.Tensor
Shape (batch_size, num_points, y_dim)
"""
batch_size, num_points, _ = x.size()
# Flatten tensors, as encoder expects one dimensional inputs
x_flat = x.view(batch_size * num_points, self.x_dim)
y_flat = y.contiguous().view(batch_size * num_points, self.y_dim)
# Encode each point into a representation r_i
r_i_flat = self.deterministic_encoder(x_flat, y_flat)
# Reshape tensors into batches
r_i = r_i_flat.view(batch_size, num_points, self.r_dim)
# print("deterministic encoder r_i size", r_i.size())
# Aggregate representations r_i into a single representation r
r = self.aggregate(r_i)
# Return deterministic representation
return r
def latent_rep(self, x, y):
"""
Maps (x, y) pairs into the mu and sigma parameters defining the normal
distribution of the latent variables z.
Parameters
----------
x : torch.Tensor
Shape (batch_size, num_points, x_dim)
y : torch.Tensor
Shape (batch_size, num_points, y_dim)
"""
batch_size, num_points, _ = x.size()
# Flatten tensors, as encoder expects one dimensional inputs
x_flat = x.view(batch_size * num_points, self.x_dim)
y_flat = y.contiguous().view(batch_size * num_points, self.y_dim)
# Return parameters of latent representation
mu, sigma = self.latent_encoder(x_flat, y_flat, batch_size, num_points)
return mu, sigma
def forward(self, x_context, y_context, x_target, y_target=None, given_r=None):
"""
Given context pairs (x_context, y_context) and target points x_target,
returns a distribution over target points y_target.
Parameters
----------
x_context : torch.Tensor
Shape (batch_size, num_context, x_dim). Note that x_context is a
subset of x_target.
y_context : torch.Tensor
Shape (batch_size, num_context, y_dim)
x_target : torch.Tensor
Shape (batch_size, num_target, x_dim)
y_target : torch.Tensor or None
Shape (batch_size, num_target, y_dim). Only used during training.
Note
----
We follow the convention given in "Empirical Evaluation of Neural
Process Objectives" where context is a subset of target points. This was
shown to work best empirically.
"""
# Infer quantities from tensor dimensions
batch_size, num_context, x_dim = x_context.size()
_, num_target, _ = x_target.size()
_, _, y_dim = y_context.size()
if self.training:
# Encode target and context (context needs to be encoded to
# calculate kl term)
mu_target, sigma_target = self.latent_rep(x_target, y_target)
mu_context, sigma_context = self.latent_rep(x_context, y_context)
# Sample from encoded distribution using reparameterization trick
q_target = Normal(mu_target, sigma_target)
q_context = Normal(mu_context, sigma_context)
z_sample = q_target.rsample()
r = self.deterministic_rep(x_context, y_context)
# Get parameters of output distribution
# print("x_target size", x_target.size())
# print("z_sample size", z_sample.size())
# print("r size", r.size())
y_pred_mu, y_pred_sigma = self.decoder(x_target, z_sample, r)
p_y_pred = Normal(y_pred_mu, y_pred_sigma)
return p_y_pred, q_target, q_context, y_pred_mu
else:
# At testing time, encode only context
mu_context, sigma_context = self.latent_rep(x_context, y_context)
# Sample from distribution based on context
q_context = Normal(mu_context, sigma_context)
z_sample = q_context.rsample()
r = self.deterministic_rep(x_context, y_context)
# Predict target points based on context
if given_r is None:
y_pred_mu, y_pred_sigma = self.decoder(x_target, z_sample, r)
else:
y_pred_mu, y_pred_sigma = self.decoder(x_target, z_sample, given_r)
p_y_pred = Normal(y_pred_mu, y_pred_sigma)
return p_y_pred, y_pred_mu, y_pred_sigma, r, mu_context, sigma_context
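# Minimal forward-pass sketch (illustration; dimensions are arbitrary and the
# random tensors are stand-ins for real context/target points).  With
# training=False the model returns the predictive distribution together with
# its mean/std and the deterministic representation r.
def _example_neural_process_forward():
    torch.manual_seed(0)
    model = NeuralProcess(x_dim=6, y_dim=1, r_dim=5, z_dim=1, h_dim=32)
    model.training = False
    x_context = torch.rand(1, 20, 6)
    y_context = torch.rand(1, 20, 1)
    x_target = torch.rand(1, 30, 6)
    p_y_pred, mu, sigma, r, mu_c, sigma_c = model(x_context, y_context, x_target)
    return mu.shape, sigma.shape   # both torch.Size([1, 30, 1])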
class NeuralProcessTrainer():
"""
Class to handle training of Neural Processes for functions and images.
Parameters
----------
device : torch.device
neural_process : neural_process.NeuralProcess or NeuralProcessImg instance
optimizer : one of torch.optim optimizers
num_context_range : tuple of ints
Number of context points will be sampled uniformly in the range given
by num_context_range.
num_extra_target_range : tuple of ints
Number of extra target points (as we always include context points in
target points, i.e. context points are a subset of target points) will
be sampled uniformly in the range given by num_extra_target_range.
print_freq : int
Frequency with which to print loss information during training.
"""
def __init__(self, device, neural_process, optimizer, num_context_range,
num_extra_target_range, print_freq=10):
self.device = device
self.neural_process = neural_process
self.optimizer = optimizer
self.num_context_range = num_context_range
self.num_extra_target_range = num_extra_target_range
self.print_freq = print_freq
self.steps = 0
self.epoch_loss_history = []
def train(self, data_loader, epochs):
"""
Trains Neural Process.
Parameters
----------
dataloader : torch.utils.DataLoader instance
epochs : int
Number of epochs to train for.
"""
for epoch in range(epochs):
epoch_loss = 0.
epoch_loss_n = 0
for i, data in data_loader.items():
for _ in range(1): # try with different number of context points
self.optimizer.zero_grad()
# Sample number of context and target points
num_context = randint(*self.num_context_range)
num_extra_target = randint(*self.num_extra_target_range)
# Create context and target points and apply neural process
x, y = data
# print(np.array(x).shape, np.array(y).shape)
x_context, y_context, x_target, y_target = context_target_split(x, y, num_context, num_extra_target)
x_context = torch.from_numpy(x_context).type(torch.FloatTensor)
y_context = torch.from_numpy(y_context).type(torch.FloatTensor)
x_target = torch.from_numpy(x_target).type(torch.FloatTensor)
y_target = torch.from_numpy(y_target).type(torch.FloatTensor)
p_y_pred, q_target, q_context, y_pred_mu = self.neural_process(x_context, y_context, x_target, y_target)
loss = self._loss(p_y_pred, y_target, q_target, q_context, y_pred_mu)
loss.backward()
self.optimizer.step()
epoch_loss += loss.item()
epoch_loss_n += 1
self.steps += 1
# if self.steps % self.print_freq == 0:
batch_size, num_points, _ = y_pred_mu.size()
y_pred_mu = y_pred_mu.view(batch_size * num_points, )
y_target = y_target.view(batch_size * num_points, )
print(y_pred_mu.size(), y_target.size())
print("iteration {} | loss {:.3f} | train accuracy {:.3f}".format(self.steps, loss.item(),
mean_squared_error(y_pred_mu.detach().numpy(), y_target.detach().numpy())))
logging.info("iteration {} | loss {:.3f} | train accuracy {:.3f}".format(self.steps, loss.item(),
mean_squared_error(y_pred_mu.detach().numpy(), y_target.detach().numpy())))
logging.info("Avg_loss: {}".format(epoch_loss / epoch_loss_n))
self.epoch_loss_history.append(epoch_loss / epoch_loss_n)
print("Epoch: {}, Avg_loss: {}, Min_loss: {}".format(epoch, epoch_loss / epoch_loss_n,
min(self.epoch_loss_history)))
return epoch_loss / epoch_loss_n
def _loss(self, p_y_pred, y_target, q_target, q_context, y_pred_mu):
"""
Computes Neural Process loss.
Parameters
----------
p_y_pred : one of torch.distributions.Distribution
Distribution over y output by Neural Process.
y_target : torch.Tensor
Shape (batch_size, num_target, y_dim)
q_target : one of torch.distributions.Distribution
Latent distribution for target points.
q_context : one of torch.distributions.Distribution
Latent distribution for context points.
"""
# Log likelihood has shape (batch_size, num_target, y_dim). Take mean
# over batch and sum over number of targets and dimensions of y
log_likelihood = p_y_pred.log_prob(y_target).mean(dim=0).mean()
# KL has shape (batch_size, r_dim). Take mean over batch and sum over
# r_dim (since r_dim is dimension of normal distribution)
kl = kl_divergence(q_target, q_context).mean(dim=0).mean()
# reconstruction error
batch_size, num_points, _ = y_pred_mu.size()
y_pred_mu = y_pred_mu.view(batch_size * num_points, )
y_target = y_target.view(batch_size * num_points, )
recon = mean_squared_error(y_pred_mu.detach().numpy(), y_target.detach().numpy(), squared=False) * 10
return -log_likelihood + kl + recon
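# Training-loop sketch (assumption: synthetic data in the dict format the
# trainer iterates over, i.e. {vehicle_id: ([x_array], [y_array])}).  Runs a
# single epoch on toy data; the real hyper-parameters appear further below.
def _example_np_training():
    rng = np.random.RandomState(0)
    toy_loader = {vid: ([rng.rand(60, 6)], [rng.rand(60)]) for vid in range(3)}
    model = NeuralProcess(x_dim=6, y_dim=1, r_dim=5, z_dim=1, h_dim=32)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    trainer = NeuralProcessTrainer("cpu", model, optimizer,
                                   num_context_range=(10, 20),
                                   num_extra_target_range=(5, 10))
    return trainer.train(toy_loader, epochs=1)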
def get_data_with_pos():
f = open('all_data_for_cf_model_w_t_pre_info_pos_1101.pkl', 'rb')
all_data_for_cf_model = pickle.load(f)
f.close()
next_vs = []
v_ids = []
all_cf_datas = []
next_v = 1
segs_info = []
for v_id, all_cf_data in all_data_for_cf_model.items():
print("-------------------------------------------------------------------------------------------------")
print(str(next_v) + 'th vehicle with id ' + str(v_id))
next_vs.append(next_v)
v_ids.append(v_id)
next_v += 1
# [delta_v_l, space_hw_l, ego_v_l, a_l]
delta_V_n_t = np.array(all_cf_data[0])
S_n_t = np.array(all_cf_data[1])
V_n_t = np.array(all_cf_data[2])
a = np.array(all_cf_data[3])
t = np.array(all_cf_data[4])
pre_v = np.array(all_cf_data[5])
pre_tan_acc = np.array(all_cf_data[6])
pre_lat_acc = np.array(all_cf_data[7])
pre_v_id = np.array(all_cf_data[8])
ego_x = np.array(all_cf_data[9])
ego_y = np.array(all_cf_data[10])
pre_x = np.array(all_cf_data[11])
pre_y = np.array(all_cf_data[12])
print(len(a), np.mean(np.abs(a)))
print(len(pre_v), np.mean(pre_v))
print(len(pre_tan_acc), np.mean(np.abs(pre_tan_acc)))
print(len(pre_lat_acc), np.mean(np.abs(pre_lat_acc)))
data_array = np.array([delta_V_n_t, S_n_t, V_n_t, a, ego_x, ego_y, pre_x, pre_y, pre_v, pre_tan_acc, pre_lat_acc, pre_v_id, t]).T
data_array = data_array[data_array[:, -1].argsort()]
t = np.array(data_array[:, -1])
# data_array = data_array[:, 0:-1]
segs = []
this_seg = []
this_seg_info = []
for i in range(len(data_array) - 1):
current_t = data_array[i][-1]
next_t = data_array[i + 1][-1]
current_pre_v_id = data_array[i][-2]
next_pre_v_id = data_array[i + 1][-2]
if np.abs(next_t - current_t - 0.04) < 0.0001 and np.abs(current_pre_v_id - next_pre_v_id) < 0.0001:
this_seg.append(np.append(data_array[i], i))
if i == len(data_array) - 2:
this_seg.append(np.append(data_array[i + 1], i + 1))
this_seg = np.array(this_seg)
if len(this_seg) > 1:
this_seg_info.append(this_seg.shape)
print(this_seg.shape)
segs.append(this_seg)
break
continue
else:
this_seg.append(np.append(data_array[i], i))
this_seg = np.array(this_seg)
if len(this_seg) > 1:
this_seg_info.append(this_seg.shape)
print(this_seg.shape)
segs.append(this_seg)
this_seg = []
print(len(segs))
segs_info.append(this_seg_info)
new_delta_V_n_t = []
new_S_n_t = []
check_S_n_t = []
new_V_n_t = []
new_S_n_t_y = []
new_ego_x = []
new_ego_y = []
new_next_pre_x = []
new_next_pre_y = []
new_frame_id = []
sim_S_n_t_y = []
new_a = []
new_pre_v = []
new_pre_tan_acc = []
new_pre_lat_acc = []
# clean_a = []
diff_s = []
# diff_a = []
# delta_V_n_t, S_n_t, V_n_t, a, ego_x, ego_y, pre_x, pre_y, pre_v, pre_tan_acc, pre_lat_acc, pre_v_id, t
for seg in segs:
for i in range(len(seg) - 1):
new_delta_V_n_t.append(seg[i][0])
new_S_n_t.append(seg[i][1])
check_S_n_t.append(np.sqrt((seg[i][6] - seg[i][4]) ** 2 + (seg[i][7] - seg[i][5]) ** 2))
new_V_n_t.append(seg[i][2])
new_a.append(seg[i][3])
sim_spacing = sim_new_spacing(seg[i + 1][6], seg[i + 1][7], seg[i][4], seg[i][5], seg[i][2], seg[i][3])
# cal_a = cal_new_a(seg[i + 1][6], seg[i + 1][7], seg[i][4], seg[i][5], seg[i][2], seg[i + 1][1])
sim_S_n_t_y.append(sim_spacing)
# clean_a.append(cal_a)
new_S_n_t_y.append(seg[i + 1][1])
new_ego_x.append(seg[i][4])
new_ego_y.append(seg[i][5])
new_next_pre_x.append(seg[i + 1][6])
new_next_pre_y.append(seg[i + 1][7])
diff_s.append(np.abs(seg[i + 1][1] - sim_spacing))
# diff_a.append(np.abs(seg[i][3] - cal_a))
new_frame_id.append(seg[i][-1])
new_pre_v.append(seg[i][8])
new_pre_tan_acc.append(seg[i][9])
new_pre_lat_acc.append(seg[i][10])
if not data_array.shape[0] - 2 == new_frame_id[-1]:
print("error", data_array.shape, new_frame_id[-1])
time.sleep(5)
data_array = np.array(
[new_delta_V_n_t, new_S_n_t, new_V_n_t, new_a, new_S_n_t_y, new_ego_x, new_ego_y, new_next_pre_x,
new_next_pre_y, new_pre_v, new_pre_tan_acc, new_pre_lat_acc, new_frame_id]).T
# print("spacing", np.mean(new_S_n_t_y), np.mean(new_S_n_t), np.mean(check_S_n_t),
# np.mean(np.array(new_S_n_t_y) - np.array(new_S_n_t)),
# np.mean(diff_s), np.mean(diff_a))
print(data_array.shape)
all_cf_datas.append(data_array)
return next_vs, v_ids, all_cf_datas, segs_info
def cal_ttc(next_v, v_id, all_cf_data):
# S_n_t_1, delta_V_n_t, S_n_t, V_n_t, next_pre_x, next_pre_y, ego_x, ego_y = args
if os.path.exists('0803_mop_space_dist_param/' + str(int(v_id)) + '/using_all_data.txt'):
res_param = np.loadtxt('0803_mop_space_dist_param/' + str(int(v_id)) + '/using_all_data.txt')
else:
return False, False, False
fix_a_max = res_param[0]
fix_desired_V = res_param[1]
fix_a_comf = res_param[2]
fix_S_jam = res_param[3]
fix_desired_T = res_param[4]
tv_params_mean = np.loadtxt('0803_mop_space_dist_param/' + str(int(v_id)) + '/tv_params_mean.txt')
for i in range(1, len(all_cf_data)):
previous_frame = all_cf_data[i - 1]
current_frame = all_cf_data[i]
if current_frame[9] - previous_frame[9] != 1:
p_delta_V_n_t = current_frame[0]
p_S_n_t = current_frame[1]
p_V_n_t = current_frame[2]
p_a_n_t = current_frame[3]
p_next_S_n_t = current_frame[4]
p_ego_x = current_frame[5]
p_ego_y = current_frame[6]
p_next_pre_x = current_frame[7]
p_next_pre_y = current_frame[8]
continue
delta_V_n_t = current_frame[0]
S_n_t = current_frame[1]
V_n_t = current_frame[2]
a_n_t = current_frame[3]
next_S_n_t = current_frame[4]
ego_x = current_frame[5]
ego_y = current_frame[6]
next_pre_x = current_frame[7]
next_pre_y = current_frame[8]
pre_V_n_t = V_n_t - delta_V_n_t
if i == 1:
p_delta_V_n_t = previous_frame[0]
p_S_n_t = previous_frame[1]
p_V_n_t = previous_frame[2]
p_a_n_t = previous_frame[3]
p_next_S_n_t = previous_frame[4]
p_ego_x = previous_frame[5]
p_ego_y = previous_frame[6]
p_next_pre_x = previous_frame[7]
p_next_pre_y = previous_frame[8]
tv_params = tv_params_mean[i]
a_max, desired_V, a_comf, S_jam, desired_T = tv_params[0], tv_params[1], tv_params[2], tv_params[3], tv_params[
4]
a_n_t_hat = IDM_cf_model_for_p(p_delta_V_n_t, p_S_n_t, p_V_n_t, a_max, desired_V, a_comf, S_jam, desired_T, 4)
tv_sim_V_n_t = p_V_n_t + a_n_t_hat * 0.04
fix_a_n_t_hat = IDM_cf_model_for_p(p_delta_V_n_t, p_S_n_t, p_V_n_t, fix_a_max, fix_desired_V, fix_a_comf,
fix_S_jam, fix_desired_T, 4)
fix_sim_V_n_t = p_V_n_t + fix_a_n_t_hat * 0.04
tv_V_n_t_1 = V_n_t
ttc = S_n_t / delta_V_n_t
# fix_ttc =
# tv_ttc =
i += 1
def sim_new_spacing(new_pre_x, new_pre_y, old_ego_x, old_ego_y, V_n_t, a_n_t, delta_t=0.04):
return np.sqrt((new_pre_x - old_ego_x) ** 2 + (new_pre_y - old_ego_y) ** 2) - (2 * V_n_t + a_n_t * delta_t) / 2 * delta_t
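# Worked example (hypothetical numbers): the leader ends up 25 m ahead along x,
# the ego starts at the origin at 10 m/s with a = 1 m/s^2 and covers
# (2*10 + 1*0.04)/2 * 0.04 = 0.4008 m in one 0.04 s step, so the simulated
# spacing is about 24.599 m.
def _example_sim_new_spacing():
    return sim_new_spacing(25.0, 0.0, 0.0, 0.0, 10.0, 1.0)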
train_x = []
train_y = []
test_x = []
test_y = []
all_x = []
all_y = []
train = np.random.choice(range(1, 277), 256, replace=False)
next_v = 1
all_a = []
next_vs, v_ids, all_cf_datas, segs_info = get_data_with_pos()
for i in range(len(v_ids)):
    v_id = v_ids[i]
    all_cf_data = all_cf_datas[i].T
    # print("------------------------------------------------------------------------------------------------------")
    # print(str(next_v) + 'th vehicle with id ' + str(v_id))
    # delta_V_n_t, S_n_t, V_n_t, a, S_n_t_y, ego_x, ego_y, next_pre_x, next_pre_y, pre_v, pre_tan_acc, pre_lat_acc, frame_id
    delta_V_n_t = np.array(all_cf_data[0])
    S_n_t = np.array(all_cf_data[1])
    V_n_t = np.array(all_cf_data[2])
    a = np.array(all_cf_data[3])
    S_n_t_y = np.array(all_cf_data[4])
    ego_x = np.array(all_cf_data[5])
    ego_y = np.array(all_cf_data[6])
    next_pre_x = np.array(all_cf_data[7])
    next_pre_y = np.array(all_cf_data[8])
    pre_v = np.array(all_cf_data[9])
    pre_tan_acc = np.array(all_cf_data[10])
    pre_lat_acc = np.array(all_cf_data[11])
    frame_id = np.array(all_cf_data[12])
    v_id = [v_id] * len(a)
    data_array = np.array([v_id, delta_V_n_t, S_n_t, V_n_t, pre_v, pre_tan_acc, pre_lat_acc, S_n_t_y,
                           ego_x, ego_y, next_pre_x, next_pre_y, frame_id, a]).T
    print(data_array.shape)
    if next_v not in train:
        test_x.append(data_array[:, 0:-1])
        test_y.append(data_array[:, -1])
        all_x.append(data_array[:, 0:-1])
        all_y.append(data_array[:, -1])
        print(test_x[-1].shape, test_y[-1].shape)
    else:
        train_x.append(data_array[:, 0:-1])
        train_y.append(data_array[:, -1])
        all_x.append(data_array[:, 0:-1])
        all_y.append(data_array[:, -1])
        print(train_x[-1].shape, train_y[-1].shape)
    next_v += 1
print(len(train_x), len(train_y))
print(len(test_x), len(test_y))
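
# Note: `train` holds 256 vehicle indices drawn without replacement from 1..276, so roughly
# 20 vehicles land in the test split; all_x / all_y keep every vehicle regardless of split.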
import torch
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from time import strftime
def get_test_dataloader(x, y):
    # columns: v_id, delta_V_n_t, S_n_t, V_n_t, pre_v, pre_tan_acc, pre_lat_acc, S_n_t_y, ego_x, ego_y, next_pre_x, next_pre_y, frame_id, a
    data_loader = {}
    for i in range(len(x)):
        v_id = x[i][0][0]
        print(v_id)
        for_sim_spacing = x[i].T[7:].T
        x[i] = x[i].T[1:7].T
        data_loader[v_id] = ([x[i]], [for_sim_spacing], [y[i]])
    return data_loader
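
# Each data_loader entry maps a vehicle id to ([x], [for_sim_spacing], [y]), where x keeps the
# six model inputs (delta_V_n_t, S_n_t, V_n_t, pre_v, pre_tan_acc, pre_lat_acc), for_sim_spacing
# keeps the remaining geometry/frame columns, and y is the observed acceleration.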
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cpu"
# Create a folder to store experiment results
# timestamp = strftime("%Y-%m-%d")
# directory = "neural_processes_results_{}".format(timestamp)
# if not os.path.exists(directory):
# os.makedirs(directory)
batch_size = 1
x_dim = 6
y_dim = 1
r_dim = 5
h_dim = 128
z_dim = 1
num_context_range = (300, 500)
num_extra_target_range = (100, 200)
epochs = 2000
lr = 0.001
data_loader = get_test_dataloader(all_x, all_y)
print(len(data_loader))
cf_np = NeuralProcess(x_dim, y_dim, r_dim, z_dim, h_dim)
cf_np.load_state_dict(torch.load("NP_model.pt"))
print(cf_np)
cf_np.training = False
print(cf_np.training)
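# Setting cf_np.training directly assumes the NeuralProcess class uses this flag to switch from
# posterior sampling of the latent (training) to prior-based prediction (testing); with a plain
# nn.Module flag, cf_np.eval() would be the conventional way to flip modes.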
# logging.basicConfig(filename=directory + '/test.log',
# format='%(asctime)s : %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S %p',
# level=10)
def simulate_agg():
    all_rmse = []
    all_r_c = []
    n = 1
    sim_ttc = []
    sim_spacing = []
    sim_speed = []
    ttc = []
    spacing = []
    speed = []
    a_err = []
    new_seg = False
    direction = 0.9712041389105396
    non_existed_r_c = np.loadtxt("/root/AAAI2022_neuma/0714_dist_param/new_ds_cf_model/new_ds.txt")
    new_con_r = torch.tensor(non_existed_r_c[0]).type(torch.FloatTensor)
    new_agg_r = torch.tensor(non_existed_r_c[1]).type(torch.FloatTensor)
    response_time = 1.5
    safe_decel = -5
    for i, data in data_loader.items():
        r_l = []
        rmse_l = []
        n += 1
        x, for_sim_spacing, y = data
        print(i)
        for j in range(1, len(x[0])):
            current_frame = x[0][j]
            current_for_sim_spacing = for_sim_spacing[0][j]
            previous_frame = x[0][j - 1]
            previous_for_sim_spacing = for_sim_spacing[0][j - 1]
            if current_for_sim_spacing[-1] - previous_for_sim_spacing[-1] != 1:
                new_seg = True
                break
            # Sample number of context and target points
            # num_context = randint(*num_context_range)
            # num_extra_target = randint(*num_extra_target_range)
            # num_points = num_context + num_extra_target
            # Create context and target points and apply neural process
            num_context = len(x[0])
            num_extra_target = 1
            num_points = len(x[0])
            # x_context, y_context, x_target, y_target = context_target_split(x, y, num_context, num_extra_target)
            # x: delta_V_n_t, S_n_t, V_n_t, pre_v, pre_tan_acc, pre_lat_acc
            # for sim spacing: S_n_t_y, ego_x, ego_y, next_pre_x, next_pre_y, frame_id
            if j == 1 or new_seg:
                previous_delta_V_n_t = previous_frame[0]
                previous_S_n_t = previous_frame[1]
                previous_V_n_t = previous_frame[2]
                previous_pre_v = previous_frame[3]
                previous_pre_tan_acc = previous_frame[4]
                previous_pre_lat_acc = previous_frame[5]
            else:
                previous_delta_V_n_t = sim_previous_frame[0]
                previous_S_n_t = sim_previous_frame[1]
                previous_V_n_t = sim_previous_frame[2]
                previous_pre_v = sim_previous_frame[3]
                previous_pre_tan_acc = sim_previous_frame[4]
                previous_pre_lat_acc = sim_previous_frame[5]
            x_target = np.array([[previous_delta_V_n_t, previous_S_n_t, previous_V_n_t,
                                  previous_pre_v, previous_pre_tan_acc, previous_pre_lat_acc]])
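
            # The original snippet stops at x_target. Under the neural-process interface this
            # script appears to assume (forward(x_context, y_context, x_target) returning a
            # predictive distribution), the next step would look roughly like the sketch below;
            # the (batch, num_points, dim) shapes and the use of the predictive mean are assumptions.
            # x_context = torch.from_numpy(np.array(x[0])).float().unsqueeze(0)
            # y_context = torch.from_numpy(np.array(y[0])).float().unsqueeze(0).unsqueeze(-1)
            # x_target_t = torch.from_numpy(x_target).float().unsqueeze(0)
            # p_y_pred = cf_np(x_context, y_context, x_target_t)
            # a_hat = p_y_pred.loc.item()            # predicted acceleration
            # sim_V = previous_V_n_t + a_hat * 0.04  # 0.04 s per frame, as elsewhere in this file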
"""
Train a regression DCGAN
"""
import torch
import torch.nn as nn
import numpy as np
import os
import timeit
from PIL import Image
from utils import *
from opts import parse_opts
''' Settings '''
args = parse_opts()
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# some parameters in opts
niters = args.niters_gan
resume_niters = args.resume_niters_gan
dim_gan = args.dim_gan
lr_g = args.lr_gan
lr_d = args.lr_gan
save_niters_freq = args.save_niters_freq
batch_size_disc = args.batch_size_disc
batch_size_gene = args.batch_size_gene
threshold_type = args.threshold_type
nonzero_soft_weight_threshold = args.nonzero_soft_weight_threshold
def train_CcGAN(kernel_sigma, kappa, train_samples, train_labels, netG, netD, save_images_folder, save_models_folder=None, plot_in_train=False, samples_tar_eval=None, angle_grid_eval=None, fig_size=5, point_size=None):
    netG = netG.to(device)
    netD = netD.to(device)
    optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
    if save_models_folder is not None and resume_niters > 0:
        save_file = save_models_folder + "/CcGAN_checkpoint_intrain/CcGAN_checkpoint_niter_{}.pth".format(resume_niters)
        checkpoint = torch.load(save_file)
        netG.load_state_dict(checkpoint['netG_state_dict'])
        netD.load_state_dict(checkpoint['netD_state_dict'])
        optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
        optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
        torch.set_rng_state(checkpoint['rng_state'])
    # end if

    #################
    unique_train_labels = np.sort(np.array(list(set(train_labels))))

    start_time = timeit.default_timer()
    for niter in range(resume_niters, niters):

        ''' Train Discriminator '''
        ## randomly draw batch_size_disc y's from unique_train_labels
        batch_target_labels_raw = np.random.choice(unique_train_labels, size=batch_size_disc, replace=True)
        ## add Gaussian noise; we estimate the image distribution conditional on these labels
        batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_disc)
        batch_target_labels = batch_target_labels_raw + batch_epsilons
        ## only for simulation: wrap the perturbed labels back into [0, 1]
        batch_target_labels[batch_target_labels < 0] = batch_target_labels[batch_target_labels < 0] + 1
        batch_target_labels[batch_target_labels > 1] = batch_target_labels[batch_target_labels > 1] - 1

        ## find index of real images with labels in the vicinity of batch_target_labels
        ## generate labels for fake image generation; these labels are also in the vicinity of batch_target_labels
        batch_real_indx = np.zeros(batch_size_disc, dtype=int)
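
        # The excerpt stops here. For illustration only (not necessarily the original
        # implementation), a CcGAN-style vicinity selection could fill batch_real_indx like this:
        # for j in range(batch_size_disc):
        #     if threshold_type == "hard":
        #         indx_real_in_vicinity = np.nonzero(np.abs(train_labels - batch_target_labels[j]) <= kappa)[0]
        #     else:  # soft vicinity: keep labels whose Gaussian weight exceeds the threshold
        #         indx_real_in_vicinity = np.nonzero((train_labels - batch_target_labels[j]) ** 2 <= -np.log(nonzero_soft_weight_threshold) / kappa)[0]
        #     batch_real_indx[j] = np.random.choice(indx_real_in_vicinity, size=1)[0]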