| prompt (string, 19-879k chars) | completion (string, 3-53.8k chars) | api (string, 8-59 chars) |
|---|---|---|
from scipy.spatial.distance import cdist
import heapq
import numpy as np
import random
from hashlib import sha1
from itertools import zip_longest
def batch_unit_norm(b, epsilon=1e-8):
"""
Give all vectors unit norm along the last dimension
"""
    return b / (np.linalg.norm(b, axis=-1, keepdims=True) + epsilon)
def unit_vectors(n_examples, n_dims):
"""
Create n_examples of synthetic data on the unit
sphere in n_dims
"""
dense = np.random.normal(0, 1, (n_examples, n_dims))
return batch_unit_norm(dense)
def hyperplanes(n_planes, n_dims):
"""
Return n_planes plane vectors, which describe
hyperplanes in n_dims space that are perpendicular
to lines running from the origin to each point
"""
return unit_vectors(n_planes, n_dims)
def random_projection(plane_vectors, data, pack=True, binarize=True):
"""
Return bit strings for a batch of vectors, with each
bit representing which side of each hyperplane the point
falls on
"""
flattened = data.reshape((len(data), plane_vectors.shape[-1]))
x = np.dot(plane_vectors, flattened.T).T
if not binarize:
return x
output = np.zeros((len(data), len(plane_vectors)), dtype=np.uint8)
output[np.where(x > 0)] = 1
if pack:
output = np.packbits(output, axis=-1).view(np.uint64)
return output
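# Hedged usage sketch (not part of the original module); the 64 planes and
# 16 dimensions below are illustrative:
#
#     >>> planes = hyperplanes(64, 16)
#     >>> points = unit_vectors(1000, 16)
#     >>> codes = random_projection(planes, points)  # packed -> shape (1000, 1), dtype uint64
#     >>> raw = random_projection(planes, points, pack=False, binarize=False)  # signed projections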
def traversal(roots, pop_from=0):
build_queue = list(roots)
while build_queue:
next_node = build_queue.pop(pop_from)
yield build_queue, next_node
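# `traversal` yields the live queue together with each node, so the caller can
# push children onto the queue while iterating: pop_from=0 gives breadth-first
# order, pop_from=-1 depth-first. Illustrative use (mirrors HyperPlaneNode.traverse):
#
#     for queue, node in traversal([root]):
#         queue.extend(child for child in node.children if child is not None)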
class HyperPlaneNode(object):
def __init__(self, shape, data=None, plane=None):
super(HyperPlaneNode, self).__init__()
self.dimensions = shape
# choose one plane, at random, for this node
if plane is None:
self.plane = hyperplanes(1, shape)
else:
self.plane = plane
self.data = \
data if data is not None else np.zeros((0,), dtype=np.uint64)
self.left = None
self.right = None
def __hash__(self):
return hash(sha1(self.plane).hexdigest())
def traverse(self):
for queue, node in traversal([self], pop_from=0):
yield node
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
def __eq__(self, other):
st = self.traverse()
try:
ot = other.traverse()
except AttributeError:
return False
for a, b in zip_longest(st, ot):
if np.any(a.data != b.data):
return False
if np.any(a.plane != b.plane):
return False
return True
def __len__(self):
return len(self.data)
def __repr__(self):
return f'Node(hash={hash(self)})'
def __str__(self):
return self.__repr__()
@property
def is_leaf(self):
return self.left is None and self.right is None
@property
def children(self):
return self.left, self.right
def distance(self, query):
dist = random_projection(
self.plane, query, pack=False, binarize=False).reshape(-1)
return dist
def route(self, data, indices=None):
if indices is None:
indices = self.data
data = data[indices]
dist = self.distance(data)
left_indices = indices[dist > 0]
right_indices = indices[dist <= 0]
return left_indices, right_indices
def create_children(self, data):
left_indices, right_indices = self.route(data)
self.left = HyperPlaneNode(self.dimensions, left_indices)
self.right = HyperPlaneNode(self.dimensions, right_indices)
class MultiHyperPlaneTree(object):
def __init__(self, data, smallest_node, n_trees=10):
super(MultiHyperPlaneTree, self).__init__()
self.data = data
indices = np.arange(0, len(data), dtype=np.uint64)
self.smallest_node = smallest_node
self.roots = \
[HyperPlaneNode(self.dimensions, indices) for _ in range(n_trees)]
build_queue = list(self.roots)
while build_queue:
node = build_queue.pop()
if len(node) <= smallest_node:
continue
else:
node.create_children(self.data)
build_queue.extend(node.children)
@property
def dimensions(self):
return self.data.shape[1]
def check(self):
output = []
for queue, node in traversal(list(self.roots), pop_from=0):
output.append(str(node))
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return output
def __setstate__(self, state):
def build_node(_, plane, data):
return HyperPlaneNode(state['shape'], data, plane)
roots = [build_node(*data) for data in state['roots']]
self.roots = roots
self.data = state['data']
self.smallest_node = state['smallest_node']
graph = state['graph']
for queue, next_node in traversal(roots, pop_from=0):
left, right = graph[hash(next_node)]
if left:
left = build_node(*left)
next_node.left = left
queue.append(left)
if right:
right = build_node(*right)
next_node.right = right
queue.append(right)
def __getstate__(self):
def node_state(node):
return hash(node), node.plane, node.data
graph = dict()
for queue, next_node in traversal(list(self.roots), pop_from=0):
item = []
left = next_node.left
right = next_node.right
if left:
queue.append(left)
item.append(node_state(left))
else:
item.append(None)
if right:
queue.append(right)
item.append(node_state(right))
else:
item.append(None)
graph[hash(next_node)] = item
roots = [node_state(r) for r in self.roots]
return {
'shape': self.roots[0].dimensions,
'roots': roots,
'graph': graph,
'smallest_node': self.smallest_node,
'n_trees': len(roots),
'data': self.data
}
def __eq__(self, other):
return all(s == r for (s, r) in zip(self.roots, other.roots))
def __len__(self):
return len(self.data)
def append(self, chunk):
# compute the new set of indices that need to be added to the tree
new_indices = np.arange(0, len(chunk), dtype=np.uint64) + len(self.data)
# ensure that the chunk of vectors are added to the available vector
# data
self.data = np.concatenate([self.data, chunk])
# initialize the search queue with all root nodes
search_queue = list([(r, new_indices) for r in self.roots])
while search_queue:
# add the indices to the node's data
node, indices = search_queue.pop()
node.data = np.concatenate([node.data, indices])
if len(node) <= self.smallest_node:
# this will be a leaf node. There's no need to further route
# the data or add further child nodes (for now)
continue
if node.is_leaf:
# we'll be creating new child nodes. At this point, we need
# to route *all* of the data currently owned by this node
node.create_children(self.data)
else:
# this node already has children, so it's only necessary to
# route new indices
left_indices, right_indices = node.route(self.data, indices)
search_queue.append((node.left, left_indices))
search_queue.append((node.right, right_indices))
def search_with_priority_queue(
self,
query,
n_results,
threshold,
return_distances=False,
return_vectors=False):
query = query.reshape(1, self.dimensions)
indices = set()
# this is kinda arbitrary.
# How do I pick this intelligently?
to_consider = n_results * 100
# put the root nodes in the queue
# KLUDGE: Assign arbitrary values to each root node, taking on values
# larger than the greatest possible cosine distance to ensure that
# each root node is processed first
# KLUDGE: Add a random number in the second position to ensure that
# hyperplane nodes are never compared in the event of identical
# distances
heap = [
(-((i + 1) * 10), random.random(), root)
for i, root in enumerate(self.roots)
]
# traverse the tree, finding candidate indices
while heap and (len(indices) < to_consider):
current_distance, _, current_node = heapq.heappop(heap)
if current_node.is_leaf:
indices.update(current_node.data)
continue
dist = current_node.distance(query)
        abs_dist = np.abs(dist)
#%%
import os
import time
import shutil
import numpy as np
import tensorflow as tf
from PIL import Image
import random
import matplotlib.pyplot as plt
import cv2
from cv2 import cv2
scal = 224
sampleModel = tf.keras.applications.ResNet50V2(weights='imagenet',
include_top=False,
input_shape=(scal, scal, 3))
sampleModel.trainable = False
for l in sampleModel.layers:
print(l.name)
if l.name == 'conv4_block5_out':
print(l)
#%%
c=[]
name=['conv2_block2_out','conv3_block3_out','conv4_block5_out','conv5_block3_out']
i=0
for l in sampleModel.layers:
if l.name == name[i]:
i+=1
print(l.name)
c.append(l.output)
if i == 4:
break
print(c)
model = tf.keras.models.Model(inputs=sampleModel.input, outputs=c)
tf.keras.utils.plot_model(model, to_file='rennetRpn.png', show_shapes=True, show_layer_names=True)
#%%
model.outputs
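#%%
# Hedged sketch (not in the original notebook): push a dummy batch through the
# multi-output model to check the four feature-map resolutions; the expected
# shapes below assume the 224x224 ResNet50V2 built above.
dummy = np.zeros((1, scal, scal, 3), dtype=np.float32)
feature_maps = model(dummy)
for fm in feature_maps:
    print(fm.shape)
# expected roughly: (1, 56, 56, 256), (1, 28, 28, 512), (1, 14, 14, 1024), (1, 7, 7, 2048)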
#%%
sampleModel.get_layer('conv4_block5_out')
#%%
img = cv2.imread('hua.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img/255.0
img = cv2.resize(img,(224,224))
plt.imshow(img)
o = sampleModel(np.expand_dims(img,0))
# %%
probs = tf.nn.softmax(o)
probs=probs.numpy()
np.max(probs)
# %%
np.argmax(probs)
import popsims
import numpy as np
import matplotlib.pyplot as plt
import wisps
import pandas as pd
import wisps.simulations as wispsim
from tqdm import tqdm
import astropy.units as u
import numba
from scipy.interpolate import griddata
from popsims import galaxy
def probability_of_selection(spt, snr):
"""
probablity of selection for a given snr and spt
"""
ref_df=wispsim.SELECTION_FUNCTION.dropna()
#self.data['spt']=self.data.spt.apply(splat.typeToNum)
interpoints=np.array([ref_df.spt.values, ref_df.logsnr.values]).T
return griddata(interpoints, ref_df.tot_label.values , (spt, np.log10(snr)), method='linear')
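# Hedged illustration (values are made up): the selection probability is a linear
# interpolation over the (spectral type, log10 SNR) grid, e.g.
#     p = probability_of_selection(np.array([20., 25.]), np.array([10., 50.]))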
def get_snr(exp_grism, appf110s, appf140s, appf160s):
#print (exp_grism)
snrjs110= 10**(fit_snr_exptime( exp_grism, appf110s, *list(wispsim.MAG_LIMITS['snr_exp']['F110'])))
snrjs140= 10**(fit_snr_exptime( exp_grism, appf140s, *list(wispsim.MAG_LIMITS['snr_exp']['F140'])))
snrjs160= 10**(fit_snr_exptime( exp_grism, appf160s, *list(wispsim.MAG_LIMITS['snr_exp']['F160'])))
#assign upper and lo limits
snr_bool_up= np.logical_or.reduce([ appf110s >25, appf140s >25, appf160s>24])
    snr_bool_do= np.logical_or.reduce([ appf110s <15, appf140s <15, appf160s<15])
snrjs= np.nanmin(np.vstack([snrjs110, snrjs140, snrjs160]), axis=0)
return snrjs
def format_maglimits(wisp_limits):
return {'WFC3_F110W':[16, wisp_limits['F110']],\
'WFC3_F140W':[16, wisp_limits['F140']],\
'WFC3_F160W':[16,wisp_limits['F160']]}
def make_cuts(df, dcts, expt):
snr=get_snr(expt, df.WFC3_F110W.values, df.WFC3_F140W.values, df.WFC3_F160W.values)
bools0=np.logical_or.reduce([df[k]< dcts[k][1] for k in dcts.keys()])
return df[np.logical_and(bools0, snr>=3)]
def get_average_distance_limits(p, cuts):
p.mag_limits=cuts
return dict(pd.DataFrame(p.distance_limits).applymap(lambda x: \
x[1]).apply(lambda x: np.nanmedian(x), axis=1))
@numba.jit(nopython=True)
def fit_snr_exptime(ts, mag, d, e, f, m0):
return d*(mag-m0)+e*np.log10(ts/1000)+f
@numba.jit(nopython=True)
def mag_unc_exptime_relation( mag, t, m0, beta, a, b):
tref = 1000.
#m0, beta, a, b= params
return ((t/tref)**-beta)*(10**(a*(mag-m0)+b))
def get_absmags_hst_filters(df, mag_key):
"""
get abs_mag
"""
#load relations
relabsmags=wisps.POLYNOMIAL_RELATIONS['abs_mags']
relcolors=wisps.POLYNOMIAL_RELATIONS['colors']
binary_flag=df.is_binary.values
#compute absolue magnitudes for singles
res=np.ones_like(df.spt.values)*np.nan
abs_mags_singles=np.random.normal((relabsmags[mag_key+'W'][0])(df.spt.values), relabsmags[mag_key+'W'][1])
#for binaries, base this on their absolute J and H mag
color_key='j_'+mag_key.lower()
#if mag_key=='F160':
# color_key='h_f160'
#colors=np.random.normal((relcolors[color_key][0])(df.spt.values), relcolors[color_key][1])
#abs_mags_binaries=df['abs_2MASS_J']-colors
abs_mag_primaries=np.random.normal((relabsmags[mag_key+'W'][0])(df.prim_spt.values) , relabsmags[mag_key+'W'][1])
abs_mag_secondaries=np.random.normal((relabsmags[mag_key+'W'][0])(df.sec_spt.values) , relabsmags[mag_key+'W'][1])
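    # combined (unresolved) binary magnitude: convert each component magnitude to a
    # flux (F proportional to 10**(-0.4*m)), sum the fluxes, and convert back to a magnitude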
abs_mags_binaries=-2.5*np.log10(10**(-0.4* abs_mag_primaries)+10**(-0.4* abs_mag_secondaries))
np.place(res, ~binary_flag, abs_mags_singles[~binary_flag])
np.place(res, binary_flag, abs_mags_binaries[binary_flag])
#absolute mag
df['abs{}'.format(mag_key)]=res
df['prim_abs{}'.format(mag_key)]=abs_mag_primaries
df['sec_abs{}'.format(mag_key)]= abs_mag_secondaries
#df['abs{}'.format(mag_key)]=abs_mags_singles
#apparent mag
app=res+5*np.log10(df.dist/10.0)
app_er= mag_unc_exptime_relation(app.values, df['exp_image'].values, *list(wispsim.MAG_LIMITS['mag_unc_exp'][mag_key]))
df['app{}'.format(mag_key)]= np.random.normal(app, app_er)
df['app{}'.format(mag_key)+'er']=app_er
return df
def add_abs_mags(df):
get_absmags_hst_filters(df, 'F110')
get_absmags_hst_filters(df, 'F140')
get_absmags_hst_filters(df, 'F160')
#add magnitude uncertainities
return df
def get_galactic_quantities():
scaleH=900.
scaleL=3600.
thin_points=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
distance_limits= thin_points[0].dist_limits
coords=[x.coord for x in thin_points]
pnt_names=[x.name for x in thin_points]
points=[galaxy.Pointing(coord=p.coord, name=p.name) for p in tqdm(thin_points)]
volumes={}
distances={}
for s in wispsim.SPGRID:
volumes[s]={}
distances[s]={}
for p in tqdm(points):
volumes[s][p.name] = galaxy.volume_calc(p.coord.galactic.l.radian,\
p.coord.galactic.b.radian,
distance_limits[s][-1], distance_limits[s][0],scaleH, scaleL, \
kind='exp')
distances[s][p.name]= p.draw_distances(distance_limits[s][1]*0.5, 2*distance_limits[s][0], \
scaleH, scaleL, nsample=1000)
import pickle
with open(wisps.OUTPUT_FILES+'/thick_disk_volumes.pkl', 'wb') as file:
pickle.dump(volumes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(wisps.OUTPUT_FILES+'/thick_disk_distances.pkl', 'wb') as file:
pickle.dump(distances, file, protocol=pickle.HIGHEST_PROTOCOL)
def run_all():
#load in some precomputed values
corr_pols=wisps.POLYNOMIAL_RELATIONS['mag_limit_corrections']
thin_points=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
names=[x.name for x in thin_points]
pntindex= np.arange(len(thin_points))
volumes=pd.read_pickle(wisps.OUTPUT_FILES+'/thick_disk_volumes.pkl')
DISTANCE_SAMPLES=pd.read_pickle(wisps.OUTPUT_FILES+'/thick_disk_distances.pkl')
#compute total cumulative volumes
tot_volumes_by_pointing=abs(np.nansum([[volumes[s][k] for k in names] for s in wispsim.SPGRID], axis=0))
tot_volumes_by_spt=abs(np.nansum([[volumes[s][k] for k in names] for s in wispsim.SPGRID], axis=1))
volumes_cdf= np.cumsum( tot_volumes_by_pointing)/np.nansum( tot_volumes_by_pointing)
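    # normalized cumulative volume over pointings: used below as a CDF so that
    # pointings are drawn with probability proportional to their survey volume
    # (inverse-CDF sampling via wisps.random_draw)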
#load in data from evolutionary models
data=popsims.make_systems(model_name='burrows1997', bfraction=0.2,\
mass_age_range= [0.01, 0.15, 8., 13.0],\
nsample=int(1e6),
save=True)
#remove early types
spts= (data['spt'].values).flatten()
mask= np.logical_and( spts>=17, spts<=41)
df= data[mask].reset_index(drop=True)
#assign distances
spts=spts[mask]
spt_r=np.round(spts)
    #assign pointings based on contributions to the total volumes
pntindex_to_use=wisps.random_draw(pntindex, volumes_cdf, nsample=len(spts)).astype(int)
pnts=np.take(thin_points, pntindex_to_use)
pnts_names=np.take(names, pntindex_to_use)
exptimes_mag=np.array([x.imag_exptime for x in thin_points])
exptime_spec= np.array([x.exposure_time for x in thin_points])
exps= np.take(exptimes_mag, pntindex_to_use)
exp_grism= np.take(exptime_spec, pntindex_to_use)
#assign distance based on pointing and
    #retrieve key by key, let's see how long it takes to run
spt_r=np.floor(spts).astype(int)
    dists_for_spts = np.array([np.random.choice(DISTANCE_SAMPLES[k][idx])
import numpy as np
from pprint import pprint
import networkx as nx
from jet.utils import get_unique_name, get_caller_info, slice_to_str
from jet import config
# global expander variables
constants = []
placeholder_count = 0
graph = nx.DiGraph()
# hack to fix issues with circular dependencies
intake = None
def import_intake():
global intake
if intake is None:
from jet import intake
##########################################################################
#### Operations ####
##########################################################################
class Op(object):
# Base class for all operations
def __init__(self, inputs, *args, **kwargs):
self.inputs_raw = inputs
self.inputs = check_type(*inputs)
self.output = None
self.init_op(self.inputs, *args, **kwargs)
if config.debug or config.group_class or config.group_func:
self.caller_info = get_caller_info('expander.py', 'intake.py')
else:
self.caller_info = None
self.add_to_graph()
def init_op(self, inputs, *args, **kwargs):
pass
def add_to_graph(self):
if hasattr(self, 'name'): # TODO dominique: is this check here? the class has a property 'name'
graph.add_node(self, name=self.name)
else:
name=get_unique_name('uniquename')
graph.add_node(self, name=name)
if self.inputs is not None:
for arr in self.inputs:
if hasattr(arr, 'assignment') and arr.assignment:
graph.add_edge(arr.assignment[-1], self, array=arr.name)
elif hasattr(arr, 'producer'):
graph.add_edge(arr.producer, self, array=arr.name)
def get_output(self):
return self.output
@property
def name(self):
return self.output.name
@property
def dtype(self):
return self.output.dtype
def __repr__(self):
out_name = ''
if self.output is not None:
out_name = self.output.name + '\\n' + str(self.output.shape)
return out_name
class ConstOp(Op):
def init_op(self, inputs):
self.op = 'Const'
self.inputs = None
self.value_holder = inputs[0]
self.output = inputs[0]
def values(self):
return self.inputs
class VariableOp(Op):
def init_op(self, inputs):
self.op = 'Variable'
self.inputs = None
self.value_holder = inputs[0]
self.output = inputs[0]
class PlaceholderOp(Op):
def init_op(self, inputs, placeholder_count):
self.op = 'Placeholder'
self.placeholder = inputs[0]
self.inputs = None
self.output = inputs[0]
self.placeholder_count = placeholder_count
class CreateArrayOp(Op):
def __init__(self, inputs, producer, shape):
self.op = 'CreateArray'
self.inputs = inputs
self.nested_input = np.array(inputs, dtype=object).reshape(shape)
self.output = producer # TODO dominique: this is some weird naming convention: a producer is an output? isn't a producer normally responsible for the input?
self.shape = shape
if config.debug or config.group_class or config.group_func:
self.caller_info = get_caller_info('expander.py', 'intake.py')
else:
self.caller_info = None
self.add_to_graph()
class AssignOp(Op):
def __init__(self, inputs, at_idx=None, slices=None):
self.op = 'Assign'
self.inputs = check_type(*inputs)
self.slices = slices
if slices is not None:
if len(slices) == 1:
slice_shape = np.zeros(inputs[0].shape)[slices[0]].shape
else:
slice_shape = np.zeros(inputs[0].shape)[slices[0], slices[1]].shape
self.output = intake.array(name=self.op, dtype=inputs[0].dtype,
shape=inputs[0].shape,
producer=self,
slice_shape=slice_shape)
self.__repr__ = lambda : '{}[{}, {}]'.format(self.inputs[0].name,
slice_to_str(slices[0]),
slice_to_str(slices[1])) + '\\n' + str(self.output.shape)
else:
self.output = intake.array(name=self.op,
dtype=inputs[0].dtype,
shape=inputs[0].shape,
producer=self)
self.at_idx = at_idx
if at_idx:
if len(at_idx) == 1:
self.__repr__ = lambda : '{}[{}]'.format(self.inputs[0].name,
at_idx[0]) + '\\n' + str(self.output.shape)
else:
self.__repr__ = lambda : '{}[{}, {}]'.format(self.inputs[0].name,
at_idx[0], at_idx[1]) + '\\n' + str(self.output.shape)
# special thing for assign operator... have to add the others
# as dependency
successors = graph.successors(inputs[0].last_producer)
for s in successors:
if s != self.inputs[0].last_producer:
graph.add_edge(s, self, edge_type='helper')
if config.debug or config.group_class or config.group_func:
self.caller_info = get_caller_info('expander.py', 'intake.py')
else:
self.caller_info = None
self.add_to_graph()
self.inputs[0].assignment.append(self)
def __repr__(self):
return self.__repr__()
class ViewOp(Op):
def init_op(self, inputs, keys):
self.op = 'View'
# find shape by slicing zeros vector
self.slices = keys
if len(self.slices) == 1:
new_shape = np.zeros(inputs[0].shape)[self.slices[0]].shape
else:
new_shape = np.zeros(inputs[0].shape)[self.slices[0], self.slices[1]].shape
self.output = intake.array(name=self.op, shape=new_shape, producer=self)
def __repr__(self):
if len(self.slices) == 1:
return '{}[{}]'.format(self.inputs[0].name,
slice_to_str(self.slices[0])) + '\\n' + str(self.output.shape)
return '{}[{}, {}]'.format(self.inputs[0].name,
slice_to_str(self.slices[0]),
slice_to_str(self.slices[1])) + '\\n' + str(self.output.shape)
class ArrayAccessOp(Op):
def init_op(self, inputs, at_idx):
self.op = 'ArrayAccess'
if len(at_idx) == 1:
shape = np.zeros(inputs[0].shape)[at_idx[0]].shape
else:
shape = np.zeros(inputs[0].shape)[at_idx[0], at_idx[1]].shape
self.output = intake.array(name=self.op, shape=shape, producer=self)
self.at_idx = at_idx
def __repr__(self):
if len(self.at_idx) == 1:
return '{}[{}]'.format(self.inputs[0].name,
self.at_idx[0]) + '\\n' + str(self.output.shape)
return '{}[{}, {}]'.format(self.inputs[0].name,
self.at_idx[0], self.at_idx[1]) + '\\n' + str(self.output.shape)
class ConcatenateOp(Op):
def init_op(self, inputs, axis):
self.op = 'Concatenate'
self.axis=axis
self.shape = np.concatenate((np.zeros(inputs[0].shape),
np.zeros(inputs[1].shape)), axis=axis).shape
self.output = intake.array(name=self.op, shape=self.shape,
dtype=upcast(inputs),
producer=self)
class WhereOp(Op):
def init_op(self, inputs):
self.op = 'Where'
assert(inputs[1].shape == inputs[2].shape)
shape = inputs[1].shape
self.output = intake.array(name=self.op, shape=shape,
dtype=upcast(inputs[1:3]),
producer=self)
# not implemented yet
class WhileOp(Op):
def init_op(self, inputs):
self.op = 'While'
self.output = inputs[1:]
class ZerosOp(Op):
def init_op(self, inputs, shape, dtype=config.DTYPE):
self.op = 'Zeros'
self.shape = shape
self.output = intake.array(name='zeros_mat', shape=self.shape,
dtype=dtype, producer=self)
class OnesOp(Op):
def init_op(self, inputs, shape, dtype=config.DTYPE):
self.op = 'Ones'
self.shape = shape
self.output = intake.array(name='ones_mat', shape=self.shape,
dtype=dtype, producer=self)
class EyeOp(Op):
def init_op(self, inputs, shape, dtype=config.DTYPE):
self.op = 'Eye'
self.shape = shape
self.output = intake.array(name='eye_mat', shape=self.shape,
dtype=dtype, producer=self)
class MatMulOp(Op):
def init_op(self, inputs):
self.op = 'MatMul'
# result of mat mul = new matrix
# n x m * m x p -> n * p
shape = np.dot(np.zeros(inputs[0].shape), np.zeros(inputs[1].shape)).shape
self.output = intake.array(name=self.op,
shape=shape,
dtype=upcast(inputs),
producer=self)
class DotOp(Op):
"""
Note:
We use an explicit DOT operation instead, but we parse the numpy
dot correctly (ie. dot between 2D matrices -> matmul, dot between vectors -> inner product)
"""
def init_op(self, inputs):
self.op = 'Dot'
# result of mat mul = new matrix
# n x m * m x p -> n * p
shape = np.dot(np.zeros(inputs[0].shape), np.zeros(inputs[1].shape)).shape
self.output = intake.array(name=self.op,
shape=shape,
dtype=upcast(inputs),
producer=self)
class ModOp(Op):
def init_op(self, inputs):
self.op = 'Mod'
shape = self.inputs[0].shape
self.output = intake.array(name=self.op, shape=shape,
dtype=config.DTYPE,
producer=self)
class ArcTan2Op(Op):
def init_op(self, inputs):
self.op = 'ArcTan2'
shape = self.inputs[0].shape
self.output = intake.array(name=self.op, shape=shape,
dtype=config.DTYPE,
producer=self)
class ClipOp(Op):
def init_op(self, inputs):
self.op = 'Clip'
assert(inputs[1].shape == inputs[2].shape)
shape = inputs[0].shape
self.output = intake.array(name=self.op, shape=shape,
dtype=upcast(inputs),
producer=self)
##########################################################################
#### Unary Operations ####
##########################################################################
class UnaryOp(Op):
def init_op(self, inputs):
# TODO check why this happens and what it does
shape = self.inputs[0].shape
if hasattr(self, 'dtype'):
# dtype for sin etc. has to change
dtype = self.dtype
else:
dtype = self.inputs[0].dtype
self.output = intake.array(name=self.op, shape=shape, dtype=dtype,
producer=self)
class NegOp(UnaryOp):
op = 'Neg'
class CopyOp(UnaryOp):
op = 'Copy'
class SinOp(UnaryOp):
op = 'Sin'
dtype = config.DTYPE
class CosOp(UnaryOp):
op = 'Cos'
dtype = config.DTYPE
class TanOp(UnaryOp):
op = 'Tan'
dtype = config.DTYPE
class SqrtOp(UnaryOp):
op = 'Sqrt'
dtype = config.DTYPE
class SquareOp(UnaryOp):
op = 'Square'
dtype = config.DTYPE
class AbsOp(UnaryOp):
op = 'Abs'
dtype = config.DTYPE
class ExpOp(UnaryOp):
op = 'Exp'
dtype = config.DTYPE
class LogOp(UnaryOp):
op = 'Log'
dtype = config.DTYPE
class ArcSinOp(UnaryOp):
op = 'ArcSin'
dtype = config.DTYPE
class ArcCosOp(UnaryOp):
op = 'ArcCos'
dtype = config.DTYPE
class ArcTanOp(UnaryOp):
op = 'ArcTan'
dtype = config.DTYPE
##########################################################################
#### Binary Operations ####
##########################################################################
class BinOp(Op):
"""
Base class for binary ops that don't modify the shape
ie: 3 + [1, 2, 3] = [4, 5, 6]
or [1, 2, 3] + [4, 5, 6] = [5, 7, 9]
"""
def init_op(self, inputs):
self.output = intake.array(name=self.op, shape=self.shape_op(inputs),
dtype=upcast(inputs), producer=self)
def shape_op(self, inputs):
return ()
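# The shape_op overrides below infer the broadcast result shape by actually
# broadcasting dummy zero/one arrays of the input shapes, e.g. inputs with
# shapes (2, 3) and (3,) give (np.zeros((2, 3)) + np.zeros((3,))).shape == (2, 3).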
class AddOp(BinOp):
op = 'Add'
def shape_op(self, inputs):
return (np.zeros(inputs[0].shape) +
np.zeros(inputs[1].shape)).shape
class MultiplyOp(BinOp):
op = 'Mul'
def shape_op(self, inputs):
return (np.zeros(inputs[0].shape) *
np.zeros(inputs[1].shape)).shape
class DivideOp(BinOp):
op = 'Div'
def shape_op(self, inputs):
        return (np.ones(inputs[0].shape) /
                np.ones(inputs[1].shape)).shape
import pickle
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from CarDetector import CarDetector
class AnalyseResults:
def __init__(self,dirChasing):
self.dirChasing = dirChasing
def loadPositionHistory(self, fileName):
return pickle.load(open(fileName, "rb"))
def EuclidianDistance(self,x1,x2,y1,y2,z1,z2):
return math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2))
def LoadResults(self):
file = 'res/results.txt'
with open(file,'r') as f:
lines = [line.rstrip() for line in f]
return lines
def FinishedPercentage(self, historyChased, historyChasing):
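        # Estimate how far along the chased car's route the chaser got: trim the
        # chaser's trailing "stuck" positions, find the closest point on the chased
        # route, and return covered path length / total path length.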
lastPosChasing = historyChasing[-1]
nOfPositionsDriven = 0
tmp = 0
for i in range(1,len(historyChasing)):
m = len(historyChasing) - 1
d = self.EuclidianDistance(lastPosChasing[0], historyChasing[m-i][0], lastPosChasing[1], historyChasing[m-i][1],
lastPosChasing[2], historyChasing[m-i][2])
if d > 2:
break
else:
tmp +=1
nOfPositionsDriven = len(historyChased) - tmp
        # find the point on the chased path closest to the chaser's final position
shortestDist = 1000000000
shortestIndex = 0
for i in range(nOfPositionsDriven-1,-1,-1):
dist = self.EuclidianDistance(lastPosChasing[0],historyChased[i][0],lastPosChasing[1],historyChased[i][1],lastPosChasing[2],historyChased[i][2])
if dist < shortestDist:
shortestDist = dist
shortestIndex = i
else:
break
# if dist > 2*shortestDist:
# break
# shortestIndex = nOfPositionsDriven
distToLastPoint = 0
distfromLastPoint = 0
for i in range(len(historyChased)):
if i+1 < len(historyChased):
if i+1 <= shortestIndex:
distToLastPoint += self.EuclidianDistance(historyChased[i+1][0],historyChased[i][0],historyChased[i+1][1],historyChased[i][1],historyChased[i+1][2],historyChased[i][2])
else:
distfromLastPoint += self.EuclidianDistance(historyChased[i + 1][0], historyChased[i][0],
historyChased[i + 1][1], historyChased[i][1],
historyChased[i + 1][2], historyChased[i][2])
percentage = distToLastPoint/float(distToLastPoint+distfromLastPoint)
return percentage
def FindMAE(self,drivename):
results = self.LoadResults()
for i in range(len(results)):
tmp = results[i].split(',')
if drivename in tmp[0]:
return float(tmp[1]), float(tmp[2]), float(tmp[3])
def Analyse(dirChasing, dirChased, chasedFiles):
dirChasing = dirChasing
dirChased = dirChased
analyse = AnalyseResults(dirChasing)
chasedFiles = chasedFiles
finished = 0
percentages = []
    maes = []
    rmses = []
    crashes = []
for file in chasedFiles:
print('\nAnalysis of drive',file,)
historyChased = analyse.loadPositionHistory(os.path.join(dirChased, file))
historyChasing = analyse.loadPositionHistory(os.path.join(dirChasing, file))
percentage = analyse.FinishedPercentage(historyChased, historyChasing)
mae, rmse, crash = analyse.FindMAE(file)
print('How long was the car able to chase the other car:',str(percentage),'[percentage]')
print('MAE between desired and actual distance:',str(mae),'[meters]')
print('RMSE between desired and actual distance:', str(rmse),'[meters]')
print('Total number of crashes:', str(crash))
if percentage >= 0.94:
finished += 1
# print(mae, rmse, crash)
maes.append(mae)
rmses.append(rmse)
crashes.append(crash)
percentages.append(percentage)
    print('\n\n---------------------------OVERALL STATISTICS-------------------------------')
print('Number of finished drives:', finished)
    print('Average percentage of finished drives:', np.mean(percentages))
'''
Noise estimation.
'''
import numpy as np
def sigma_noise_spd_welch(y, fs, noise_range, method='expmeanlog'):
'''
    Estimate the noise level with a power spectral density (Welch's method)
    approach.
ARGUMENTS
`````````
y : signal, [y] = N x T
fs : sampling rate
noise_range : noise range (a, b) in Hz, implemented as interval [a, b)
If you choose fs to be one you can indicate your noise_range in units of
the sampling rate. Be aware that only [0, 0.5) is feasible.
method : ('expmeanlog', 'mean', 'median',)
'''
from scipy.signal import welch
if len(y.shape) == 1:
T = y.shape[0]
dim = 1
elif len(y.shape) == 2:
N, T = y.shape
dim = 2
else:
raise ValueError('y has dimension %s. Only 1 or 2 are allowed.' % len(y.shape))
    f, pxx = welch(y, nperseg=np.round(T/8))
# from __future__ import print_function
import pretty_midi
import pypianoroll
import numpy as np
import pandas as pd
import scipy.stats
from collections import Counter
from functools import reduce
import os
def remove_empty_track(midi_file):
'''
1. read pretty midi data
    2. remove empty tracks, and also remove any track with fewer than
       10% of the notes of the track with the most notes
********
Return: pretty_midi object, pypianoroll object
'''
try:
pretty_midi_data = pretty_midi.PrettyMIDI(midi_file)
except Exception as e:
print(f'exceptions in reading the file {midi_file}')
return None, None
# print('I00:', pretty_midi_data.instruments)
pypiano_data = pypianoroll.Multitrack()
try:
pypiano_data.parse_pretty_midi(pretty_midi_data, skip_empty_tracks=False)
except Exception as e:
print(f'exceptions for pypianoroll in reading the file {midi_file}')
return None, None
drum_idx = []
for i, instrument in enumerate(pretty_midi_data.instruments):
if instrument.is_drum:
drum_idx.append(i)
note_count = [np.count_nonzero(np.any(track.pianoroll, axis=1)) \
for track in pypiano_data.tracks]
empty_indices = np.array(note_count) < 10
remove_indices = np.arange(len(pypiano_data.tracks))[empty_indices]
for index in sorted(remove_indices, reverse=True):
del pypiano_data.tracks[index]
del pretty_midi_data.instruments[index]
return pretty_midi_data, pypiano_data
def remove_duplicate_tracks(features, replace=False):
if not replace:
features = features.copy()
file_names = features.file_names.unique()
duplicates = []
for file_name in file_names:
file_features = features[features.file_names == file_name]
number_notes = Counter(file_features.num_notes)
notes = []
for ele in number_notes:
if number_notes[ele] > 1:
notes.append(ele)
h_pits = []
for note in notes:
number_h_pit = Counter(file_features[file_features.num_notes == note].h_pit)
for ele in number_h_pit:
if number_h_pit[ele] > 1:
h_pits.append(ele)
l_pits = []
for h_pit in h_pits:
number_l_pit = Counter(file_features[file_features.h_pit == h_pit].l_pit)
for ele in number_l_pit:
if number_l_pit[ele] > 1:
l_pits.append(ele)
notes = list(set(notes))
h_pits = list(set(h_pits))
l_pits = list(set(l_pits))
for note in notes:
note_index = file_features[file_features.num_notes == note].index.values
for h_pit in h_pits:
h_pit_index = file_features[file_features.h_pit == h_pit].index.values
for l_pit in l_pits:
l_pit_index = file_features[file_features.l_pit == l_pit].index.values
index_intersect = reduce(np.intersect1d, (note_index, h_pit_index, l_pit_index))
if len(index_intersect) > 1:
duplicates.append(index_intersect)
### copy the labels in the tracks to be removed
melody_track_name = ['sing', 'vocals', 'vocal', 'melody', 'melody:']
bass_track_name = ['bass', 'bass:']
chord_track_name = ['chord', 'chords', 'harmony']
for indices in duplicates:
melody_track = False
bass_track = False
chord_track = False
labels = features.loc[indices, 'trk_names']
for label in labels:
if label in melody_track_name:
melody_track = True
elif label in bass_track_name:
bass_track = True
elif label in chord_track_name:
chord_track = True
else:
pass
if melody_track:
features.loc[indices, 'trk_names'] = 'melody'
if bass_track:
features.loc[indices, 'trk_names'] = 'bass'
if chord_track:
features.loc[indices, 'trk_names'] = 'chord'
features.drop(indices[1:], inplace=True)
print(indices[1:])
return features
def remove_file_duplicate_tracks(features, pm):
duplicates = []
index_to_remove = []
number_notes = Counter(features.num_notes)
notes = []
for ele in number_notes:
if number_notes[ele] > 1:
notes.append(ele)
h_pits = []
for note in notes:
number_h_pit = Counter(features[features.num_notes == note].h_pit)
for ele in number_h_pit:
if number_h_pit[ele] > 1:
h_pits.append(ele)
l_pits = []
for h_pit in h_pits:
number_l_pit = Counter(features[features.h_pit == h_pit].l_pit)
for ele in number_l_pit:
if number_l_pit[ele] > 1:
l_pits.append(ele)
notes = list(set(notes))
h_pits = list(set(h_pits))
l_pits = list(set(l_pits))
for note in notes:
note_index = features[features.num_notes == note].index.values
for h_pit in h_pits:
h_pit_index = features[features.h_pit == h_pit].index.values
for l_pit in l_pits:
l_pit_index = features[features.l_pit == l_pit].index.values
index_intersect = reduce(np.intersect1d, (note_index, h_pit_index, l_pit_index))
if len(index_intersect) > 1:
duplicates.append(index_intersect)
### copy the labels in the tracks to be removed
melody_track_name = ['sing', 'vocals', 'vocal', 'melody', 'melody:']
bass_track_name = ['bass', 'bass:']
chord_track_name = ['chord', 'chords', 'harmony']
for indices in duplicates:
features.drop(indices[1:], inplace=True)
for index in indices[1:]:
index_to_remove.append(index)
indices = np.sort(np.array(index_to_remove))
for index in indices[::-1]:
del pm.instruments[index]
    features.reset_index(inplace=True, drop=True)
return
def walk(folder_name):
files = []
for p, d, f in os.walk(folder_name):
for file in f:
base_name = os.path.basename(file)
base_name = os.path.splitext(base_name)[0]
if base_name.isupper():
continue
if file.endswith('.mid') or file.endswith('.MID'):
files.append(os.path.join(p, file))
return files
def relative_duration(pianoroll_data):
'''
read pianoroll_data data
'''
note_count = [np.count_nonzero(np.any(track.pianoroll, axis=1)) \
for track in pianoroll_data.tracks]
relative_durations = note_count / np.max(note_count)
relative_durations = np.array(relative_durations)
relative_durations = relative_durations[:, np.newaxis]
assert relative_durations.shape == (len(pianoroll_data.tracks), 1)
return np.array(relative_durations)
def number_of_notes(pretty_midi_data):
'''
read pretty-midi data
'''
number_of_notes = []
for instrument in pretty_midi_data.instruments:
number_of_notes.append(len(instrument.notes))
number_of_notes = np.array(number_of_notes, dtype='uint16')
number_of_notes = number_of_notes[:, np.newaxis]
assert number_of_notes.shape == (len(pretty_midi_data.instruments), 1)
return number_of_notes
def occupation_rate(pianoroll_data):
'''
read pypianoroll data
'''
occup_rates = []
for track in pianoroll_data.tracks:
piano_roll = track.pianoroll
occup_rates.append(np.count_nonzero(np.any(piano_roll, 1)) / piano_roll.shape[0])
occup_rates = np.array(occup_rates)
occup_rates = occup_rates[:, np.newaxis]
assert occup_rates.shape == (len(pianoroll_data.tracks), 1)
return occup_rates
def polyphony_rate(pianoroll_data):
'''
use pianoroll data
'''
rates = []
for track in pianoroll_data.tracks:
piano_roll = track.pianoroll
number_poly_note = np.count_nonzero(np.count_nonzero(piano_roll, 1) > 1)
rate = number_poly_note / np.count_nonzero(np.any(piano_roll, 1))
rates.append(rate)
rates = np.array(rates)
rates = rates[:, np.newaxis]
assert rates.shape == (len(pianoroll_data.tracks), 1)
return rates
def pitch(pianoroll_data):
'''
read pypianoroll data
Returns
-------
a numpy array in the shape of (number of tracks, 8)
the 8 columns are highest pitch, lowest pitch, pitch mode, pitch std,
and the norm value across different tracks for those values
'''
highest = []
lowest = []
modes = []
stds = []
# pitches = np.array([note.pitch] for note in instrument.notes)
def array_creation_by_count(counts):
result = []
for i, count in enumerate(counts):
if count != 0:
result.append([i] * count)
result = np.array([item for sublist in result for item in sublist])
return result
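    # array_creation_by_count expands per-pitch note counts into a flat pitch array,
    # e.g. counts [0, 2, 0, 1] -> [1, 1, 3], so the mode and std can be taken per pitch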
for track in pianoroll_data.tracks:
highest_note = np.where(np.any(track.pianoroll, 0))[0][-1]
lowest_note = np.where(np.any(track.pianoroll, 0))[0][0]
pitch_array = array_creation_by_count(np.count_nonzero(track.pianoroll, 0))
mode_pitch = scipy.stats.mode(pitch_array)
mode_pitch = mode_pitch.mode[0]
std_pitch = np.std(pitch_array)
highest.append(highest_note)
lowest.append(lowest_note)
modes.append(mode_pitch)
stds.append(std_pitch)
highest = np.array(highest, dtype='uint8')
lowest = np.array(lowest, dtype='uint8')
modes = np.array(modes, dtype='uint8')
stds = np.array(stds, dtype='float32')
if np.max(highest) - np.min(highest) == 0:
highest_norm = np.ones_like(highest)
else:
highest_norm = (highest - np.min(highest)) / (np.max(highest) - np.min(highest))
if np.max(lowest) - np.min(lowest) == 0:
lowest_norm = np.zeros_like(lowest)
else:
lowest_norm = (lowest - np.min(lowest)) / (np.max(lowest) - np.min(lowest))
if np.max(modes) - np.min(modes) == 0:
modes_norm = np.zeros_like(modes)
else:
modes_norm = (modes - np.min(modes)) / (np.max(modes) - np.min(modes))
if np.max(stds) - np.min(stds) == 0:
stds_norm = np.zeros_like(stds)
else:
stds_norm = (stds - np.min(stds)) / (np.max(stds) - np.min(stds))
result = np.vstack((highest, lowest, modes, stds, highest_norm, lowest_norm, modes_norm, stds_norm))
result = result.T
assert result.shape == (len(pianoroll_data.tracks), 8)
return result
def pitch_intervals(pretty_midi_data):
'''
use pretty-midi data here
Returns
-------
a numpy array in the shape of (number of tracks, 5)
the 5 columns are number of different intervals, largest interval,
smallest interval, mode interval and interval std of this track,
and the norm value across different tracks for those values
'''
different_interval = []
largest_interval = []
smallest_interval = []
mode_interval = []
std_interval = []
def get_intervals(notes, threshold=-1):
'''
threshold is the second for the space between two consecutive notes
'''
intervals = []
for i in range(len(notes) - 1):
note1 = notes[i]
note2 = notes[i + 1]
if note1.end - note2.start >= threshold:
if note2.end >= note1.end:
intervals.append(abs(note2.pitch - note1.pitch))
return np.array(intervals)
for instrument in pretty_midi_data.instruments:
intervals = get_intervals(instrument.notes, -3)
if len(intervals) > 0:
different_interval.append(len(np.unique(intervals)))
largest_interval.append(np.max(intervals))
smallest_interval.append(np.min(intervals))
mode_interval.append(scipy.stats.mode(intervals).mode[0])
std_interval.append(np.std(intervals))
else:
different_interval.append(-1)
largest_interval.append(-1)
smallest_interval.append(-1)
mode_interval.append(-1)
std_interval.append(-1)
different_interval = np.array(different_interval, dtype='uint8')
largest_interval = np.array(largest_interval, dtype='uint8')
smallest_interval = np.array(smallest_interval, dtype='uint8')
mode_interval = np.array(mode_interval, dtype='uint8')
std_interval = np.array(std_interval, dtype='float32')
if np.max(different_interval) - np.min(different_interval) == 0:
different_interval_norm = np.zeros_like(different_interval)
else:
different_interval_norm = (different_interval - np.min(different_interval)) / (
np.max(different_interval) - np.min(different_interval))
if np.max(largest_interval) - np.min(largest_interval) == 0:
largest_interval_norm = np.ones_like(largest_interval)
else:
largest_interval_norm = (largest_interval - np.min(largest_interval)) / (
np.max(largest_interval) - np.min(largest_interval))
if np.max(smallest_interval) - np.min(smallest_interval) == 0:
smallest_interval_norm = np.zeros_like(smallest_interval)
else:
smallest_interval_norm = (smallest_interval - np.min(smallest_interval)) / (
np.max(smallest_interval) - np.min(smallest_interval))
    if np.max(mode_interval) - np.min(mode_interval) == 0:
# <NAME>
# 3/18/2019
# General object to run empirical sr actflow process
# For group-level/cross-subject analyses
import numpy as np
import os
import multiprocessing as mp
import scipy.stats as stats
import nibabel as nib
import os
os.environ['OMP_NUM_THREADS'] = str(1)
import sklearn
from scipy import signal
import h5py
import sys
sys.path.append('glmScripts/')
import glmScripts.taskGLMPipeline_v2 as tgp
import sys
import pandas as pd
import pathlib
import calculateFC as fc
import tools
# Using final partition
networkdef = np.loadtxt('/home/ti61/f_mc1689_1/NetworkDiversity/data/network_partition.txt')
networkorder = np.asarray(sorted(range(len(networkdef)), key=lambda k: networkdef[k]))
networkorder.shape = (len(networkorder),1)
# network mappings for final partition set
networkmappings = {'fpn':7, 'vis1':1, 'vis2':2, 'smn':3, 'aud':8, 'lan':6, 'dan':5, 'con':4, 'dmn':9,
'pmulti':10, 'none1':11, 'none2':12}
networks = networkmappings.keys()
## General parameters/variables
nParcels = 360
class Model():
"""
Class to perform empirical actflow for a given subject (stimulus-to-response)
"""
def __init__(self,projectdir='/home/ti61/f_mc1689_1/SRActFlow/',ruletype='12',n_hiddenregions=10,randomize=False,scratchfcdir=None):
"""
instantiate:
indices for condition types
indices for specific condition instances
betas
"""
#### Set up basic model parameters
self.projectdir = projectdir
# Excluding 084
self.subjNums = ['013','014','016','017','018','021','023','024','026','027','028','030','031','032','033',
'034','035','037','038','039','040','041','042','043','045','046','047','048','049','050',
'053','055','056','057','058','062','063','066','067','068','069','070','072','074','075',
'076','077','081','085','086','087','088','090','092','093','094','095','097','098','099',
'101','102','103','104','105','106','108','109','110','111','112','114','115','117','119',
'120','121','122','123','124','125','126','127','128','129','130','131','132','134','135',
'136','137','138','139','140','141']
self.inputtypes = ['RED','VERTICAL','CONSTANT','HIGH']
self.ruletype = ruletype
#### Load in atlas
glasserfile2 = projectdir + 'data/Q1-Q6_RelatedParcellation210.LR.CorticalAreas_dil_Colors.32k_fs_RL.dlabel.nii'
glasser2 = nib.load(glasserfile2).get_data()
glasser2 = np.squeeze(glasser2)
self.glasser2 = glasser2
####
# Define hidden units
if n_hiddenregions!=None:
#######################################
#### Select hidden layer regions
hiddendir = projectdir + 'data/results/MAIN/RSA/'
hiddenregions = np.loadtxt(hiddendir + 'RSA_Similarity_SortedRegions2.txt',delimiter=',')
#######################################
#### Output directory
if randomize:
print("Constructing model with", n_hiddenregions, "randomly selected hidden regions")
fcdir = scratchfcdir
#### Necessary to optimize amarel
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
hiddenregions = np.random.choice(hiddenregions,size=n_hiddenregions,replace=False)
else:
print("Constructing model with", n_hiddenregions, "hidden regions")
fcdir = projectdir + 'data/results/MAIN/fc/LayerToLayerFC_' + str(n_hiddenregions) + 'Hidden/'
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
# Select hidden layer
if n_hiddenregions < 0:
hiddenregions = hiddenregions[n_hiddenregions:]
else:
hiddenregions = hiddenregions[:n_hiddenregions]
## Set object attributes
self.n_hiddenregions = n_hiddenregions
self.hiddenregions = np.squeeze(hiddenregions)
self.fcdir = fcdir
self.hidden = True # Set this variable to true - indicates to run sr simulations with a hidden layer
#### identify hidden region vertex indices
hidden_ind = []
for roi in hiddenregions:
hidden_ind.extend(np.where(self.glasser2==roi+1)[0])
self.hidden_ind = hidden_ind
else:
print("Constructing model with NO hidden layers")
fcdir = projectdir + 'data/results/MAIN/fc/LayerToLayerFC_NoHidden/'
pathlib.Path(fcdir).mkdir(parents=True, exist_ok=True) # Make sure directory exists
self.hidden = False # Set this variable to true - indicates to run sr simulations with a hidden layer
self.fcdir = fcdir
self.hiddenregions = None
self.n_hiddenregions = n_hiddenregions
####
# Define task rule (input) layer
ruledir = self.projectdir + 'data/results/MAIN/RuleDecoding/'
if ruletype=='12':
rule_regions = np.loadtxt(ruledir + self.ruletype + 'Rule_Regions.csv',delimiter=',')
elif ruletype=='fpn':
rule_regions = []
rule_regions.extend(np.where(networkdef==networkmappings['fpn'])[0])
rule_regions = np.asarray(rule_regions)
elif ruletype=='nounimodal':
allrule_regions = np.loadtxt(ruledir + '12Rule_Regions.csv',delimiter=',')
unimodal_nets = ['vis1','aud']
unimodal_regions = []
for net in unimodal_nets:
unimodal_regions.extend(np.where(networkdef==networkmappings[net])[0])
# only include regions that are in allrule_regions but also NOT in unimodal_regions
rule_regions = []
for roi in allrule_regions:
if roi in unimodal_regions:
continue
else:
rule_regions.append(roi)
rule_regions = np.asarray(rule_regions)
rule_ind = []
for roi in rule_regions:
rule_ind.extend(np.where(self.glasser2==roi+1)[0])
self.rule_ind = rule_ind
####
# Define motor regions
# Set indices for layer-by-layer vertices
targetdir = projectdir + 'data/results/MAIN/MotorResponseDecoding/'
motor_resp_regions_LH = np.loadtxt(targetdir + 'MotorResponseRegions_LH.csv',delimiter=',')
motor_resp_regions_RH = np.loadtxt(targetdir + 'MotorResponseRegions_RH.csv',delimiter=',')
targetROIs = np.hstack((motor_resp_regions_LH,motor_resp_regions_RH))
# Define all motor_ind
motor_ind = []
for roi in targetROIs:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind.extend(roi_ind)
motor_ind = np.asarray(motor_ind).copy()
self.motor_ind = motor_ind
#### override -- only pick the motor parcel with the greatest response decoding
motor_ind_lh = []
for roi in motor_resp_regions_LH:
# only include left hand responses in the right hemisphere
if roi>=180:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind_lh.extend(roi_ind)
motor_ind_rh = []
for roi in motor_resp_regions_RH:
# only include left hand responses in the right hemisphere
if roi<180:
roi_ind = np.where(glasser2==roi+1)[0]
motor_ind_rh.extend(roi_ind)
#
motor_ind_rh = np.asarray(motor_ind_rh).copy()
motor_ind_lh = np.asarray(motor_ind_lh).copy()
self.motor_ind_rh = motor_ind_rh
self.motor_ind_lh = motor_ind_lh
#### Load model task set
filename= projectdir + 'data/results/MAIN/EmpiricalSRActFlow_AllTrialKeys_15stims_v3.csv' # Great
self.trial_metadata = pd.read_csv(filename)
def computeGroupFC(self,n_components=500,nproc='max'):
"""
Function that wraps _computeSubjFC() to compute FC for all subjs, and computes averaged groupFC
"""
if nproc=='max':
nproc=mp.cpu_count()
inputs = []
for subj in self.subjNums:
inputs.append((subj,n_components))
pool = mp.Pool(processes=nproc)
if self.hidden:
pool.starmap_async(self._computeSubjFC,inputs)
else:
pool.starmap_async(self._computeSubjFC_NoHidden,inputs)
pool.close()
pool.join()
#### Compute group FC
for inputtype in self.inputtypes:
if self.hidden:
fc.computeGroupFC(inputtype,self.fcdir)
else:
fc.computeGroupFC_NoHidden(inputtype,self.fcdir)
if self.hidden:
fc.computeGroupFC(self.ruletype,self.fcdir)
else:
fc.computeGroupFC_NoHidden(self.ruletype,self.fcdir)
def loadRealMotorResponseActivations(self,vertexmasks=True):
#### Load motor response activations localized in output vertices only (for faster loading)
if vertexmasks:
print('Load real motor responses in output vertices')
self.data_task_rh, self.data_task_lh = tools.loadMotorResponsesOutputMask()
else:
print('Load real motor responses in output parcels -- inefficient since need to load all vertices first')
data_task_rh = []
data_task_lh = []
for subj in self.subjNums:
tmp_rh = tools.loadMotorResponses(subj,hand='Right')
tmp_lh = tools.loadMotorResponses(subj,hand='Left')
data_task_rh.append(tmp_rh[self.motor_ind_rh,:].copy().T)
data_task_lh.append(tmp_lh[self.motor_ind_lh,:].copy().T)
self.data_task_rh = np.asarray(data_task_rh).T
self.data_task_lh = np.asarray(data_task_lh).T
def loadModelFC(self):
if self.hidden:
print('Load Model FC weights')
fcdir = self.fcdir
self.fc_input2hidden = {}
self.eig_input2hidden = {}
for inputtype in ['VERTICAL','RED','HIGH','CONSTANT']:
self.fc_input2hidden[inputtype], self.eig_input2hidden[inputtype] = tools.loadGroupActFlowFC(inputtype,fcdir)
# Load rule to hidden
self.fc_12rule2hidden, self.eig_12rule2hidden = tools.loadGroupActFlowFC(self.ruletype,fcdir)
# Load hidden to motor resp mappings
self.fc_hidden2motorresp, self.eig_hidden2motorresp = tools.loadGroupActFlowFC('hidden2out',fcdir)
else:
print('Load Model FC weights -- No hidden layer')
fcdir = self.fcdir
self.fc_input2output = {}
self.eig_input2output = {}
for inputtype in ['VERTICAL','RED','HIGH','CONSTANT']:
self.fc_input2output[inputtype], self.eig_input2output[inputtype] = tools.loadGroupActFlowFC_NoHidden(inputtype,fcdir)
# Load rule to hidden
self.fc_12rule2output, self.eig_12rule2output = tools.loadGroupActFlowFC_NoHidden('12',fcdir)
def simulateGroupActFlow(self,thresh=0,nproc='max',vertexmasks=True):
"""
Simulate group level actflow (all subject simulations)
"""
if nproc=='max':
nproc=mp.cpu_count()
inputs = []
for subj in self.subjNums:
inputs.append((subj,thresh))
if nproc == 1:
results = []
for input1 in inputs:
results.append(self._simulateSubjActFlow(input1[0],input1[1]))
else:
pool = mp.Pool(processes=nproc)
results = pool.starmap_async(self._simulateSubjActFlow,inputs).get()
pool.close()
pool.join()
actflow_predictions = np.zeros((len(self.subjNums),len(self.motor_ind),4))
#actflow_predictions_noReLU = np.zeros((len(self.subjNums),len(self.motor_ind),4))
scount = 0
for result in results:
# actflow_predictions[scount,:,:] = result[0]
# actflow_predictions_noReLU[scount,:,:] = result[1]
actflow_predictions[scount,:,:] = result
scount += 1
## Reformat to fit shape of actual data array
actflow_rh = np.zeros((len(self.glasser2),2,len(self.subjNums)))
actflow_lh = np.zeros((len(self.glasser2),2,len(self.subjNums)))
for scount in range(len(self.subjNums)):
# RMID
actflow_rh[self.motor_ind,0,scount] = actflow_predictions[scount,:,2]
# RIND
actflow_rh[self.motor_ind,1,scount] = actflow_predictions[scount,:,3]
# LMID
actflow_lh[self.motor_ind,0,scount] = actflow_predictions[scount,:,0]
# LIND
actflow_lh[self.motor_ind,1,scount] = actflow_predictions[scount,:,1]
#### Now save out only relevant output mask vertices
if vertexmasks:
tmp = np.squeeze(nib.load(self.projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputRH_mask.dscalar.nii').get_data())
rh_ind = np.where(tmp==True)[0]
actflow_rh = actflow_rh[rh_ind,:,:]
tmp = np.squeeze(nib.load(self.projectdir + 'data/results/MAIN/MotorRegionsMasksPerSubj/sractflow_smn_outputLH_mask.dscalar.nii').get_data())
lh_ind = np.where(tmp==True)[0]
actflow_lh = actflow_lh[lh_ind,:,:].copy()
else:
actflow_rh = actflow_rh[self.motor_ind_rh,:,:].copy()
actflow_lh = actflow_lh[self.motor_ind_lh,:,:].copy()
return actflow_rh, actflow_lh
def actflowDecoding(self,trainset,testset,outputfile,
nbootstraps=1000,featsel=False,nproc='max',null=False,verbose=True):
if nproc=='max':
nproc=mp.cpu_count()
# Decoding
for i in range(nbootstraps):
distances_baseline = np.zeros((1,len(self.subjNums)*2)) # subjs * nlabels
distances_baseline[0,:],rmatch,rmismatch, confusion_mats = tools.actflowDecodings(testset,trainset,
effects=True, featsel=featsel,confusion=True,permutation=null,
ncvs=1, nproc=nproc)
##### Save out and append file
# Open/create file
filetxt = open(outputfile,"a+")
# Write out to file
print(np.mean(distances_baseline),file=filetxt)
# Close file
filetxt.close()
if i%100==0 and verbose==True:
print('Permutation', i)
print('\tDecoding accuracy:', np.mean(distances_baseline), '| R-match:', np.mean(rmatch), '| R-mismatch:', np.mean(rmismatch))
def extractSubjActivations(self, subj, df_trials):
"""
extract activations for a sample subject, including motor response
"""
## Set up data parameters
X = tgp.loadTaskTiming(subj,'ALL')
self.stimIndex = np.asarray(X['stimIndex'])
self.stimCond = np.asarray(X['stimCond'])
datadir = self.projectdir + 'data/postProcessing/hcpPostProcCiric/'
h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')
self.betas = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()
h5f.close()
## Set up task parameters
self.logicRules = ['BOTH', 'NOTBOTH', 'EITHER', 'NEITHER']
self.sensoryRules = ['RED', 'VERTICAL', 'HIGH', 'CONSTANT']
self.motorRules = ['LMID', 'LIND', 'RMID', 'RIND']
self.colorStim = ['RED', 'BLUE']
self.oriStim = ['VERTICAL', 'HORIZONTAL']
self.pitchStim = ['HIGH', 'LOW']
self.constantStim = ['CONSTANT','ALARM']
# Begin extraction for specific trials
n_trials = len(df_trials)
stimData = np.zeros((n_trials,self.betas.shape[0]))
logicRuleData = np.zeros((n_trials,self.betas.shape[0]))
sensoryRuleData = np.zeros((n_trials,self.betas.shape[0]))
motorRuleData = np.zeros((n_trials,self.betas.shape[0]))
respData = np.zeros((n_trials,self.betas.shape[0]))
sensoryRuleIndices = []
motorRespAll = []
for trial in range(n_trials):
logicRule = df_trials.iloc[trial].logicRule
sensoryRule = df_trials.iloc[trial].sensoryRule
motorRule = df_trials.iloc[trial].motorRule
motorResp = df_trials.iloc[trial].motorResp
stim1 = df_trials.iloc[trial].stim1
stim2 = df_trials.iloc[trial].stim2
# if verbose:
# print 'Running actflow predictions for:', logicRule, sensoryRule, motorRule, 'task'
logicKey = 'RuleLogic_' + logicRule
sensoryKey = 'RuleSensory_' + sensoryRule
motorKey = 'RuleMotor_' + motorRule
stimKey = 'Stim_' + stim1 + stim2
motorResp = solveInputs(logicRule, sensoryRule, motorRule, stim1, stim2, printTask=False)
respKey = 'Response_' + motorResp
stimKey_ind = np.where(self.stimCond==stimKey)[0]
logicRule_ind = np.where(self.stimCond==logicKey)[0]
sensoryRule_ind = np.where(self.stimCond==sensoryKey)[0]
motorRule_ind = np.where(self.stimCond==motorKey)[0]
respKey_ind = np.where(self.stimCond==respKey)[0]
stimData[trial,:] = np.real(self.betas[:,stimKey_ind].copy()[:,0])
logicRuleData[trial,:] = np.real(self.betas[:,logicRule_ind].copy()[:,0])
sensoryRuleData[trial,:] = np.real(self.betas[:,sensoryRule_ind].copy()[:,0])
motorRuleData[trial,:] = np.real(self.betas[:,motorRule_ind].copy()[:,0])
respData[trial,:] = np.real(self.betas[:,respKey_ind].copy()[:,0])
motorRespAll.append(motorResp)
sensoryRuleIndices.append(sensoryRule)
self.motorRespAll = motorRespAll
self.stimData = stimData
self.logicRuleData = logicRuleData
self.sensoryRuleData = sensoryRuleData
self.motorRuleData = motorRuleData
self.respData = respData
self.sensoryRuleIndices = sensoryRuleIndices
def extractSubjHiddenRSMActivations(self, subj):
"""
extract activations for a sample subject, including motor response
"""
## Set up data parameters
X = tgp.loadTaskTiming(subj,'ALL')
self.stimIndex = np.asarray(X['stimIndex'])
self.stimCond = np.asarray(X['stimCond'])
datadir = self.projectdir + 'data/postProcessing/hcpPostProcCiric/'
h5f = h5py.File(datadir + subj + '_glmOutput_data.h5','r')
self.betas = h5f['taskRegression/ALL_24pXaCompCorXVolterra_taskReg_betas_canonical'][:].copy()
h5f.close()
## Set up task parameters
self.logicRules = ['BOTH', 'NOTBOTH', 'EITHER', 'NEITHER']
self.sensoryRules = ['RED', 'VERTICAL', 'HIGH', 'CONSTANT']
self.motorRules = ['LMID', 'LIND', 'RMID', 'RIND']
self.colorStim = ['RED', 'BLUE']
self.oriStim = ['VERTICAL', 'HORIZONTAL']
self.pitchStim = ['HIGH', 'LOW']
self.constantStim = ['CONSTANT','ALARM']
total_conds = 28 # 12 rules + 16 stimulus pairings
rsm_activations = np.zeros((28,self.betas.shape[0]))
labels = []
condcount = 0
##
# START
for cond in self.logicRules:
labels.append(cond)
key = 'RuleLogic_' + cond
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond in self.sensoryRules:
labels.append(cond)
key = 'RuleSensory_' + cond
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond in self.motorRules:
labels.append(cond)
key = 'RuleMotor_' + cond
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
# This is nested for loop since stimuli come in pairs
for cond1 in self.colorStim:
for cond2 in self.colorStim:
labels.append(cond1 + cond2)
key = 'Stim_' + cond1 + cond2
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond1 in self.oriStim:
for cond2 in self.oriStim:
labels.append(cond1 + cond2)
key = 'Stim_' + cond1 + cond2
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond1 in self.pitchStim:
for cond2 in self.pitchStim:
labels.append(cond1 + cond2)
key = 'Stim_' + cond1 + cond2
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
for cond1 in self.constantStim:
for cond2 in self.constantStim:
labels.append(cond1 + cond2)
key = 'Stim_' + cond1 + cond2
ind = np.where(self.stimCond==key)[0]
rsm_activations[condcount,:] = np.real(self.betas[:,ind].copy()[:,0])
condcount += 1 # go to next condition
return rsm_activations, labels
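    # Hedged sketch (not part of the original analysis code; names are illustrative):
    # the activations returned above can be turned into a condition-by-condition
    # representational similarity matrix with a plain correlation, e.g.
    #   acts, labels = self.extractSubjHiddenRSMActivations(subj)
    #   rsm = np.corrcoef(acts)   # 28 x 28 matrix of condition similarities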
def generateHiddenUnitRSMPredictions(self,thresh=0,n_hiddenregions=10,filename='',verbose=False):
"""
Run all predictions for all 64 tasks
"""
hidden_ind = self.hidden_ind
rule_ind = self.rule_ind
all_actflow_unthresh = []
all_actflow_thresh = []
all_true_activity = []
for subj in self.subjNums:
print('Predicting hidden layer activations for subject', subj)
rsm_activations, labels = self.extractSubjHiddenRSMActivations(subj)
tmp_actflow_unthresh = []
tmp_actflow_thresh = []
tmp_true_activity = []
labelcount = 0
for label in labels:
# Dissociate sensory rules from sensory stimuli since stimuli have two stimulus words (e.g., 'REDRED')
if label in ['BOTH', 'NOTBOTH', 'EITHER', 'NEITHER', 'RED', 'VERTICAL', 'HIGH', 'CONSTANT', 'LMID', 'LIND', 'RMID', 'RIND']:
input_units = 'rule'
if label in ['REDRED', 'REDBLUE', 'BLUERED', 'BLUEBLUE']:
input_units = 'RED' # specify sensory rules for sensory activations
if label in ['VERTICALVERTICAL', 'VERTICALHORIZONTAL', 'HORIZONTALVERTICAL', 'HORIZONTALHORIZONTAL']:
input_units = 'VERTICAL' # this is the sensory rule
if label in ['HIGHHIGH', 'HIGHLOW', 'LOWHIGH', 'LOWLOW']:
input_units = 'HIGH'
if label in ['CONSTANTCONSTANT', 'CONSTANTALARM', 'ALARMCONSTANT', 'ALARMALARM']:
input_units = 'CONSTANT'
if input_units!='rule':
input_ind = self._getStimIndices(input_units) # Identify the vertices for stimulus layer of the ANN
unique_input_ind = np.where(np.in1d(input_ind,hidden_ind)==False)[0]
fc = self.fc_input2hidden[input_units]
pc_act = np.matmul(rsm_activations[labelcount,:][unique_input_ind],self.eig_input2hidden[input_units].T)
# Unthresholded actflow
actflow_unthresh = np.matmul(pc_act,fc)
# Thresholded actflow
actflow_thresh = np.multiply(actflow_unthresh,actflow_unthresh>thresh)
if input_units=='rule':
unique_input_ind = np.where(np.in1d(rule_ind,hidden_ind)==False)[0]
fc = self.fc_12rule2hidden
pc_act = np.matmul(rsm_activations[labelcount,:][unique_input_ind],self.eig_12rule2hidden.T)
# Unthresholded actflow
actflow_unthresh = np.matmul(pc_act,fc)
# Thresholded actflow
actflow_thresh = np.multiply(actflow_unthresh,actflow_unthresh>thresh)
tmp_actflow_unthresh.append(actflow_unthresh)
tmp_actflow_thresh.append(actflow_thresh)
tmp_true_activity.append(np.squeeze(rsm_activations[labelcount,hidden_ind]))
labelcount += 1
# Compute subject-specific predicted activations for each condition
all_actflow_unthresh.append(np.asarray(tmp_actflow_unthresh))
all_actflow_thresh.append(np.asarray(tmp_actflow_thresh))
all_true_activity.append(np.asarray(tmp_true_activity))
np.savetxt(filename + '.txt', labels, fmt='%s')
h5f = h5py.File(filename + '.h5','a')
try:
h5f.create_dataset('actflow_unthresh',data=all_actflow_unthresh)
h5f.create_dataset('actflow_thresh',data=all_actflow_thresh)
h5f.create_dataset('true_activity',data=all_true_activity)
except:
del h5f['actflow_unthresh'], h5f['actflow_thresh'], h5f['true_activity']
h5f.create_dataset('actflow_unthresh',data=all_actflow_unthresh)
h5f.create_dataset('actflow_thresh',data=all_actflow_thresh)
h5f.create_dataset('true_activity',data=all_true_activity)
h5f.close()
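    # Hedged sketch (illustrative) for reading the saved predictions back:
    #   with h5py.File(filename + '.h5', 'r') as f:
    #       actflow_unthresh = f['actflow_unthresh'][:]   # subjects x conditions x hidden-layer vertices
    #       true_activity = f['true_activity'][:]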
def generateInputControlDecoding(self,n_hiddenregions=10,verbose=False):
"""
Run all predictions for all 64 tasks
"""
hidden_ind = self.hidden_ind
rule_ind = self.rule_ind
# Also exclude smn indices
smn_rois = np.where(networkdef==networkmappings['smn'])[0]
smn_ind = []
for roi in smn_rois:
smn_ind.extend(np.where(self.glasser2==roi+1)[0])
smn_ind = np.asarray(smn_ind)
target_vertices = self.fc_hidden2motorresp.shape[1]
        actflow = np.zeros((target_vertices,4)) # LMID, LIND, RMID, RIND -- 4 cols, one per motor response, for each sensory rule
input_activations_lmid = []
input_activations_lind = []
input_activations_rmid = []
input_activations_rind = []
all_input_ind = []
for sensoryRule in self.sensoryRules:
input_ind = self._getStimIndices(sensoryRule) # Identify the vertices for the stimulus layer of the ANN
all_input_ind.extend(input_ind)
all_input_ind = np.asarray(all_input_ind)
#### Input activations
unique_input_ind = np.where(np.in1d(all_input_ind,hidden_ind)==False)[0]
unique_input_ind = np.where(np.in1d(unique_input_ind,smn_ind)==False)[0]
input_act = self.stimData[:,:][:,unique_input_ind]
#### 12 rule activations
unique_input_ind = np.where(np.in1d(rule_ind,hidden_ind)==False)[0]
unique_input_ind = np.where(np.in1d(unique_input_ind,smn_ind)==False)[0]
rule_composition = self.logicRuleData[:,unique_input_ind] + self.sensoryRuleData[:,unique_input_ind] + self.motorRuleData[:,unique_input_ind]
#rule_act = self.logicRuleData[:,:][:,unique_input_ind]
##### Concatenate input activations
input_activations = np.hstack((input_act,rule_composition))
## Apply threshold
        input_activations = np.multiply(input_act,input_act>0)
import numpy as np
import keras
import sys
from keras.models import Sequential, Model
from keras.engine.topology import Layer
from keras.layers import Input, Dense, TimeDistributed, merge, Lambda
from keras.layers.core import *
from keras.layers.convolutional import *
from keras.layers.recurrent import *
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
import tensorflow as tf
from keras import backend as K
from keras.activations import relu
from functools import partial
from RP_Bilinear_Pooling import RPBinaryPooling2, RPTrinaryPooling,RPGaussianPooling,MultiModalLowRankPooling,RPLearnable, FBM
clipped_relu = partial(relu, max_value=5)
### import the module of compact bilinear pooling: https://github.com/murari023/tensorflow_compact_bilinear_pooling
### we modify the code to fit the feature vector sequence tensor, i.e. [n_batches, n_frames, n_channels]
from compact_bilinear_pooling import compact_bilinear_pooling_layer
from adaptive_correlation_pooling import InceptionK_module
def max_filter(x):
# Max over the best filter score (like ICRA paper)
max_values = K.max(x, 2, keepdims=True)
max_flag = tf.greater_equal(x, max_values)
out = x * tf.cast(max_flag, tf.float32)
return out
def channel_normalization(x):
# Normalize by the highest activation
max_values = K.max(K.abs(x), 2, keepdims=True)+1e-5
out = x / max_values
return out
def WaveNet_activation(x):
tanh_out = Activation('tanh')(x)
sigm_out = Activation('sigmoid')(x)
return keras.layers.Multiply()([tanh_out, sigm_out])
def lp_normalization(x,p=2):
if p == 2:
return K.l2_normalize(x, axis=-1)
else:
norm_x = tf.maximum(tf.norm(x, ord=p, axis=-1, keepdims=True), 1e-6)
return x/norm_x
def power_normalization(x):
y = tf.sign(x) * tf.sqrt(tf.abs(x))
return y
# -------------------------------------------------------------
def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len,
causal=False, loss='categorical_crossentropy',
optimizer='adam', return_param_str=False):
""" Used in paper:
Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation
Lea et al. ECCV 2016
Note: Spatial dropout was not used in the original paper.
It tends to improve performance a little.
"""
inputs = Input(shape=(max_len,n_feat))
    model = inputs
    if causal: model = ZeroPadding1D((conv_len//2,0))(model)
    model = Convolution1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, border_mode='same', activation='relu')(model)
if causal: model = Cropping1D((0,conv_len//2))(model)
model = SpatialDropout1D(0.3)(model)
model = TimeDistributed(Dense(n_classes, activation="softmax" ))(model)
model = Model(input=inputs, output=model)
model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")
if return_param_str:
param_str = "tConv_C{}".format(conv_len)
if causal:
param_str += "_causal"
return model, param_str
else:
return model
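# Hedged usage sketch for temporal_convs_linear (all sizes and array names below are
# illustrative assumptions, not from the original paper code):
#   model = temporal_convs_linear(n_nodes=64, conv_len=25, n_classes=10, n_feat=128, max_len=200)
#   # X_train: (n_videos, max_len, n_feat); Y_train: (n_videos, max_len, n_classes) one-hot;
#   # M_train masks padded frames, matching sample_weight_mode="temporal".
#   model.fit(X_train, Y_train, sample_weight=M_train)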
'''
low-rank approximation: backpropagation of SVD
https://github.com/tensorflow/tensorflow/issues/6503
Note: (1) GPU running is considerably slow,
(2) running on cpu does not converge and randomly terminates without reporting error.
https://gist.github.com/psycharo/60f58d5435281bdea8b9d4ee4f6e895b
'''
def mmsym(x):
return (x + tf.transpose(x, [0,1,3,2])) / 2
def mmdiag(x):
return tf.matrix_diag(tf.matrix_diag_part(x))
def get_eigen_K(x, square=False):
"""
Get K = 1 / (sigma_i - sigma_j) for i != j, 0 otherwise
Parameters
----------
    x : tf.Tensor with shape [..., dim]
Returns
-------
"""
if square:
x = tf.square(x)
res = tf.expand_dims(x, 2) - tf.expand_dims(x, 3)
res += tf.eye(tf.shape(res)[-1])
res = 1 / res
res -= tf.eye(tf.shape(res)[-1])
# Keep the results clean
res = tf.where(tf.is_nan(res), tf.zeros_like(res), res)
res = tf.where(tf.is_inf(res), tf.zeros_like(res), res)
return res
@ops.RegisterGradient('SvdGrad')
def gradient_svd(op, grad_s, grad_u, grad_v):
"""
    Define the gradient for SVD; adapted here to handle a batched matrix sequence
References
Ionescu, C., et al, Matrix Backpropagation for Deep Networks with Structured Layers
Parameters
----------
op
grad_s
grad_u
grad_v
Returns
-------
"""
s, U, V = op.outputs
V_t = tf.transpose(V, [0,1,3,2])
U_t = tf.transpose(U, [0,1,3,2])
K = get_eigen_K(s, True)
K_t = tf.transpose(K, [0,1,3,2])
S = tf.matrix_diag(s)
grad_S = tf.matrix_diag(grad_s)
D = tf.matmul(grad_u, 1.0/S)
D_t = tf.transpose(D, [0,1,3,2])
# compose the full gradient
term1 = tf.matmul(D, V_t)
term2_1 = mmdiag(grad_S - tf.matmul(U_t, D))
term2 = tf.matmul(U, tf.matmul(term2_1, V_t))
term3_1 = tf.matmul(V, tf.matmul(D_t, tf.matmul(U, S)))
term3_2 = mmsym(K_t * tf.matmul(V_t, grad_v-term3_1))
term3 = 2*tf.matmul(U, tf.matmul(S, tf.matmul(term3_2, V_t)))
dL_dX = term1+term2+term3
return dL_dX
def sort_tensor_column(X, col_idx):
# X - the tensor with shape [batch, time, feature_dim, feature_dim]
# col_idx - the column index with shape[batch, time, r]
# this function returns a tensor with selected columns by r, i.e. return a tensor with [batch, time, feature_dim, r]
# notice that the first dimension batch is usually None
#n_batch = X.get_shape().as_list()[0]
n_batch = 4
n_time = X.get_shape().as_list()[1]
n_dim = X.get_shape().as_list()[2]
n_rank = col_idx.get_shape().as_list()[-1]
Xt = tf.transpose(X, [0,1,3,2])
Xt = tf.reshape(Xt, [n_batch*n_time, n_dim, n_dim])
col_idx = tf.reshape(col_idx, [n_batch*n_time, n_rank])
Xt_list = tf.unstack(Xt, axis=0)
X_sort_list = [tf.gather_nd(Xt_list[t], col_idx[t,:]) for t in range(len(Xt_list))]
print('X_sort_list[0].shape='+str(X_sort_list[0].shape))
X_sort = tf.stack(X_sort_list, axis=0)
X_sort = tf.reshape(X_sort,[n_batch, n_time, n_rank, n_dim])
X_sort = tf.transpose(X_sort, [0,1,3,2])
return X_sort
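# Shape sketch (illustrative): sort_tensor_column(X, col_idx) with X of shape [4, T, d, d]
# and col_idx of shape [4, T, r] keeps, for every (batch, time) matrix, the r columns named
# by col_idx and returns a tensor of shape [4, T, d, r] (n_batch is hard-coded to 4 above).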
class EigenPooling(Layer):
def __init__(self, rank,method='svd', **kwargs):
self.rank = rank
self.method = method
super(EigenPooling, self).__init__(**kwargs)
def build(self, input_shape):
self.shape = input_shape
super(EigenPooling,self).build(input_shape)
def call(self,x):
        if self.method == 'eigen':
## eigendecomposition
e,v = tf.self_adjoint_eig(x)
v_size = v.get_shape().as_list()
e = tf.abs(e)
e1,idx = tf.nn.top_k(e, k=self.rank)
E1 = tf.sqrt(tf.matrix_diag(e1))
u = sort_tensor_column(v, idx)
print('v.shape='+str(v.shape))
print('idx.shape='+str(idx.shape))
print('u.shape='+str(u.shape))
l = tf.matmul(u[:,:,:,:self.rank],E1[:,:,:self.rank,:self.rank])
        ## singular value decomposition
        elif self.method == 'svd':
G = tf.get_default_graph()
with G.gradient_override_map({'Svd':'SvdGrad'}):
s,u,v = tf.svd(x, full_matrices=True)
l = tf.matmul(u[:,:,:,:self.rank], tf.matrix_diag(tf.sqrt(1e-5+s[:,:,:self.rank])))
else:
sys.exit('[ERROR] the specified method for matrix decomposition is not valid')
return l
    # NOTE: this second call() definition shadows the one above, so the 'method' option is
    # effectively ignored and the SVD branch below is always used.
    def call(self,x):
G = tf.get_default_graph()
d = x.shape[-1]
## eigendecomposition
#e,v = tf.self_adjoint_eig(x)
#e = tf.abs(e)
#e1,idx = tf.nn.top_k(e, k=self.rank)
#e1 = tf.matrix_diag(e1)
#v_list = tf.unstack(v, axis=1)
#vr_list = [tf.gather(xx, idx[]
#print(idx)
#print(v1.shape)
#l = tf.matmul(v1, e1)
#print(l.shape)
        ## singular value decomposition
# G = tf.get_default_graph()
with G.gradient_override_map({'Svd':'SvdGrad'}):
s,u,v = tf.svd(x, full_matrices=True)
l = tf.matmul(u[:,:,:,:self.rank], tf.matrix_diag(s[:,:,:self.rank]))
return l
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1], input_shape[2],self.rank)
def tensor_product_local(X,W):
#input: X in [batch, T, channel]
#input: W in [T,T]
#compute X^T * W * X (* is multiplication)
n_channels = X.shape[-1]
A = K.dot(K.permute_dimensions(X, [0,2,1]), W)
B = K.batch_dot(A,X)
#return K.reshape(B,[-1,n_channels*n_channels])
return B
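# Shape sketch (illustrative): for X of shape [batch, T, C] and W of shape [T, T],
# tensor_product_local returns X^T W X of shape [batch, C, C]; with W = diag(w) this is a
# weighted (auto-)correlation of the C channels over the local temporal window.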
# def _get_tril_batch(D):
# # convert each element in the batch to lower triangular matrix
# # D has the size[ batch, dimension, dimension]
# mat_list = tf.unstack(D, axis=0)
# fun_tril = Lambda(lambda x: tf.matrix_band_part(x, -1,0))
# mat_tril_list = [ fun_tril(x) for x in mat_list ]
# return tf.stack(mat_tril_list, axis=0)
def tensor_product_local_lowdim(X,W, tril_idx, scaling_mask):
# input: X in [batch, T, channel]
# input: w is a 1D vector with size (time, )
# compute X^T * W * X (* is multiplication)
n_channels = X.shape[-1]
n_batches = X.shape[0]
A = K.dot(K.permute_dimensions(X, [0,2,1]), W)
B = K.batch_dot(A,X)
B = B * scaling_mask
# B_vec = K.reshape(B, [-1, n_channels*n_channels]) # [batch (None), 1, d**2]
#ii = Lambda(lambda x: tf.tile(tf.range(x)[:, tf.newaxis], (1, n_channels)))(n_batches)
#B_list = Lambda(lambda x: tf.split(x, tf.shape(X)[0], axis=0))(B)
#B_vec_lowdim_list = [np.sqrt(2)*tf.gather_nd(x, tril_idx) for x in B_list]
#B_vec_lowdim = K.stack(B_vec_lowdim_list, axis=0)
B_vec_lowdim = tf.map_fn(lambda x: tf.gather_nd(x, tril_idx), B)
# print(B_vec_lowdim.shape)
return B_vec_lowdim*np.sqrt(2)
def weighted_average_local(X,w):
W = K.expand_dims(w,axis=-1)
W = K.repeat_elements(W, X.shape[-1], axis=-1)
y = X*W
return K.sum(y, axis=1, keepdims=False)
def tensor_product(inputs, st_conv_filter_one, conv_len, stride=1, low_dim=False):
# input - [batch, time, channels]
local_size=conv_len
n_frames = inputs.shape[1]
n_batches = inputs.shape[0]
x = ZeroPadding1D((local_size//2))(inputs)
W = Lambda(lambda x: tf.diag(x))(st_conv_filter_one)
if not low_dim:
y = [ tensor_product_local(x[:,i:i+local_size,:],W) for i in range(0,n_frames,stride) ]
outputs =K.stack(y,axis=1)
outputs = K.reshape(outputs, [-1,outputs.shape[1],outputs.shape[-2]*outputs.shape[-1] ])
else:
n_channels = inputs.get_shape().as_list()[-1]
tril_idx = np.stack(np.tril_indices(n_channels), axis=0)
tril_idx2 = np.squeeze(np.split(tril_idx, tril_idx.shape[1], axis=1))
        scaling_mask = np.expand_dims(np.eye(n_channels) / np.sqrt(2), axis=0)
from splipy import Curve, BSplineBasis
from objects.parameters import NUM_SAMPLES_FOR_REPARAMETERIZATION, ORDER, NUM_INTERPOLATION_POINTS, EPSILON
from objects.utilities import open_uniform_knot_vector
import numpy as np
from copy import deepcopy
class Backbone:
def __init__(self, controlpoints, reparameterize=True, name=None):
self.controlpoints = controlpoints
self.num_controlpoints = self.controlpoints.shape[0]
self.name = name
# Construct B-Spline
self.construct_B_spline()
# Arc length parameterization
if reparameterize is True:
self.backbone = self.reparameterize()
def construct_B_spline(self):
"""Construct the initial B-spline. This is formed by the relatively small number of control points that will give the curve its shape. An open uniform knot vector is used so that the endpoints are the first and last controlpoints. The resulting curved must be reparameterized to be arc length parameterized."""
knot = open_uniform_knot_vector(self.num_controlpoints, ORDER)
basis = BSplineBasis(order=ORDER, knots=knot, periodic=-1)
self.backbone = Curve(basis=basis, controlpoints=self.controlpoints, rational=False)
def reparameterize(self):
"""Create arc length parameterization of the backbone. This is works by sampling many evenly-spaced points along the original backbone and using these as the controlpoints of a new B-spline curve with a uniform knot-vector. This reparameterization is approximate. However, by choosing a large number of sample points, the curves become very close.
See https://homepage.cs.uiowa.edu/~kearney/pubs/CurvesAndSurfacesArcLength.pdf for the idea."""
#### Choose controlpoints that are evenly spaced
# The arc length (that we want) to each control point
target_arc_lengths = np.linspace(0, self.backbone.length(), NUM_INTERPOLATION_POINTS)
# Sample many points along the backbone and choose the one that results in the arc length that is closest to our target arc length
# This method seems coarse but is way faster than using a function optimizer (e.g. scipy.optimize.minimize), which is also an approximation.
t = np.linspace(0, 1, NUM_SAMPLES_FOR_REPARAMETERIZATION)
points = self.backbone(t)
dists = np.linalg.norm(points[1:] - points[:-1], axis=1)
cum_dists = np.cumsum(dists) # Approximate distance by summing linear distances
idx = np.searchsorted(cum_dists, target_arc_lengths, side="left")
controlpoints = points[idx]
#### Create new backbone that is reparameterized
# Wrap first and last controlpoint so that new backbone goes through these endpoints
NUM_EXTRA_CP = 2
controlpoints_wrapped = np.zeros((NUM_INTERPOLATION_POINTS + NUM_EXTRA_CP, 3))
controlpoints_wrapped[1:-1] = controlpoints
controlpoints_wrapped[[0, -1]] = controlpoints[[0, -1]] # Duplicate first and last cp
# Construct new backbone
knot = np.linspace(0, 1, NUM_INTERPOLATION_POINTS + ORDER + NUM_EXTRA_CP) # uniform (not open uniform!)
basis = BSplineBasis(order=ORDER, knots=knot, periodic=-1)
backbone = Curve(basis=basis, controlpoints=controlpoints_wrapped, rational=False)
backbone.reparam() # Reparameterize between 0 and 1
return backbone
def dx(self, t):
"""First derivative (velocity) of b-spline backbone."""
if type(t) != type(np.array(0)):
t = np.array(t, dtype="float64")
# Copy t to avoid problems when it gets changed
t = t.copy() # Changing t was causing a bug
# Handle array that may contain t == 0 or t == 1
mask_t_0 = t == 0
mask_t_1 = t == 1
t[mask_t_0] = EPSILON
t[mask_t_1] = 1 - EPSILON
dx = self.backbone.derivative(t, 1)
# Negative/positive values close to zero cause inconsistency when taking cross product
dx = np.round(dx, 8)
return dx
def r(self, t):
if type(t) != type(np.array(0)):
t = np.array(t, dtype="float64")
return self.backbone(t)
def T(self, t):
"""Tangent vector is unit vector in same direction as velocity vector."""
if type(t) != type(np.array(0)):
t = np.array(t, dtype="float64")
dx = self.dx(t)
T = dx / np.linalg.norm(dx, axis=1, keepdims=True)
return T
def N(self, t):
"""Normal vector is unit vector that is perpendicular to tangent vector and to [0,0,1].
I chose [0,0,1] arbitrarily, in any case, it will result in the binormal vector that is perpendicular to the tangent and is pointing "most upward" (has largest Z-component)."""
if type(t) != type(np.array(0)):
t = np.array(t, dtype="float64")
UP_VECTOR = np.array([0, 0, 1])
T = self.T(t)
cross = np.cross(UP_VECTOR, T)
# Normalize
magnitude = np.linalg.norm(cross, axis=1, keepdims=True)
assert np.all(
~np.isclose(magnitude, 0)
), "Normal vectors with 0 magnitude aren't valid. This may be because the tangent vector was colinear with [0,0,1]."
N = cross / magnitude
# Check that the two are perpendicular
assert np.all(np.isclose(np.dot(T, N.T).diagonal(), 0)), "Tangent and Normal vectors are not perpendicular."
return N
def B(self, t):
"""Binormal vector is unit vector that is perpendicular to tangent vector and closest to [0,0,1]."""
if type(t) != type(np.array(0)):
t = np.array(t, dtype="float64")
T = self.T(t)
N = self.N(t)
cross = np.cross(T, N)
        B = cross / np.linalg.norm(cross, axis=1, keepdims=True)
        return B
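# Hedged usage sketch (the controlpoints below are made up, not from the original project):
#   cp = np.array([[0, 0, 0], [10, 0, 0], [20, 5, 0], [30, 5, 5]], dtype="float64")
#   bb = Backbone(cp, reparameterize=True)
#   t = np.linspace(0, 1, 5)
#   points, T, N, B = bb.r(t), bb.T(t), bb.N(t), bb.B(t)   # positions and local TNB frame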
import numpy as np
import os
import Augmentor
from itertools import combinations
from utils import img_to_array, load_img, unison_shuffle
def build_class_generator(class_path, probability, width, height):
pipeline = Augmentor.Pipeline(class_path)
pipeline.random_erasing(probability, 0.4)
pipeline.rotate(probability, 20, 20)
pipeline.shear(probability, 20, 20)
pipeline.skew(probability, 0.8)
pipeline.zoom(probability, 1.1, 1.5)
pipeline.random_distortion(probability, 3, 3, 3)
pipeline.random_distortion(probability, 8, 8, 3)
pipeline.resize(1.0, width, height)
return pipeline.keras_generator(batch_size=1)
def build_all_generators(data_directory, probability, target_shape):
width, height, _ = target_shape
generators = list()
for class_directory in os.listdir(data_directory):
class_path = os.path.join(data_directory, class_directory)
generator = build_class_generator(class_path,
probability,
width, height)
generators.append(generator)
return generators
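# Hedged usage sketch (the directory layout and target shape are assumptions):
#   generators = build_all_generators("data/train", probability=0.5, target_shape=(105, 105, 1))
#   left, right, same = fill_batch_entry(0, 1, generators, target_shape=(105, 105, 1))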
def fill_batch_entry(left_label, right_label, generators, target_shape):
# Left.
left_image, _ = next(generators[left_label])
left_image = left_image.reshape(target_shape) - 0.5
# Right.
right_image, _ = next(generators[right_label])
right_image = right_image.reshape(target_shape) - 0.5
# Similarity.
is_same = 1.0 if left_label == right_label else 0.0
return left_image.astype(np.float32), right_image.astype(np.float32), is_same
def get_different_pairs(batch_size, amount_classes):
all_pair_combinations = np.array([*combinations(list(range(amount_classes)), 2)])
amount_different_pairs = batch_size // 2
amount_repeats = amount_different_pairs // len(all_pair_combinations)
remainder = amount_different_pairs - amount_repeats * len(all_pair_combinations)
pairs = all_pair_combinations.copy()
for _ in range(amount_repeats - 1):
pairs = np.concatenate((pairs, all_pair_combinations))
all_pairs = np.concatenate((pairs, all_pair_combinations[:remainder]))
max_allowed = batch_size // 2
return np.random.permutation(all_pairs)[:max_allowed]
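# Worked example (illustrative): with amount_classes=3 the pair pool is [(0, 1), (0, 2), (1, 2)];
# for batch_size=8 this returns 4 different-class pairs, cycling through that pool as needed
# and shuffling the result.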
def test_image_arrays(*args):
for array in args:
assert not np.isnan(array).any()
assert np.max(array) <= 1.0
assert np.min(array) >= -1.0
assert array.dtype == np.float32, '{}'.format(array.dtype)
for element in array:
assert np.mean(element) != 0
            assert np.std(element) != 0
"""AHOT Spatial Solver tests"""
# TODO:
# Add tests with supported warning configurations?
# a = populate_with_warnings("AHOTN")
# a = populate_with_warnings("DGFEM")
import numpy as np
from numpy.testing import assert_array_almost_equal
import pyne.spatialsolver
from .dictionary_populate_test import (
populate_simple,
populate_simple_with_warnings,
populate_intermediate_1,
)
def test_ahotn_ln():
a = populate_simple("AHOTN", "LN")
dict_results = pyne.spatialsolver.solve(a)
if dict_results["success"] == 0:
raise AssertionError("Error: " + dict_results["error_msg"])
exp = np.array(
[
[
[3.52650199, 3.09260257, 3.09260257, 3.52650199],
[3.09260257, 2.73209732, 2.73209732, 3.09260257],
[3.09260257, 2.73209732, 2.73209732, 3.09260257],
[3.52650199, 3.09260257, 3.09260257, 3.52650199],
],
[
[2.89021832, 2.61284811, 2.61284811, 2.89021832],
[2.61284811, 2.38571678, 2.38571678, 2.61284811],
[2.61284811, 2.38571678, 2.38571678, 2.61284811],
[2.89021832, 2.61284811, 2.61284811, 2.89021832],
],
[
[2.89021832, 2.61284811, 2.61284811, 2.89021832],
[2.61284811, 2.38571678, 2.38571678, 2.61284811],
[2.61284811, 2.38571678, 2.38571678, 2.61284811],
[2.89021832, 2.61284811, 2.61284811, 2.89021832],
],
[
[3.52650199, 3.09260257, 3.09260257, 3.52650199],
[3.09260257, 2.73209732, 2.73209732, 3.09260257],
[3.09260257, 2.73209732, 2.73209732, 3.09260257],
[3.52650199, 3.09260257, 3.09260257, 3.52650199],
],
]
)
obs = np.array(dict_results["flux"])
assert_array_almost_equal(exp, obs, 4)
def test_ahotn_ll():
a = populate_simple("AHOTN", "LL")
dict_results = pyne.spatialsolver.solve(a)
exp = np.array(
[
[
[3.52588507, 3.09173385, 3.09173385, 3.52588507],
[3.09173385, 2.73355777, 2.73355777, 3.09173385],
[3.09173385, 2.73355777, 2.73355777, 3.09173385],
[3.52588507, 3.09173385, 3.09173385, 3.52588507],
],
[
[2.88989501, 2.61223446, 2.61223446, 2.88989501],
[2.61223446, 2.38668358, 2.38668358, 2.61223446],
[2.61223446, 2.38668358, 2.38668358, 2.61223446],
[2.88989501, 2.61223446, 2.61223446, 2.88989501],
],
[
[2.88989501, 2.61223446, 2.61223446, 2.88989501],
[2.61223446, 2.38668358, 2.38668358, 2.61223446],
[2.61223446, 2.38668358, 2.38668358, 2.61223446],
[2.88989501, 2.61223446, 2.61223446, 2.88989501],
],
[
[3.52588507, 3.09173385, 3.09173385, 3.52588507],
[3.09173385, 2.73355777, 2.73355777, 3.09173385],
[3.09173385, 2.73355777, 2.73355777, 3.09173385],
[3.52588507, 3.09173385, 3.09173385, 3.52588507],
],
]
)
obs = np.array(dict_results["flux"])
assert_array_almost_equal(exp, obs, 4)
def test_ahotn_nefd():
a = populate_simple("AHOTN", "NEFD")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[3.524073, 3.091501, 3.091501, 3.524073],
[3.091501, 2.734906, 2.734906, 3.091501],
[3.091501, 2.734906, 2.734906, 3.091501],
[3.524073, 3.091501, 3.091501, 3.524073],
],
[
[2.888798, 2.612178, 2.612178, 2.888798],
[2.612178, 2.387341, 2.387341, 2.612178],
[2.612178, 2.387341, 2.387341, 2.612178],
[2.888798, 2.612178, 2.612178, 2.888798],
],
[
[2.888798, 2.612178, 2.612178, 2.888798],
[2.612178, 2.387341, 2.387341, 2.612178],
[2.612178, 2.387341, 2.387341, 2.612178],
[2.888798, 2.612178, 2.612178, 2.888798],
],
[
[3.524073, 3.091501, 3.091501, 3.524073],
[3.091501, 2.734906, 2.734906, 3.091501],
[3.091501, 2.734906, 2.734906, 3.091501],
[3.524073, 3.091501, 3.091501, 3.524073],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_ld():
a = populate_simple("DGFEM", "LD")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[3.540511, 3.104096, 3.104096, 3.540511],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.540511, 3.104096, 3.104096, 3.540511],
],
[
[2.899079, 2.620152, 2.620152, 2.899079],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.899079, 2.620152, 2.620152, 2.899079],
],
[
[2.899079, 2.620152, 2.620152, 2.899079],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.899079, 2.620152, 2.620152, 2.899079],
],
[
[3.540511, 3.104096, 3.104096, 3.540511],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.540511, 3.104096, 3.104096, 3.540511],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_dense():
a = populate_simple("DGFEM", "DENSE")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[3.540511, 3.104096, 3.104096, 3.540511],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.540511, 3.104096, 3.104096, 3.540511],
],
[
[2.899079, 2.620152, 2.620152, 2.899079],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.899079, 2.620152, 2.620152, 2.899079],
],
[
[2.899079, 2.620152, 2.620152, 2.899079],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.620152, 2.383940, 2.383940, 2.620152],
[2.899079, 2.620152, 2.620152, 2.899079],
],
[
[3.540511, 3.104096, 3.104096, 3.540511],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.104096, 2.730554, 2.730554, 3.104096],
[3.540511, 3.104096, 3.104096, 3.540511],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_lagrange():
a = populate_simple("DGFEM", "LAGRANGE")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[3.536038, 3.096808, 3.096808, 3.536038],
[3.096808, 2.732475, 2.732475, 3.096808],
[3.096808, 2.732475, 2.732475, 3.096808],
[3.536038, 3.096808, 3.096808, 3.536038],
],
[
[2.896267, 2.615275, 2.615275, 2.896267],
[2.615275, 2.385484, 2.385484, 2.615275],
[2.615275, 2.385484, 2.385484, 2.615275],
[2.896267, 2.615275, 2.615275, 2.896267],
],
[
[2.896267, 2.615275, 2.615275, 2.896267],
[2.615275, 2.385484, 2.385484, 2.615275],
[2.615275, 2.385484, 2.385484, 2.615275],
[2.896267, 2.615275, 2.615275, 2.896267],
],
[
[3.536038, 3.096808, 3.096808, 3.536038],
[3.096808, 2.732475, 2.732475, 3.096808],
[3.096808, 2.732475, 2.732475, 3.096808],
[3.536038, 3.096808, 3.096808, 3.536038],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_sct_step():
a = populate_simple("SCTSTEP", "anything")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[3.273572, 2.948301, 2.948502, 3.291909],
[2.811363, 2.464789, 2.468086, 2.813676],
[2.921249, 2.576771, 2.593078, 2.919847],
[3.138840, 2.784381, 2.785791, 3.139999],
],
[
[2.466767, 2.188464, 2.191274, 2.465690],
[2.168904, 1.883310, 1.884325, 2.169292],
[2.181507, 1.891052, 1.895120, 2.178766],
[2.438198, 2.161378, 2.161873, 2.438270],
],
[
[2.429940, 2.143983, 2.143274, 2.427243],
[2.144259, 1.849312, 1.848996, 2.143790],
[2.142347, 1.843699, 1.841852, 2.140937],
[2.425510, 2.142483, 2.142357, 2.425371],
],
[
[3.091479, 2.729188, 2.728940, 3.091578],
[2.727627, 2.366091, 2.365882, 2.727488],
[2.726782, 2.365203, 2.364727, 2.726503],
[3.087793, 2.725209, 2.725085, 3.087700],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_ahotn_ln_alternating():
a = populate_intermediate_1("AHOTN", "LN")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.302715, 2.230236, 1.817902, 2.952883],
[2.230236, 1.292285, 1.620001, 1.817902],
[1.817902, 1.620001, 1.292285, 2.230236],
[2.952883, 1.817902, 2.230236, 2.302715],
],
[
[2.289555, 1.443020, 1.762396, 1.811167],
[1.443020, 1.283541, 1.038793, 1.762396],
[1.762396, 1.038793, 1.283541, 1.443020],
[1.811167, 1.762396, 1.443020, 2.289555],
],
[
[1.811167, 1.762396, 1.443020, 2.289555],
[1.762396, 1.038793, 1.283541, 1.443020],
[1.443020, 1.283541, 1.038793, 1.762396],
[2.289555, 1.443020, 1.762396, 1.811167],
],
[
[2.952883, 1.817902, 2.230236, 2.302715],
[1.817902, 1.620001, 1.292285, 2.230236],
[2.230236, 1.292285, 1.620001, 1.817902],
[2.302715, 2.230236, 1.817902, 2.952883],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_ahotn_ll_alternating():
a = populate_intermediate_1("AHOTN", "LL")
dict_results = pyne.spatialsolver.solve(a)
exp = np.array(
[
[
[2.31140733, 2.20295478, 1.83219443, 2.93370678],
[2.20295478, 1.32420289, 1.56965005, 1.83219443],
[1.83219443, 1.56965005, 1.32420289, 2.20295478],
[2.93370678, 1.83219443, 2.20295478, 2.31140733],
],
[
[2.27440404, 1.45579431, 1.74010961, 1.81996174],
[1.45579431, 1.24553997, 1.0624916, 1.74010961],
[1.74010961, 1.0624916, 1.24553997, 1.45579431],
[1.81996174, 1.74010961, 1.45579431, 2.27440404],
],
[
[1.81996174, 1.74010961, 1.45579431, 2.27440404],
[1.74010961, 1.0624916, 1.24553997, 1.45579431],
[1.45579431, 1.24553997, 1.0624916, 1.74010961],
[2.27440404, 1.45579431, 1.74010961, 1.81996174],
],
[
[2.93370678, 1.83219443, 2.20295478, 2.31140733],
[1.83219443, 1.56965005, 1.32420289, 2.20295478],
[2.20295478, 1.32420289, 1.56965005, 1.83219443],
[2.31140733, 2.20295478, 1.83219443, 2.93370678],
],
]
)
obs = np.array(dict_results["flux"])
assert_array_almost_equal(exp, obs, 4)
def test_ahotn_nefd_alternating():
a = populate_intermediate_1("AHOTN", "NEFD")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.320847, 2.193170, 1.836823, 2.923995],
[2.193170, 1.310507, 1.568554, 1.836823],
[1.836823, 1.568554, 1.310507, 2.193170],
[2.923995, 1.836823, 2.193170, 2.320847],
],
[
[2.266863, 1.456056, 1.732060, 1.824538],
[1.456056, 1.241531, 1.049696, 1.732060],
[1.732060, 1.049696, 1.241531, 1.456056],
[1.824538, 1.732060, 1.456056, 2.266863],
],
[
[1.824538, 1.732060, 1.456056, 2.266863],
[1.732060, 1.049696, 1.241531, 1.456056],
[1.456056, 1.241531, 1.049696, 1.732060],
[2.266863, 1.456056, 1.732060, 1.824538],
],
[
[2.923995, 1.836823, 2.193170, 2.320847],
[1.836823, 1.568554, 1.310507, 2.193170],
[2.193170, 1.310507, 1.568554, 1.836823],
[2.320847, 2.193170, 1.836823, 2.923995],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_ld_alternating():
a = populate_intermediate_1("DGFEM", "LD")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.420725, 2.104426, 1.900442, 2.889886],
[2.104426, 1.299636, 1.433389, 1.900442],
[1.900442, 1.433389, 1.299636, 2.104426],
[2.889886, 1.900442, 2.104426, 2.420725],
],
[
[2.224013, 1.498666, 1.647904, 1.894524],
[1.498666, 1.119896, 1.039153, 1.647904],
[1.647904, 1.039153, 1.119896, 1.498666],
[1.894524, 1.647904, 1.498666, 2.224013],
],
[
[1.894524, 1.647904, 1.498666, 2.224013],
[1.647904, 1.039153, 1.119896, 1.498666],
[1.498666, 1.119896, 1.039153, 1.647904],
[2.224013, 1.498666, 1.647904, 1.894524],
],
[
[2.889886, 1.900442, 2.104426, 2.420725],
[1.900442, 1.433389, 1.299636, 2.104426],
[2.104426, 1.299636, 1.433389, 1.900442],
[2.420725, 2.104426, 1.900442, 2.889886],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_dense_alternating():
a = populate_intermediate_1("DGFEM", "DENSE")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.420725, 2.104426, 1.900442, 2.889886],
[2.104426, 1.299636, 1.433389, 1.900442],
[1.900442, 1.433389, 1.299636, 2.104426],
[2.889886, 1.900442, 2.104426, 2.420725],
],
[
[2.224013, 1.498666, 1.647904, 1.894524],
[1.498666, 1.119896, 1.039153, 1.647904],
[1.647904, 1.039153, 1.119896, 1.498666],
[1.894524, 1.647904, 1.498666, 2.224013],
],
[
[1.894524, 1.647904, 1.498666, 2.224013],
[1.647904, 1.039153, 1.119896, 1.498666],
[1.498666, 1.119896, 1.039153, 1.647904],
[2.224013, 1.498666, 1.647904, 1.894524],
],
[
[2.889886, 1.900442, 2.104426, 2.420725],
[1.900442, 1.433389, 1.299636, 2.104426],
[2.104426, 1.299636, 1.433389, 1.900442],
[2.420725, 2.104426, 1.900442, 2.889886],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_dgfem_lagrange_alternating():
a = populate_intermediate_1("DGFEM", "LAGRANGE")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.403548, 2.135009, 1.885348, 2.906123],
[2.135009, 1.300693, 1.469197, 1.885348],
[1.885348, 1.469197, 1.300693, 2.135009],
[2.906123, 1.885348, 2.135009, 2.403548],
],
[
[2.241881, 1.486578, 1.673153, 1.882209],
[1.486578, 1.145347, 1.036189, 1.673153],
[1.673153, 1.036189, 1.145347, 1.486578],
[1.882209, 1.673153, 1.486578, 2.241881],
],
[
[1.882209, 1.673153, 1.486578, 2.241881],
[1.673153, 1.036189, 1.145347, 1.486578],
[1.486578, 1.145347, 1.036189, 1.673153],
[2.241881, 1.486578, 1.673153, 1.882209],
],
[
[2.906123, 1.885348, 2.135009, 2.403548],
[1.885348, 1.469197, 1.300693, 2.135009],
[2.135009, 1.300693, 1.469197, 1.885348],
[2.403548, 2.135009, 1.885348, 2.906123],
],
]
correct_flux_rounded = np.around(correct_flux, decimals=4)
if (rounded_flux == correct_flux_rounded).all():
print("flux's are equal!")
else:
raise AssertionError(
"Flux outputs are not equal for ahotn-nefd example. Check system setup."
)
def test_sct_step_alternating():
a = populate_intermediate_1("SCTSTEP", "anything")
dict_results = pyne.spatialsolver.solve(a)
rounded_flux = np.around(dict_results["flux"], decimals=4)
correct_flux = [
[
[2.103727, 2.129333, 1.775806, 2.709218],
[1.984849, 1.172710, 1.337597, 1.664623],
[1.757312, 1.459605, 1.282230, 2.107971],
[2.551582, 1.644416, 1.966496, 1.996478],
],
[
[1.909362, 1.216011, 1.443766, 1.521228],
[1.198507, 0.8426090, 0.7858172, 1.423269],
[1.435932, 0.7960783, 0.8584189, 1.209827],
[1.500600, 1.417286, 1.194468, 1.887075],
],
[
[1.497664, 1.410221, 1.186999, 1.881503],
[1.408052, 0.7672912, 0.8230592, 1.185632],
[1.186346, 0.8224311, 0.7656347, 1.407697],
[1.878868, 1.184635, 1.406690, 1.494015],
],
[
[2.519203, 1.608783, 1.927761, 1.963608],
[1.608023, 1.265341, 1.108607, 1.927101],
[1.9271, 1.108730, 1.265047, 1.608085],
[1.962463, 1.926423, 1.607454, 2.518035],
],
]
    correct_flux_rounded = np.around(correct_flux, decimals=4)
    if (rounded_flux == correct_flux_rounded).all():
        print("fluxes are equal!")
    else:
        raise AssertionError(
            "Flux outputs are not equal for sctstep alternating example. Check system setup."
        )
import numpy as np
import torch
def cross_op(r):
"""
Return the cross operator as a matrix
i.e. for input vector r \in \R^3
output rX s.t. rX.dot(v) = np.cross(r, v)
where rX \in \R^{3 X 3}
"""
    rX = np.zeros((3, 3))
    rX[0, 1] = -r[2]
    rX[0, 2] = r[1]
    rX[1, 0] = r[2]
    rX[1, 2] = -r[0]
    rX[2, 0] = -r[1]
    rX[2, 1] = r[0]
    return rX
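# Quick self-check sketch (illustrative values, not part of the original file):
#   r, v = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#   assert np.allclose(cross_op(r).dot(v), np.cross(r, v))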
#####################################################################################################################
# more_nodes: This module implements several new nodes and helper functions. It is part of the Cuicuilco framework. #
# #
# These nodes include: BasicAdaptiveCutoffNode, SFA_GaussianClassifier, RandomizedMaskNode, GeneralExpansionNode, #
# PointwiseFunctionNode, RandomPermutationNode #
# #
# By <NAME>. <EMAIL> #
# Ruhr-University-Bochum, Institute for Neural Computation, Group of Prof. Dr. Wiskott #
#####################################################################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy
import scipy
import scipy.optimize
import scipy.stats
from scipy.stats import ortho_group
import copy
import sys
import inspect
import mdp
from mdp.utils import (mult, pinv, symeig, CovarianceMatrix, SymeigException)
from . import sfa_libs
from .sfa_libs import select_rows_from_matrix, distance_squared_Euclidean
# from . import inversion
from .histogram_equalization import *
def add_corrections(initial_corrections, added_corrections):
if initial_corrections is None:
return added_corrections
elif added_corrections is None:
return initial_corrections
else:
return initial_corrections * added_corrections
def combine_correction_factors(flow_or_node, average_over_layers = True, average_inside_layers=False):
"""This function takes into account all corrections performed by the BasicAdaptiveCutoffNodes of
a flow (possibly a hierarchical network) and combines them into a single vector. The function also
works on standard nodes.
average_over_layers: if True, the combined corrections are the average of the corrections of each
node in the flow, otherwise they are multiplied (omitting nodes without corrections)
average_inside_layers: if True, the combined corrections of Layers are computed as the average of
the corrections of each node in the layer, otherwise they are multiplied
The combined correction factor of each sample estimates the probability that it is not an anomaly. That is,
correction=1.0 implies "not anomaly", and smaller values increase the rareness of the sample.
"""
final_corrections = None
final_gauss_corrections = None
if isinstance(flow_or_node, mdp.Flow):
flow = flow_or_node
if average_over_layers:
corrections = []
gauss_corrections = []
for node in flow:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node, average_over_layers)
if another_node_corrections is not None:
corrections.append(another_node_corrections)
if another_node_gauss_corrections is not None:
gauss_corrections.append(another_node_gauss_corrections)
if len(corrections) > 0:
corrections = numpy.stack(corrections, axis=1)
final_corrections = corrections.mean(axis=1)
gauss_corrections = numpy.stack(gauss_corrections, axis=1)
final_gauss_corrections = gauss_corrections.mean(axis=1)
else:
final_corrections = None
final_gauss_corrections = None
else:
for node in flow:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node)
final_corrections = add_corrections(final_corrections, another_node_corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections)
elif isinstance(flow_or_node, mdp.Node):
node = flow_or_node
if isinstance(node, mdp.hinet.CloneLayer):
err = "CloneLayers not yet supported when computing/storing correction factors"
print(err)
final_corrections = None
final_gauss_corrections = None
# raise Exception(err)
elif isinstance(node, mdp.hinet.Layer):
if average_inside_layers:
corrections = []
gauss_corrections = []
for another_node in node.nodes:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node)
corrections.append(another_node_corrections)
gauss_corrections.append(another_node_gauss_corrections)
if len(corrections) > 0:
corrections = numpy.stack(corrections, axis=1)
final_corrections = corrections.mean(axis=1)
gauss_corrections = numpy.stack(gauss_corrections, axis=1)
final_gauss_corrections = gauss_corrections.mean(axis=1)
else:
final_corrections = None
final_gauss_corrections = None
else:
for another_node in node.nodes:
another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node)
final_corrections = add_corrections(final_corrections, another_node_corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections)
elif isinstance(node, BasicAdaptiveCutoffNode):
final_corrections = add_corrections(final_corrections, node.corrections)
final_gauss_corrections = add_corrections(final_gauss_corrections, node.gauss_corrections)
return final_corrections, final_gauss_corrections
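# Hedged usage sketch (the flow and data names are illustrative): after running
# trained_flow.execute(x_test), the per-sample factors of all BasicAdaptiveCutoffNodes
# can be combined as
#   corrections, gauss_corrections = combine_correction_factors(trained_flow)
#   suspicious = numpy.argsort(corrections)[:20]   # smallest factors = most atypical samples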
class BasicAdaptiveCutoffNode(mdp.PreserveDimNode):
"""Node that allows to "cut off" values at bounds derived from the training data.
This node is similar to CutoffNode, but the bounds are computed based on the training data. And it is
also similar to AdaptiveCutoffNode, but no histograms are stored and the limits are hard.
    This node has no effect on training data, but it corrects atypical variances in test data
and may improve generalization.
"""
def __init__(self, input_dim=None, output_dim=None, num_rotations=1, measure_corrections=False,
only_measure=False, verbose=True, dtype=None):
"""Initialize node. """
super(BasicAdaptiveCutoffNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
self.lower_bounds = None
self.upper_bounds = None
self.rotation_matrices = None
self.num_rotations = num_rotations
self.measure_corrections = measure_corrections
self.corrections = None
self.gauss_corrections = None
self.only_measure = only_measure
self.verbose = verbose
self._avg_x = None
self._avg_x_squared = None
self._num_samples = 0
self._std_x = None
if self.verbose:
print("num_rotations:", num_rotations, "measure_corrections:", measure_corrections,
"only_measure:", only_measure, "verbose:", verbose)
@staticmethod
def is_trainable():
return True
@staticmethod
def is_invertible():
return True
@staticmethod
def _get_supported_dtypes():
return (mdp.utils.get_dtypes('Float'))
def _train(self, x):
# initialize rotations and arrays that store the bounds
dim = x.shape[1]
if self.rotation_matrices is None:
self.rotation_matrices = [None] * self.num_rotations
self.lower_bounds = [None] * self.num_rotations
self.upper_bounds = [None] * self.num_rotations
if self.num_rotations >= 1:
self.rotation_matrices[0] = numpy.eye(dim)
for i in range(1, self.num_rotations):
self.rotation_matrices[i] = ortho_group.rvs(dim=dim)
# The training method updates the lower and upper bounds
for i in range(self.num_rotations):
rotated_data = numpy.dot(x, self.rotation_matrices[i])
if self.lower_bounds[i] is None:
self.lower_bounds[i] = rotated_data.min(axis=0)
else:
self.lower_bounds[i] = numpy.minimum(self.lower_bounds[i], rotated_data.min(axis=0))
if self.upper_bounds[i] is None:
self.upper_bounds[i] = rotated_data.max(axis=0)
else:
self.upper_bounds[i] = numpy.maximum(self.upper_bounds[i], rotated_data.max(axis=0))
if self._avg_x is None:
self._avg_x = x.sum(axis=0)
self._avg_x_squared = (x**2).sum(axis=0)
else:
self._avg_x += x.sum(axis=0)
self._avg_x_squared += (x ** 2).sum(axis=0)
self._num_samples += x.shape[0]
def _stop_training(self):
self._avg_x /= self._num_samples
self._avg_x_squared /= self._num_samples
self._std_x = (self._avg_x_squared - self._avg_x **2) ** 0.5
if self.verbose:
print("self._avg_x", self._avg_x)
print("self._avg_x_squared", self._avg_x_squared)
print("self._std_x", self._std_x)
def _execute(self, x):
"""Return the clipped data."""
num_samples = x.shape[0]
self.corrections = numpy.ones(num_samples)
self.gauss_corrections = numpy.ones(num_samples)
if self.only_measure:
x_copy = x.copy()
for i in range(self.num_rotations):
data_rotated = numpy.dot(x, self.rotation_matrices[i])
data_rotated_clipped = numpy.clip(data_rotated, self.lower_bounds[i], self.upper_bounds[i])
if self.measure_corrections:
interval = numpy.abs(self.upper_bounds[i] - self.lower_bounds[i])
delta = numpy.abs(data_rotated_clipped - data_rotated)
# factors = interval ** 2 / (delta + interval) ** 2
norm_delta = delta / interval
factors = 1.0 - (norm_delta / (norm_delta + 0.15)) ** 2
self.corrections *= factors.prod(axis=1) # consider using here and below the mean instead of the product
if self.verbose:
print("Factors of BasicAdaptiveCutoffNode:", factors)
# Computation of Gaussian probabilities
factors = scipy.stats.norm.pdf(x, loc=self._avg_x, scale=4*self._std_x)
if self.verbose:
print("Factors of BasicAdaptiveCutoffNode (gauss):", factors)
print("x.mean(axis=0):", x.mean(axis=0))
print("x.std(axis=0):", x.std(axis=0))
self.gauss_corrections *= factors.prod(axis=1)
x = numpy.dot(data_rotated_clipped, self.rotation_matrices[i].T) # Project back to original coordinates
if self.verbose:
print("Corrections of BasicAdaptiveCutoffNode:", self.corrections)
print("20 worst final corrections at indices:", numpy.argsort(self.corrections)[0:20])
print("20 worst final corrections:", self.corrections[numpy.argsort(self.corrections)[0:20]])
print("Gaussian corrections of BasicAdaptiveCutoffNode:", self.gauss_corrections)
print("20 worst final Gaussian corrections at indices:", numpy.argsort(self.gauss_corrections)[0:20])
print("20 worst final Gaussian corrections:",
self.corrections[numpy.argsort(self.gauss_corrections)[0:20]])
if self.only_measure:
return x_copy
else:
return x
def _inverse(self, x):
"""An approximate inverse applies the same clipping. """
return self.execute(x)
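# Hedged usage sketch for BasicAdaptiveCutoffNode (shapes and names are illustrative):
#   node = BasicAdaptiveCutoffNode(num_rotations=3, measure_corrections=True, verbose=False)
#   node.train(x_train); node.stop_training()
#   y_test = node.execute(x_test)       # test data clipped to the training bounds
#   scores = node.corrections           # ~1.0 for typical samples, smaller for outliers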
class SFA_GaussianClassifier(mdp.ClassifierNode):
""" This node is a simple extension of the GaussianClassifier node, where SFA is applied before the classifier.
The labels are important, since they are used to order the data samples before SFA.
"""
def __init__(self, reduced_dim=None, verbose=False, **argv):
super(SFA_GaussianClassifier, self).__init__(**argv)
self.gc_node = mdp.nodes.GaussianClassifier()
self.reduced_dim = reduced_dim
if self.reduced_dim > 0:
self.sfa_node = mdp.nodes.SFANode(output_dim=self.reduced_dim)
else:
self.sfa_node = mdp.nodes.IdentityNode()
self.verbose = verbose
def _train(self, x, labels=None):
if self.reduced_dim > 0:
ordering = numpy.argsort(labels)
x_ordered = x[ordering, :]
self.sfa_node.train(x_ordered)
self.sfa_node.stop_training()
if self.verbose:
print("SFA_GaussianClassifier: sfa_node.d = ", self.sfa_node.d)
else: # sfa_node is the identity node
pass
y = self.sfa_node.execute(x)
self.gc_node.train(y, labels=labels)
self.gc_node.stop_training()
def _label(self, x):
y = self.sfa_node.execute(x)
return self.gc_node.label(y)
def regression(self, x, avg_labels, estimate_std=False):
y = self.sfa_node.execute(x)
return self.gc_node.regression(y, avg_labels, estimate_std)
def regressionMAE(self, x, avg_labels):
y = self.sfa_node.execute(x)
return self.gc_node.regressionMAE(y, avg_labels)
def softCR(self, x, true_classes):
y = self.sfa_node.execute(x)
return self.gc_node.softCR(y, true_classes)
def class_probabilities(self, x):
y = self.sfa_node.execute(x)
return self.gc_node.class_probabilities(y)
@staticmethod
def is_trainable():
return True
# using the provided average and standard deviation
def gauss_noise(x, avg, std):
return numpy.random.normal(avg, std, x.shape)
# Zero centered
def additive_gauss_noise(x, std):
return x + numpy.random.normal(0, std, x.shape)
class RandomizedMaskNode(mdp.Node):
"""Selectively mask some components of a random variable by
hiding them with arbitrary noise or by removing them from the feature vector.
This code has been inspired by NoiseNode
"""
def __init__(self, remove_mask=None, noise_amount_mask=None, noise_func=gauss_noise, noise_args=(0, 1),
noise_mix_func=None, input_dim=None, dtype=None):
self.remove_mask = remove_mask
self.noise_amount_mask = noise_amount_mask
self.noise_func = noise_func
self.noise_args = noise_args
self.noise_mix_func = noise_mix_func
self.seen_samples = 0
self.x_avg = None
self.x_std = None
self.type = dtype
if remove_mask is not None and input_dim is None:
input_dim = remove_mask.size
elif remove_mask is None and input_dim is not None:
remove_mask = numpy.zeros(input_dim) > 0.5
        elif remove_mask is not None and input_dim is not None:
if remove_mask.size != input_dim:
err = "size of remove_mask and input_dim not compatible"
raise Exception(err)
else:
err = "At least one of input_dim or remove_mask should be specified"
raise Exception(err)
if noise_amount_mask is None:
print ("Signal will be only the computed noise")
self.noise_amount_mask = numpy.ones(input_dim)
else:
self.noise_amount_mask = noise_amount_mask
output_dim = remove_mask.size - remove_mask.sum()
print ("Output_dim should be:", output_dim)
super(RandomizedMaskNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
@staticmethod
def is_trainable():
return True
def _train(self, x):
if self.x_avg is None:
self.x_avg = numpy.zeros(self.input_dim, dtype=self.type)
self.x_std = numpy.zeros(self.input_dim, dtype=self.type)
new_samples = x.shape[0]
self.x_avg = (self.x_avg * self.seen_samples + x.sum(axis=0)) / (self.seen_samples + new_samples)
self.x_std = (self.x_std * self.seen_samples + x.std(axis=0) * new_samples) / (self.seen_samples + new_samples)
self.seen_samples = self.seen_samples + new_samples
@staticmethod
def is_invertible():
return False
def _execute(self, x):
print ("computed X_avg=", self.x_avg)
print ("computed X_std=", self.x_std)
noise_mat = self.noise_func(x, self.x_avg, self.x_std)
# noise_mat = self._refcast(self.noise_func(*self.noise_args,
# **{'size': x.shape}))
print ("Noise_amount_mask:", self.noise_amount_mask)
print ("Noise_mat:", noise_mat)
noisy_signal = (1.0 - self.noise_amount_mask) * x + self.noise_amount_mask * noise_mat
preserve_mask = (self.remove_mask == False)
return noisy_signal[:, preserve_mask]
class GeneralExpansionNode(mdp.Node):
def __init__(self, funcs, input_dim=None, dtype=None, \
use_pseudoinverse=True, use_hint=False, output_dim=None, starting_point=None, use_special_features=False, max_steady_factor=1.5,
delta_factor=0.6, min_delta=0.00001, verbose=False):
self.funcs = funcs
self.exp_output_dim = output_dim
self.expanded_dims = None
self.starting_point = starting_point
self.use_special_features = use_special_features
if self.funcs == "RandomSigmoids" and self.exp_output_dim <= 0:
er = "output_dim in GeneralExpansion node with RandomSigmoids should be at least 1, but is" + \
str(self.exp_output_dim)
raise Exception(er)
self.use_pseudoinverse = use_pseudoinverse
self.use_hint = use_hint
self.max_steady_factor = max_steady_factor
self.delta_factor = delta_factor
self.min_delta = min_delta
self.verbose = verbose
if self.verbose:
print("GeneralExpansionNode with expansion functions:", funcs)
self.rs_coefficients = None
self.rs_offsets = None
self.rs_data_training_std = None
self.rs_data_training_mean = None
self.normalization_constant = None
super(GeneralExpansionNode, self).__init__(input_dim, dtype)
def expanded_dim(self, n):
exp_dim = 0
x = numpy.zeros((1, n))
for func in self.funcs:
outx = func(x)
# print "outx= ", outx
exp_dim += outx.shape[1]
return exp_dim
def output_sizes(self, n):
if self.funcs == "RandomSigmoids":
sizes = [self.exp_output_dim]
else:
sizes = numpy.zeros(len(self.funcs), dtype=int)
x = numpy.zeros((1, n))
for i, func in enumerate(self.funcs):
outx = func(x)
sizes[i] = outx.shape[1]
print ("S", end="")
return sizes
def is_trainable(self):
if self.funcs == "RandomSigmoids":
return True
else:
return False
def _train(self, x, verbose=None):
if verbose is None:
verbose = self.verbose
if self.input_dim is None:
self.set_input_dim(x.shape[1])
input_dim = self.input_dim
# Generate functions used for regression
self.rs_data_training_mean = x.mean(axis=0)
self.rs_data_training_std = x.std(axis=0)
if verbose:
print ("GeneralExpansionNode: output_dim=", self.output_dim, end="")
starting_point = self.starting_point
c1, l1 = generate_random_sigmoid_weights(self.input_dim, self.output_dim)
if starting_point == "Identity":
if verbose:
print ("starting_point: adding (encoded) identity coefficients to expansion")
c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity
elif starting_point == "Sigmoids":
if verbose:
print ("starting_point: adding sigmoid of coefficients to expansion")
c1[0:input_dim, 0:input_dim] = 4.0 * numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 0.0
elif starting_point == "08Exp":
if verbose:
print ("starting_point: adding (encoded) 08Exp coefficients to expansion")
c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim)
c1[0:input_dim, input_dim:2 * input_dim] = numpy.identity(input_dim)
l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity
l1[input_dim:2 * input_dim] = numpy.ones(input_dim) * 0.8 # Code abs(x)**0.8
elif starting_point == "Pseudo-Identity":
if verbose:
print ("starting_point: adding pseudo-identity coefficients to expansion")
c1[0:input_dim, 0:input_dim] = 0.1 * numpy.identity(input_dim)
l1[0:input_dim] = numpy.zeros(input_dim) # nothig is encoded
elif starting_point is None:
if verbose:
print ("starting_point: no starting point")
else:
er = "Unknown starting_point", starting_point
raise Exception(er)
self.rs_coefficients = c1
self.rs_offsets = l1
# 4.0 was working fine, 2.0 was apparently better. This also depends on how many features are computed!!!
self.normalization_constant = (2.0 / self.input_dim) ** 0.5
def is_invertible(self):
return self.use_pseudoinverse
def inverse(self, x, use_hint=None, max_steady_factor=None, delta_factor=None, min_delta=None):
if self.use_pseudoinverse is False:
ex = "Inversion not activated"
raise Exception(ex)
if use_hint is None:
use_hint = self.use_hint
if max_steady_factor is None:
max_steady_factor = self.max_steady_factor
if delta_factor is None:
delta_factor = self.delta_factor
if min_delta is None:
min_delta = self.min_delta
# print "Noisy pre = ", x, "****************************************************"
app_x_2, app_ex_x_2 = invert_exp_funcs2(x, self.input_dim, self.funcs, use_hint=use_hint,
max_steady_factor=max_steady_factor, delta_factor=delta_factor,
min_delta=min_delta)
# print "Noisy post = ", x, "****************************************************"
return app_x_2
def _set_input_dim(self, n):
self._input_dim = n
if self.funcs == "RandomSigmoids":
self._output_dim = self.exp_output_dim
else:
self._output_dim = self.expanded_dim(n)
self.expanded_dims = self.output_sizes(n)
def _execute(self, x):
if self.input_dim is None:
self.set_input_dim(x.shape[1])
if "expanded_dims" not in self.__dict__:
self.expanded_dims = self.output_sizes(self.input_dim)
if self.funcs != "RandomSigmoids":
num_samples = x.shape[0]
# output_dim = expanded_dim(self.input_dim)
# self.expanded_dims = self.output_sizes(self.input_dim)
out = numpy.zeros((num_samples, self.output_dim))
current_pos = 0
for i, func in enumerate(self.funcs):
out[:, current_pos:current_pos + self.expanded_dims[i]] = func(x)
current_pos += self.expanded_dims[i]
else:
data_norm = self.normalization_constant * (x - self.rs_data_training_mean) / self.rs_data_training_std
# A variation of He random weight initialization
out = extract_sigmoid_features(data_norm, self.rs_coefficients, self.rs_offsets, scale=1.0, offset=0.0,
use_special_features=self.use_special_features)
return out
class PointwiseFunctionNode(mdp.Node):
""""This node applies a function to the whole input.
It also supports a given 'inverse' function.
"""
def __init__(self, func, inv_func, input_dim=None, dtype=None):
self.func = func
self.inv_func = inv_func
super(PointwiseFunctionNode, self).__init__(input_dim, dtype)
@staticmethod
def is_trainable():
return False
    def is_invertible(self):
        # Invertible when an explicit inverse function is given, or when the node acts as the identity
        return self.inv_func is not None or self.func is None
def inverse(self, x):
if self.inv_func:
return self.inv_func(x)
else:
return x
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = n
def _execute(self, x):
if self.input_dim is None:
self.set_input_dim(x.shape[1])
if self.func:
return self.func(x)
else:
return x
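# Illustrative usage sketch for PointwiseFunctionNode (a minimal example added for clarity, not part
# of the original module). It assumes numpy and mdp are imported at module level, as elsewhere in this file.
def _example_pointwise_function_node():
    x = numpy.random.uniform(0.1, 1.0, size=(5, 3))
    # Pair a function with its analytical inverse so that inverse(execute(x)) recovers x
    node = PointwiseFunctionNode(func=numpy.log, inv_func=numpy.exp)
    y = node.execute(x)
    x_rec = node.inverse(y)
    assert numpy.allclose(x, x_rec)
    return y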
class PairwiseAbsoluteExpansionNode(mdp.Node):
def expanded_dim(self, n):
return n + n * (n + 1) // 2
def is_trainable(self):
return False
def is_invertible(self):
return False
def _set_input_dim(self, n):
self._input_dim = n
self._output_dim = self.expanded_dim(n)
def _execute(self, x):
out = numpy.concatenate((x, pairwise_expansion(x, abs_sum)), axis=1)
return out
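# Worked example of the expansion size above (an illustrative note, not part of the original code):
# for n = 3 input components the node outputs n + n * (n + 1) // 2 = 3 + 6 = 9 features, i.e., the
# original signal plus one pairwise abs_sum term for every unordered pair of components (including i == j).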
# TODO:ADD inverse type sum, suitable for when output_scaling is True
class PInvSwitchboard(mdp.hinet.Switchboard):
"""This node is a variation of the RectangularSwitchboard that facilitates (approximate) inverse operations. """
def __init__(self, input_dim, connections, slow_inv=False, type_inverse="average", output_scaling=True,
additive_noise_std=0.00004, verbose=False):
super(PInvSwitchboard, self).__init__(input_dim=input_dim, connections=connections)
self.pinv = None
self.mat2 = None
self.slow_inv = slow_inv
self.type_inverse = type_inverse
self.output_dim = len(connections)
self.output_scales = None
self.additive_noise_std = additive_noise_std
self.verbose = verbose
if verbose:
print ("self.inverse_connections=", self.inverse_connections, "self.slow_inv=", self.slow_inv)
# WARNING! IF/ELIF doesn't make any sense! what are the semantics of inverse_connections
if self.inverse_connections is None:
if verbose:
print ("type(connections)", type(connections))
all_outputs = numpy.arange(self.output_dim)
self.inverse_indices = [[]] * self.input_dim
for i in range(self.input_dim):
self.inverse_indices[i] = all_outputs[connections == i]
# print "inverse_indices[%d]="%i, self.inverse_indices[i]
# print "inverse_indices =", self.inverse_indices
elif self.inverse_connections is None and not self.slow_inv:
index_array = numpy.argsort(connections)
value_array = connections[index_array]
value_range = numpy.zeros((input_dim, 2))
            self.inverse_indices = list(range(input_dim))  # list, so that items can be reassigned below
for i in range(input_dim):
value_range[i] = numpy.searchsorted(value_array, [i - 0.5, i + 0.5])
if value_range[i][1] == value_range[i][0]:
self.inverse_indices[i] = []
else:
                    self.inverse_indices[i] = index_array[int(value_range[i][0]): int(value_range[i][1])]
if verbose:
print ("inverse_indices computed in PINVSB")
elif self.inverse_connections is None and self.slow_inv:
if verbose:
print ("warning using slow inversion in PInvSwitchboard!!!")
# find input variables not used by connections:
used_inputs = numpy.unique(connections)
used_inputs_set = set(used_inputs)
all_inputs_set = set(range(input_dim))
unused_inputs_set = all_inputs_set - all_inputs_set.intersection(used_inputs_set)
unused_inputs = list(unused_inputs_set)
self.num_unused_inputs = len(unused_inputs)
# extend connections array
# ext_connections = numpy.concatenate((connections, unused_inputs))
# create connections matrix
mat_height = len(connections) + len(unused_inputs)
mat_width = input_dim
mat = numpy.zeros((mat_height, mat_width))
# fill connections matrix
for i in range(len(connections)):
mat[i, connections[i]] = 1
#
for i in range(len(unused_inputs)):
mat[i + len(connections), unused_inputs[i]] = 1
#
if verbose:
print ("extended matrix is:", mat)
# compute pseudoinverse
mat2 = numpy.matrix(mat)
self.mat2 = mat2
self.pinv = (mat2.T * mat2).I * mat2.T
else:
if verbose:
print ("Inverse connections already given, in PInvSwitchboard")
if output_scaling:
if self.inverse_connections is None and not self.slow_inv:
if verbose:
print ("**A", end="")
if self.type_inverse != "average":
err = "self.type_inverse not supported " + self.type_inverse
raise Exception(err)
self.output_scales = numpy.zeros(self.output_dim)
tt = 0
for i in range(self.input_dim):
output_indices = self.inverse_indices[i]
multiplicity = len(output_indices)
for j in output_indices:
self.output_scales[j] = (1.0 / multiplicity) ** 0.5
tt += 1
if verbose:
print ("connections in switchboard considered: ", tt, "output dimension=", self.output_dim)
elif self.inverse_connections is None and self.slow_inv:
if verbose:
print ("**B", end="")
err = "use of self.slow_inv = True is obsolete"
raise Exception(err)
else: # inverse connections are unique, mapping bijective
if verbose:
print ("**C", end="")
self.output_scales = numpy.ones(self.output_dim)
else:
if verbose:
print ("**D", end="")
self.output_scales = numpy.ones(self.output_dim)
if verbose:
print ("PINVSB output_scales =", self.output_scales)
print ("SUM output_scales/len(output_scales)=", self.output_scales.sum() / len(self.output_scales))
print ("output_scales.min()", self.output_scales.min())
# PInvSwitchboard is always invertible
def is_invertible(self):
return True
def _execute(self, x):
force_float32_type = False # Experimental variation, ignore
if force_float32_type:
x = x.astype("float32")
use_fortran_ordering = False # Experimental variation, ignore
if use_fortran_ordering:
            x = numpy.array(x, order="F")  # Fortran (column-major) memory layout
y = super(PInvSwitchboard, self)._execute(x)
# print "y computed"
# print "y.shape", y.shape
# print "output_scales ", self.output_scales
y *= self.output_scales
if self.additive_noise_std > 0.0:
n, dim = y.shape
steps = int(n / 9000 + 1)
if self.verbose:
print ("PInvSwitchboard is adding noise to the output features with std", self.additive_noise_std,
end="")
print (" computation in %d steps" % steps)
step_size = int(n / steps)
for s in range(steps):
y[step_size * s:step_size * (s + 1)] += numpy.random.uniform(low=-(3 ** 0.5) * self.additive_noise_std,
high=(3 ** 0.5) * self.additive_noise_std,
size=(step_size, dim))
if self.verbose:
print ("noise block %d added" % s)
if step_size * steps < n:
rest = n - step_size * steps
y[step_size * steps:step_size * steps + rest] += numpy.random.uniform(
low=-(3 ** 0.5) * self.additive_noise_std, high=(3 ** 0.5) * self.additive_noise_std,
size=(rest, dim))
if self.verbose:
print ("remaining noise block added")
return y
# If true inverse is present, just use it, otherwise compute it by means of the pseudoinverse
def _inverse(self, x):
x = x * (1.0 / self.output_scales)
if self.inverse_connections is None and not self.slow_inv:
height_x = x.shape[0]
mat2 = numpy.zeros((height_x, self.input_dim))
for row in range(height_x):
x_row = x[row]
for i in range(self.input_dim):
elements = x_row[self.inverse_indices[i]]
if self.type_inverse == "average":
if elements.size > 0:
mat2[row][i] = elements.mean()
else:
err = "self.type_inverse not supported: " + self.type_inverse
raise Exception(err)
output = mat2
elif self.inverse_connections is None and self.slow_inv:
height_x = x.shape[0]
full_x = numpy.concatenate((x, 255 * numpy.ones((height_x, self.num_unused_inputs))), axis=1)
data2 = numpy.matrix(full_x)
if self.verbose:
print ("x=", x)
print ("data2=", data2)
print ("PINV=", self.pinv)
output = (self.pinv * data2.T).T
else:
if self.verbose:
print ("using inverse_connections in PInvSwitchboard")
# return apply_permutation_to_signal(x, self.inverse_connections, self.input_dim)
output = select_rows_from_matrix(x, self.inverse_connections)
return output
class RandomPermutationNode(mdp.Node):
"""This node randomly permutes the components of the input signal in a consistent way.
    The concrete permutation is fixed during the training procedure.
"""
def __init__(self, input_dim=None, output_dim=None, dtype=None, verbose=False):
super(RandomPermutationNode, self).__init__(input_dim, output_dim, dtype)
self.permutation = None
self.inv_permutation = None
self.dummy = 5 # without it the hash fails!!!!!
def is_trainable(self):
return True
def is_invertible(self):
return True
def inverse(self, x):
return select_rows_from_matrix(x, self.inv_permutation)
# def localized_inverse(self, xf, yf, y):
# return y[:, self.inv_permutation]
def _set_input_dim(self, n, verbose=False):
if verbose:
print ("RandomPermutationNode: Setting input_dim to ", n)
self._input_dim = n
self._output_dim = n
def _train(self, x, verbose=True):
n = x.shape[1]
if self.input_dim is None:
self.set_input_dim(n)
if self.input_dim is None:
print ("*******Really Setting input_dim to ", n)
self.input_dim = n
if self.output_dim is None:
print ("*******Really Setting output_dim to ", n)
self.output_dim = n
if self.permutation is None:
if verbose:
print ("Creating new random permutation")
print ("Permutation=", self.permutation)
print ("x=", x, "with shape", x.shape)
print ("Input dim is: ", self.input_dim())
self.permutation = numpy.random.permutation(range(self.input_dim))
self.inv_permutation = numpy.zeros(self.input_dim, dtype="int")
self.inv_permutation[self.permutation] = numpy.arange(self.input_dim)
if verbose:
print ("Permutation=", self.permutation)
print ("Output dim is: ", self.output_dim)
def _execute(self, x, verbose=False):
# print "RandomPermutationNode: About to excecute, with input x= ", x
y = select_rows_from_matrix(x, self.permutation)
if verbose:
print ("Output shape is = ", y.shape, end="")
return y
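# Illustrative usage sketch for RandomPermutationNode (a minimal example added for clarity, not part
# of the original module). It assumes numpy is imported at module level, as elsewhere in this file.
def _example_random_permutation_node():
    x = numpy.random.normal(size=(10, 4))
    node = RandomPermutationNode()
    node.train(x)  # the permutation is created (and then fixed) during training
    node.stop_training()
    y = node.execute(x)  # the columns of x in permuted order
    x_rec = node.inverse(y)
    assert numpy.allclose(x, x_rec)
    return y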
def sfa_pretty_coefficients(sfa_node, transf_training, start_negative=True):
count = 0
for i in range(sfa_node.output_dim):
        sum_firsts = transf_training[0:12, i].sum()  # sum of the first 12 samples of signal i
if (sum_firsts > 0 and start_negative) or (sum_firsts < 0 and not start_negative):
sfa_node.sf[:, i] = (sfa_node.sf[:, i] * -1)
transf_training[:, i] = (transf_training[:, i] * -1)
count += 1
print ("Polarization of %d SFA Signals Corrected!!!\n" % count, end="")
sfa_node._bias = mdp.utils.mult(sfa_node.avg, sfa_node.sf)
print ("Bias updated")
return transf_training
def describe_flow(flow):
length = len(flow)
total_size = 0
print ("Flow has %d nodes:" % length)
for i in range(length):
node = flow[i]
node_size = compute_node_size(node)
total_size += node_size
print ("Node[%d] is %s, has input_dim=%d, output_dim=%d and size=%d" % (i, str(node), node.input_dim,
node.output_dim, node_size))
if isinstance(node, mdp.hinet.CloneLayer):
print (" contains %d cloned nodes of type %s, each with input_dim=%d, output_dim=%d" %
(len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim))
elif isinstance(node, mdp.hinet.Layer):
print (" contains %d nodes of type %s, each with input_dim=%d, output_dim=%d" %
(len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim))
print ("Total flow size: %d" % total_size)
print ("Largest node size: %d" % compute_largest_node_size(flow))
def display_node_eigenvalues(node, i, mode="All"):
if isinstance(node, mdp.hinet.CloneLayer):
if isinstance(node.nodes[0], mdp.nodes.SFANode):
print ("Node %d is a CloneLayer that contains an SFANode with d=" % i, node.nodes[0].d)
# elif isinstance(node.nodes[0], mdp.nodes.IEVMNode):
# if node.nodes[0].use_sfa:
# print ("Node %d is a CloneLayer that contains an IEVMNode containing an SFA node with" % i, end="")
# print ("num_sfa_features_preserved=%d" % node.nodes[0].num_sfa_features_preserved, end="")
# print ("and d=", node.nodes[0].sfa_node.d)
elif isinstance(node.nodes[0], mdp.nodes.iGSFANode):
print ("Node %d is a CloneLayer that contains an iGSFANode containing an SFA node with " % i, end="")
print ("num_sfa_features_preserved=%d " % node.nodes[0].num_sfa_features_preserved, end="")
print ("and d=", node.nodes[0].sfa_node.d, end=" ")
print ("and evar=", node.nodes[0].evar)
elif isinstance(node.nodes[0], mdp.nodes.PCANode):
print ("Node %d is a CloneLayer that contains a PCANode with d=" % i, node.nodes[0].d, end=" ")
print ("and evar=", node.nodes[0].explained_variance)
elif isinstance(node, mdp.hinet.Layer):
if isinstance(node.nodes[0], mdp.nodes.SFANode):
if mode == "Average":
out = 0.0
for n in node.nodes:
out += n.d
print ("Node %d is a Layer that contains %d SFANodes with avg(d)= " % (i, len(node.nodes)), out / len(node.nodes))
elif mode == "All":
for n in node.nodes:
print ("Node %d is a Layer that contains an SFANode with d= " % i, n.d)
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first SFANode has d= " % i, node.nodes[0].d)
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.iGSFANode):
if mode == "Average":
evar_avg = 0.0
d_avg = 0.0
avg_num_sfa_features = 0.0
min_num_sfa_features_preserved = min([n.num_sfa_features_preserved for n in node.nodes])
for n in node.nodes:
d_avg += n.sfa_node.d[:min_num_sfa_features_preserved]
evar_avg += n.evar
avg_num_sfa_features += n.num_sfa_features_preserved
d_avg /= len(node.nodes)
evar_avg /= len(node.nodes)
avg_num_sfa_features /= len(node.nodes)
print ("Node %d" % i, "is a Layer that contains", len(node.nodes), "iGSFANodes containing SFANodes with " +
"avg(num_sfa_features_preserved)=%f " % avg_num_sfa_features, "and avg(d)=%s" % str(d_avg) +
"and avg(evar)=%f" % evar_avg)
elif mode == "All":
print ("Node %d is a Layer that contains iGSFANodeRecNodes:" % i)
for n in node.nodes:
print (" iGSFANode containing an SFANode with num_sfa_features_preserved=%f, d=%s and evar=%f" %
(n.num_sfa_features_preserved, str(n.sfa_node.d), n.evar))
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first iGSFANode " % i, end="")
print ("contains an SFANode with num_sfa_features_preserved)=%f, d=%s and evar=%f" %
(node.nodes[0].num_sfa_features_preserved, str(node.nodes[0].sfa_node.d), node.nodes[0].evar))
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.SFAAdaptiveNLNode):
if mode == "Average":
out = 0.0
for n in node.nodes:
out += n.sfa_node.d
print ("Node %d is a Layer that contains SFAAdaptiveNLNodes containing SFANodes with", end="")
print ("avg(d)=" % i, out / len(node.nodes))
elif mode == "All":
for n in node.nodes:
print ("Node %d is a Layer that contains an SFAAdaptiveNLNode" % i, end="")
print ("containing an SFANode with d=", n.sfa_node.d)
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first SFAAdaptiveNLNode" % i)
print ("contains an SFANode with d=", node.nodes[0].sfa_node.d)
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node.nodes[0], mdp.nodes.PCANode):
if mode == "Average":
d_avg = 0.0
evar_avg = 0.0
min_num_pca_features_preserved = min([n.output_dim for n in node.nodes])
for n in node.nodes:
d_avg += n.d[:min_num_pca_features_preserved]
evar_avg += n.explained_variance
d_avg /= len(node.nodes)
evar_avg /= len(node.nodes)
print ("Node %d is a Layer that contains PCA nodes with avg(d)=%s and avg(evar)=%f" % (
i, str(d_avg), evar_avg))
elif mode == "All":
print ("Node %d is a Layer that contains PCA nodes:" % i)
for n in node.nodes:
print (" PCANode with d=%s and evar=%f" % (str(n.d), n.explained_variance))
elif mode == "FirstNodeInLayer":
print ("Node %d is a Layer, and its first PCANode" % i, "has d=%s and evar=%f" % (
str(node.nodes[0].sfa_node.d), node.nodes[0].explained_variance))
else:
er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"'
raise Exception(er)
elif isinstance(node, mdp.nodes.iGSFANode):
print ("Node %d is an iGSFANode containing an SFA node with num_sfa_features_preserved=%d" %
(i, node.num_sfa_features_preserved), end="")
print ("and d=", node.sfa_node.d)
elif isinstance(node, mdp.nodes.SFANode):
print ("Node %d is an SFANode with d=" % i, node.d)
elif isinstance(node, mdp.nodes.PCANode):
print ("Node %d is a PCANode with d=%s and evar=%f" % (i, str(node.d), node.explained_variance))
else:
print ("Cannot display eigenvalues of Node %d" % i, node)
def display_eigenvalues(flow, mode="All"):
"""This function displays the learned eigenvalues of different nodes in a trained Flow object.
    The mode parameter can take three values, which specify what to do when a layer is found:
"FirstNodeInLayer": the eigenvalues of the first node in the layer are displayed
"Average": the average eigenvalues of all nodes in a layer are displayed (bounded to the smallest length).
"All": the eigenvalues of all nodes in the layer are displayed.
"""
length = len(flow)
print ("Displaying eigenvalues of SFA Nodes in flow of length", length)
for i in range(length):
node = flow[i]
display_node_eigenvalues(node, i, mode)
def compute_node_size(node, verbose=False):
""" Computes the number of parameters (weights) that have been learned by node.
Note: Means and offsets are not counted, only (multiplicative) weights. The node must have been already trained.
The following nodes are supported currently:
    SFANode, PCANode, WhiteningNode, CloneLayer, Layer, GSFANode, iGSFANode, LinearRegressionNode
"""
if isinstance(node, mdp.nodes.iGSFANode):
return compute_node_size(node.sfa_node) + compute_node_size(node.pca_node) + compute_node_size(node.lr_node)
elif isinstance(node, (mdp.nodes.SFANode, mdp.nodes.PCANode, mdp.nodes.GSFANode, mdp.nodes.LinearRegressionNode,
mdp.nodes.WhiteningNode)) and node.input_dim is not None and node.output_dim is not None:
return node.input_dim * node.output_dim
elif isinstance(node, mdp.hinet.CloneLayer):
return compute_node_size(node.nodes[0])
elif isinstance(node, mdp.hinet.Layer):
size = 0
for node_child in node.nodes:
size += compute_node_size(node_child)
return size
else:
if verbose:
print ("compute_node_size not implemented for nodes of type:", type(node), "or training has not finished")
return 0
def compute_flow_size(flow):
""" Computes the number of weights learned by the whole flow after training.
See compute_node_size for more details on the counting procedure
"""
flow_size = 0
for node in flow:
flow_size += compute_node_size(node)
return flow_size
def compute_largest_node_size(flow):
""" Computes the larger number of weights learned by a node after training.
See compute_node_size for more details on the counting procedure
"""
largest_size = 0
for node in flow:
if (isinstance(node, mdp.nodes.SFANode) or isinstance(node, mdp.nodes.PCANode) or
isinstance(node, mdp.nodes.WhiteningNode)):
current_size = compute_node_size(node)
elif isinstance(node, mdp.hinet.CloneLayer):
current_size = compute_node_size(node.nodes[0])
elif isinstance(node, mdp.hinet.Layer):
current_size = 0
for nodechild in node.nodes:
tmp_size = compute_node_size(nodechild)
if tmp_size > current_size:
current_size = tmp_size
else:
current_size = 0
if current_size > largest_size:
largest_size = current_size
return largest_size
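# Minimal sketch of the size-reporting helpers above (added for clarity, not part of the original
# module). It assumes numpy and mdp are imported at module level and a standard MDP installation.
def _example_flow_size_report():
    x = numpy.random.normal(size=(300, 10))
    flow = mdp.Flow([mdp.nodes.PCANode(output_dim=6), mdp.nodes.SFANode(output_dim=3)])
    flow.train(x)  # a single array is used to train all trainable nodes of the flow
    describe_flow(flow)
    print("compute_flow_size:", compute_flow_size(flow))
    print("compute_largest_node_size:", compute_largest_node_size(flow))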
# Used to compare the effectiveness of several PCA Networks
def estimate_explained_variance(images, flow, sl_images, num_considered_images=100, verbose=True):
# Here explained variance is defined as 1 - normalized reconstruction error
num_images = images.shape[0]
im_numbers = numpy.random.randint(num_images, size=num_considered_images)
avg_image = images[im_numbers].mean(axis=0)
selected_images = images[im_numbers]
ori_differences = selected_images - avg_image
ori_energies = ori_differences ** 2
ori_energy = ori_energies.sum()
sl_selected_images = sl_images[im_numbers]
print ("sl_selected_images.shape=", sl_selected_images.shape)
inverses = flow.inverse(sl_selected_images)
rec_differences = inverses - avg_image
rec_energies = rec_differences ** 2
rec_energy = rec_energies.sum()
rec_errors = selected_images - inverses
rec_error_energies = rec_errors ** 2
rec_error_energy = rec_error_energies.sum()
if verbose:
explained_individual = rec_energies.sum(axis=1) / ori_energies.sum(axis=1)
print ("Individual explained variances: ", explained_individual)
print ("Which, itself has standar deviation: ", explained_individual.std())
print ("Therefore, estimated explained variance has std of about: ", explained_individual.std() / numpy.sqrt(
num_considered_images))
print ("Dumb reconstruction_energy/original_energy=", rec_energy / ori_energy)
print ("rec_error_energy/ori_energy=", rec_error_energy / ori_energy)
print ("Thus explained variance about:", 1 - rec_error_energy / ori_energy)
return 1 - rec_error_energy / ori_energy # rec_energy/ori_energy
class HeadNode(mdp.Node):
"""Preserve only the first k dimensions from the data
"""
def __init__(self, input_dim=None, output_dim=None, dtype=None):
self.type = dtype
super(HeadNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype)
def is_trainable(self):
return True
def _train(self, x):
pass
def _is_invertible(self):
return True
def _execute(self, x):
if self.output_dim is None:
er = "Warning 12345..."
raise Exception(er)
return x[:, 0:self.output_dim]
def _stop_training(self):
pass
def _inverse(self, y):
num_samples, out_dim = y.shape[0], y.shape[1]
zz = numpy.zeros((num_samples, self.input_dim - out_dim))
return numpy.concatenate((y, zz), axis=1)
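# Illustrative usage sketch for HeadNode (a minimal example added for clarity, not part of the
# original module). It assumes numpy is imported at module level, as elsewhere in this file.
def _example_head_node():
    x = numpy.random.normal(size=(8, 5))
    node = HeadNode(input_dim=5, output_dim=3)
    node.train(x)  # training is a no-op, but it fixes the dimensions
    node.stop_training()
    y = node.execute(x)  # first 3 components of every sample
    x_padded = node.inverse(y)  # the discarded components are filled with zeros
    assert numpy.allclose(y, x[:, 0:3]) and x_padded.shape == x.shape
    return y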
# # This code is obsolete.
# class SFAPCANode(mdp.Node):
# """Node that extracts slow features unless their delta value is too high. In such a case PCA features are extracted.
# """
#
# def __init__(self, input_dim=None, output_dim=None, max_delta=1.95, sfa_args={}, pca_args={}, **argv):
# super(SFAPCANode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
# self.sfa_node = mdp.nodes.SFANode(**sfa_args)
# # max delta value allowed for a slow feature, otherwise a principal component is extracted
# self.max_delta = max_delta
# self.avg = None # input average
# self.W = None # weights for complete transformation
# self.pinv = None # weights for pseudoinverse of complete transformation
#
# def is_trainable(self):
# return True
#
# def _train(self, x, **argv):
# self.sfa_node.train(x, **argv)
#
# @staticmethod
# def _is_invertible():
# return True
#
# def _execute(self, x):
# W = self.W
# avg = self.avg
# return numpy.dot(x - avg, W)
#
# def _stop_training(self, **argv):
# # New GraphSFA node
# if "_covdcovmtx" in dir(self.sfa_node):
# # Warning, fix is computed twice. TODO: avoid double computation
# C, self.avg, CD = self.sfa_node._covdcovmtx.fix()
# else:
# # Old fix destroys data... so we copy the matrices first.
# cov_mtx = copy.deepcopy(self.sfa_node._cov_mtx)
# dcov_mtx = copy.deepcopy(self.sfa_node._dcov_mtx)
#
# C, self.avg, tlen = cov_mtx.fix()
# DC, davg, dtlen = dcov_mtx.fix()
#
# dim = C.shape[0]
# type_ = C.dtype
# self.sfa_node.stop_training()
# d = self.sfa_node.d
# sfa_output_dim = len(d[d <= self.max_delta])
# sfa_output_dim = min(sfa_output_dim, self.output_dim)
# print ("sfa_output_dim=", sfa_output_dim)
#
# Wsfa = self.sfa_node.sf[:, 0:sfa_output_dim]
# print ("Wsfa.shape=", Wsfa.shape)
# if Wsfa.shape[1] == 0: # No slow components will be used
# print ("No Psfa created")
# PS = numpy.zeros((dim, dim), dtype=type_)
# else:
# Psfa = pinv(Wsfa)
# print ("Psfa.shape=", Psfa.shape)
# PS = numpy.dot(Wsfa, Psfa)
#
# print ("PS.shape=", PS.shape)
# Cproy = numpy.dot(PS, numpy.dot(C, PS.T))
# Cpca = C - Cproy
#
# if self.output_dim is None:
# self.output_dim = dim
#
# pca_output_dim = self.output_dim - sfa_output_dim
# print ("PCA output_dim=", pca_output_dim)
# if pca_output_dim > 0:
# pca_node = mdp.nodes.PCANode(output_dim=pca_output_dim) # WARNING: WhiteningNode should be used here
# pca_node._cov_mtx._dtype = type_
# pca_node._cov_mtx._input_dim = dim
# pca_node._cov_mtx._avg = numpy.zeros(dim, type_)
# pca_node._cov_mtx.bias = True
# pca_node._cov_mtx._tlen = 1 # WARNING!!! 1
# pca_node._cov_mtx._cov_mtx = Cpca
# pca_node._input_dim = dim
# pca_node._train_phase_started = True
# pca_node.stop_training()
# print ("pca_node.d=", pca_node.d)
# print ("1000000 * pca_node.d[0]=", 1000000 * pca_node.d[0])
#
# Wpca = pca_node.v
# Ppca = pca_node.v.T
# else:
# Wpca = numpy.array([]).reshape((dim, 0))
# Ppca = numpy.array([]).reshape((0, dim))
#
# print ("Wpca.shape=", Wpca.shape)
# print ("Ppca.shape=", Ppca.shape)
#
# self.W = numpy.concatenate((Wsfa, Wpca), axis=1)
# self.pinv = None # WARNING, why this does not work correctly: numpy.concatenate((Psfa, Ppca),axis=0) ?????
# # print "Pinv 1=", self.pinv
# # print "Pinv 2-Pinv1=", pinv(self.W)-self.pinv
# print ("W.shape=", self.W.shape)
# # print "pinv.shape=", self.pinv.shape
# print ("avg.shape=", self.avg.shape)
#
# def _inverse(self, y):
# if self.pinv is None:
# print ("Computing PINV", end="")
# self.pinv = pinv(self.W)
# return numpy.dot(y, self.pinv) + self.avg
# Computes the variance of some MDP data array
def data_variance(x):
return ((x - x.mean(axis=0)) ** 2).sum(axis=1).mean()
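# Worked note on data_variance (illustrative, not part of the original code): it returns the total
# variance of the data, i.e., the sum of the per-component variances. For samples with two
# independent, unit-variance components, data_variance(x) is approximately 1.0 + 1.0 = 2.0.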
def estimate_explained_var_linearly(x, y, x_test, y_test):
x_test_app = approximate_linearly(x, y, y_test)
explained_variance = compute_explained_var(x_test, x_test_app)
x_variance = data_variance(x_test)
print ("x_variance=", x_variance, ", explained_variance=", explained_variance)
return explained_variance / x_variance
def approximate_linearly(x, y, y_test):
lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True)
lr_node.train(y, x)
lr_node.stop_training()
x_test_app = lr_node.execute(y_test)
return x_test_app
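# Minimal sketch of estimate_explained_var_linearly on synthetic data (added for clarity, not part
# of the original module). It assumes numpy and mdp are imported at module level, as elsewhere in this file.
def _example_estimate_explained_var_linearly():
    mixing = numpy.random.normal(size=(4, 6))
    x, x_test = numpy.random.normal(size=(500, 4)), numpy.random.normal(size=(200, 4))
    y, y_test = numpy.dot(x, mixing), numpy.dot(x_test, mixing)  # y is an exact linear function of x
    # Since x can be recovered linearly from y, the explained fraction should be close to 1.0
    return estimate_explained_var_linearly(x, y, x_test, y_test)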
# Approximates x from y, and computes how sensitive the estimation is to changes in y
def sensivity_of_linearly_approximation(x, y):
lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True)
lr_node.train(y, x)
lr_node.stop_training()
beta = lr_node.beta[1:, :] # bias is used by default, we do not need to consider it
print ("beta.shape=", beta.shape)
sens = (beta ** 2).sum(axis=1)
return sens
def estimate_explained_var_with_kNN(x, y, max_num_samples_for_ev=None, max_test_samples_for_ev=None, k=1,
ignore_closest_match=False, operation="average"):
num_samples = x.shape[0]
indices_all_x = numpy.arange(x.shape[0])
    if max_num_samples_for_ev is not None:  # use only a subset of samples for reconstruction
max_num_samples_for_ev = min(max_num_samples_for_ev, num_samples)
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
y_sel = y[indices_all_x_selection]
else:
x_sel = x
y_sel = y
    if max_test_samples_for_ev is not None:  # use only a subset of samples for testing
max_test_samples_for_ev = min(max_test_samples_for_ev, num_samples)
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_test_samples_for_ev]
x_test = x[indices_all_x_selection]
y_test = y[indices_all_x_selection]
else:
x_test = x
y_test = y
x_app_test = approximate_kNN_op(x_sel, y_sel, y_test, k, ignore_closest_match, operation=operation)
print ("x_test=", x_test)
print ("x_app_test=", x_app_test)
explained_variance = compute_explained_var(x_test, x_app_test)
test_variance = data_variance(x_test)
print ("explained_variance=", explained_variance)
print ("test_variance=", test_variance)
return explained_variance / test_variance
def random_subindices(num_indices, size_selection):
if size_selection > num_indices:
ex = "Error, size_selection is larger than num_indices! (", size_selection, ">", num_indices, ")"
raise Exception(ex)
all_indices = numpy.arange(num_indices)
numpy.random.shuffle(all_indices)
return all_indices[0:size_selection] + 0
def estimate_explained_var_linear_global(subimages_train, sl_seq_training, subimages_newid, sl_seq_newid,
reg_num_signals, number_samples_EV_linear_global):
"""Function that computes how much variance is explained linearly from a global mapping.
It works as follows: 1) Linear regression is trained with sl_seq_training and subimages_train.
2) Estimation is done on subset of size number_samples_EV_linear_global from training and test data
3) For training data evaluation is done on the same data used to train LR, and on new random subset of data.
4) For test data all samples are used.
"""
indices_all_train1 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global)
indices_all_train2 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global)
indices_all_newid = numpy.arange(subimages_newid.shape[0])
lr_node = mdp.nodes.LinearRegressionNode()
sl_seq_training_sel1 = sl_seq_training[indices_all_train1, 0:reg_num_signals]
subimages_train_sel1 = subimages_train[indices_all_train1]
lr_node.train(sl_seq_training_sel1,
subimages_train_sel1) # Notice that the input "x"=n_sfa_x and the output to learn is "y" = x_pca
lr_node.stop_training()
subimages_train_app1 = lr_node.execute(sl_seq_training_sel1)
EVLinGlobal_train1 = compute_explained_var(subimages_train_sel1, subimages_train_app1)
data_variance_train1 = data_variance(subimages_train_sel1)
sl_seq_training_sel2 = sl_seq_training[indices_all_train2, 0:reg_num_signals]
subimages_train_sel2 = subimages_train[indices_all_train2]
subimages_train_app2 = lr_node.execute(sl_seq_training_sel2)
EVLinGlobal_train2 = compute_explained_var(subimages_train_sel2, subimages_train_app2)
data_variance_train2 = data_variance(subimages_train_sel2)
sl_seq_newid_sel = sl_seq_newid[indices_all_newid, 0:reg_num_signals]
subimages_newid_sel = subimages_newid[indices_all_newid]
subimages_newid_app = lr_node.execute(sl_seq_newid_sel)
EVLinGlobal_newid = compute_explained_var(subimages_newid_sel, subimages_newid_app)
data_variance_newid = data_variance(subimages_newid_sel)
print ("Data variances=", data_variance_train1, data_variance_train2, data_variance_newid)
print ("EVLinGlobal=", EVLinGlobal_train1, EVLinGlobal_train2, EVLinGlobal_newid)
return EVLinGlobal_train1 / data_variance_train1, EVLinGlobal_train2 / data_variance_train2, \
EVLinGlobal_newid / data_variance_newid
def compute_explained_var(true_samples, approximated_samples):
"""Computes the explained variance provided by the approximation to some data, with respect to the true data.
Additionally, the original data variance is provided:
app = true_samples + error
exp_var ~ energy(true_samples) - energy(error)
"""
error = (approximated_samples - true_samples)
error_energy = (error ** 2.0).sum(axis=1).mean() # average squared error per sample
true_energy = data_variance(true_samples) # (true_samples-true_samples.mean(axis=0)).var()
explained_var = true_energy - error_energy
# print "Debug information:", error_energy, true_energy
return explained_var
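# Illustrative sanity checks for compute_explained_var (not part of the original code):
#   compute_explained_var(x, x) equals data_variance(x)                       (perfect reconstruction)
#   compute_explained_var(x, numpy.tile(x.mean(axis=0), (len(x), 1))) is approximately 0.0
# The returned value is an absolute amount of variance; divide by data_variance(x) to obtain a fraction.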
def approximate_kNN_op(x, x_exp, y_exp, k=1, ignore_closest_match=False, operation=None):
""" Approximates a signal y given its expansion y_exp. The method is kNN with training data given by x, x_exp
If label_avg=True, the inputs of the k closest expansions are averaged, otherwise the most frequent
among k-closest is returned.
When label_avg=True, one can also specify to ignore the best match (useful if y_exp = x_exp)
"""
n = mdp.nodes.KNNClassifier(k=k, execute_method="label")
n.train(x_exp, range(len(x_exp)))
if operation == "average":
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
y = x[ii].mean(axis=1)
# y_exp_app = x_exp[ii].mean(axis=1)
# print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean()
# print "y=",y
return y # x[ii].mean(axis=1)
elif operation == "lin_app":
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
x_dim = x.shape[1]
x_exp_dim = x_exp.shape[1]
x_mean = x.mean(axis=0)
x = x - x_mean
nk = ii.shape[1]
y = numpy.zeros((len(y_exp), x_dim))
y_exp_app = numpy.zeros((len(y_exp), x_exp_dim))
x_ind = x[ii]
x_exp_ind = x_exp[ii]
y_expit = numpy.zeros((x_exp_dim + 1, 1))
k = 1.0e10 # make larger to force sum closer to one?!
y_expit[x_exp_dim, 0] = 0.0 * 1.0 * k
x_expit = numpy.zeros((x_exp_dim + 1, nk))
x_expit[x_exp_dim, :] = 1.0 * k #
zero_threshold = -40.0500 # -0.004
max_zero_weights = nk // 5
w_0 = numpy.ones((nk, 1)) * 1.0 / nk
# print "w_0", w_0
for i in range(len(y_exp)):
negative_weights = 0
iterate = True
# print "Iteration: ", i,
x_expit[0:x_exp_dim, :] = x_exp_ind[i].T
y_0 = numpy.dot(x_exp_ind[i].T, w_0)
fixing_zero_threshold = zero_threshold * 500
while iterate:
# print x_exp_ind[i].T.shape
# print x_expit[0:x_exp_dim,:].shape
x_pinv = numpy.linalg.pinv(x_expit)
# print y_0.shape, y_exp[i].shape
y_expit[0:x_exp_dim, 0] = y_exp[i] - y_0.flatten()
w_i = numpy.dot(x_pinv, y_expit) + w_0
iterate = False
if (w_i < zero_threshold).any():
# print "w_i[:,0] =", w_i[:,0]
# print "x_expit = ", x_expit
negative_weights += (w_i < fixing_zero_threshold).sum()
negative_elements = numpy.arange(nk)[w_i[:, 0] < fixing_zero_threshold]
numpy.random.shuffle(negative_elements)
for nn in negative_elements:
# print "nn=", nn
x_expit[0:x_exp_dim + 1, nn] = 0.0
# print "negative_elements", negative_elements
iterate = True
fixing_zero_threshold /= 2
if negative_weights >= max_zero_weights:
iterate = False
# FORCE SUM WEIGHTS=1:
# print "w_i[:,0] =", w_i[:,0]
# print "weight sum=",w_i.sum(),"min_weight=",w_i.min(),"max_weight=",w_i.max(),
# "negative weights=", negative_weights
w_i /= w_i.sum()
# print "y[i].shape", y[i].shape
# print "as.shape", numpy.dot(x_ind[i].T, w_i).T.shape
y[i] = numpy.dot(x_ind[i].T, w_i).T + x_mean # numpy.dot(w_i, x_ind[i]).T
y_exp_app[i] = numpy.dot(x_exp_ind[i].T, w_i).T
if w_i.min() < zero_threshold: # 0.1: #negative_weights >= max_zero_weights:
# quit()max_zero_weights
print ("Warning smallest weight is", w_i.min(), "thus replacing with simple average")
# print "Warning, at least %d all weights turned out to be negative! (%d)"%(max_zero_weights,
# negative_weights)
# print x_ind[i]
# print x_ind[i].shape
y[i] = x_ind[i].mean(axis=0)
print (".", end="")
# print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean()
# print "y=",y
return y # x[ii].mean(axis=1)
elif operation == "plainKNN":
ii = n.execute(y_exp)
ret = x[ii]
return ret
else:
er = "operation unknown:", operation
raise Exception(er)
def approximate_kNN(x, x_exp, y_exp, k=1, ignore_closest_match=False, label_avg=True):
n = mdp.nodes.KNNClassifier(k=k, execute_method="label")
n.train(x_exp, range(len(x_exp)))
if label_avg:
n.stop_training()
ii = n.klabels(y_exp)
if ignore_closest_match and k == 1:
ex = "Error, k==1 but ignoring closest match!"
raise Exception(ex)
elif ignore_closest_match:
ii = ii[:, 1:]
y = x[ii].mean(axis=1)
return y # x[ii].mean(axis=1)
else:
ii = n.execute(y_exp)
ret = x[ii]
return ret
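# Minimal usage sketch of approximate_kNN (added for clarity, not part of the original module).
# It assumes numpy and mdp are imported at module level, as elsewhere in this file.
def _example_approximate_kNN():
    x = numpy.random.normal(size=(400, 3))  # signals to be reconstructed
    x_exp = numpy.concatenate((x, x ** 2), axis=1)  # some expansion of the signals
    y_exp = x_exp[0:50] + 0.01 * numpy.random.normal(size=(50, 6))  # noisy query expansions
    # Average the 3 nearest training signals for every query expansion; the result should be close to x[0:50]
    y_app = approximate_kNN(x, x_exp, y_exp, k=3, ignore_closest_match=False, label_avg=True)
    return y_app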
def rank_expanded_signals_max_linearly(x, x_exp, y, y_exp, max_comp=10, max_num_samples_for_ev=None,
max_test_samples_for_ev=None, verbose=False):
""" Third ranking method. More robust and closer to max EV(x; y_i + Y)-EV(x;Y) for all Y, EV computed linearly.
Ordering and scoring of signals respects principle of best incremental feature selection
Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
    At most max_comp components are evaluated exhaustively; the remaining ones are scored based on the last evaluation
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
max_scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # list of the indices already selected, in selection order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
        temp_explained_vars = numpy.zeros(dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print ("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel)
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" %
(index_short, index_long, temp_explained_vars[index_short]))
# Update scores
max_scores[indices_available] = numpy.maximum(max_scores[indices_available],
temp_explained_vars - last_explained_var)
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print ("Selecting index short:", max_explained_var_index_short, end="")
print (" and index_ long:", max_explained_var_index_long)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
# last_score = scores[max_explained_var_index_long]
last_explained_var = temp_explained_vars[max_explained_var_index_short]
print ("brute max_scores = ", max_scores)
print ("brute taken = ", taken)
# Find ordering of variables not yet taken
if max_comp < dim_out:
        max_explained_var_indices_short = temp_explained_vars.argsort()[::-1][1:]
        # In decreasing order; the first element is removed because it was already added to taken
for max_explained_var_index_short in max_explained_var_indices_short:
taken.append(indices_available[max_explained_var_index_short])
print ("final taken = ", taken)
# Make scoring decreasing in ordering stored in taken
last_explained_var = max(last_explained_var, 0.01) # For numerical reasons
last_max_score = -numpy.inf
sum_max_scores = 0.0
for i, long_index in enumerate(taken):
current_max_score = max_scores[long_index]
sum_max_scores += current_max_score
if current_max_score > last_max_score and i > 0:
max_scores[long_index] = last_max_score
tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum()
max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1)
last_max_score = max_scores[long_index]
# print "iteration max_scores = ", max_scores
print ("preeliminar max_scores = ", max_scores)
# max_scores *= (last_explained_var / max_scores.sum())**0.5
# NOTE: last_explained_var is not the data variance.
# Here it is the variance up to max_comp components
# 3 options: all features, first max_comp features, output_dim features
max_scores *= (last_explained_var / max_scores.sum()) ** 0.5
print ("final max_scores = ", max_scores)
if (max_scores == 0.0).any():
print ("WARNING, removing 0.0 max_scores!")
max_score_min = (max_scores[max_scores > 0.0]).min()
# TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
max_scores += max_score_min * 0.001
# max_scores += (max_scores[max_scores>0.0])
return max_scores
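# Minimal usage sketch (illustrative, not part of the original code): given training data x with
# expansion x_exp and held-out data y with expansion y_exp,
#     scores = rank_expanded_signals_max_linearly(x, x_exp, y, y_exp, max_comp=10)
# returns one non-negative score per expanded component; components with larger scores contribute
# more to the linear reconstruction of the signal, and the scores are rescaled so that they roughly
# account for the variance explained by the first max_comp components.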
def rank_expanded_signals_max(x, x_exp, y, y_exp, max_comp=10, k=1, operation="average", max_num_samples_for_ev=None,
max_test_samples_for_ev=None, offsetting_mode="max_comp features", verbose=False):
""" This Second ranking method more robust and closer to max I(x; y_i + Y)-I(x;Y) for all Y.
Ordering and scoring of signals respects principle of best incremental feature selection
Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
    At most max_comp components are evaluated exhaustively; the remaining ones are scored based on the last evaluation
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
max_scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # list of the indices already selected, in selection order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
        temp_explained_vars = numpy.zeros(dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
        if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print ("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
if operation == "linear_rec":
y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel)
else:
y_app_sel = approximate_kNN_op(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
operation=operation) # invert from taken variables
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
index_short, index_long, temp_explained_vars[index_short]))
# Update scores
max_scores[indices_available] = numpy.maximum(max_scores[indices_available],
temp_explained_vars - last_explained_var)
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print("Selecting index short:", max_explained_var_index_short,
" and index_ long:", max_explained_var_index_long)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
# last_score = scores[max_explained_var_index_long]
last_explained_var = temp_explained_vars[max_explained_var_index_short]
print("brute max_scores = ", max_scores)
print("brute taken = ", taken)
# Find ordering of variables not yet taken
if max_comp < dim_out:
        max_explained_var_indices_short = temp_explained_vars.argsort()[::-1][1:]
        # In decreasing order; the first element is removed because it was already added to taken
for max_explained_var_index_short in max_explained_var_indices_short:
taken.append(indices_available[max_explained_var_index_short])
print("final taken = ", taken)
# Make scoring decreasing in ordering stored in taken
last_explained_var = max(last_explained_var, 0.01) # For numerical reasons
last_max_score = -numpy.inf
sum_max_scores = 0.0
for i, long_index in enumerate(taken):
current_max_score = max_scores[long_index]
sum_max_scores += current_max_score
if current_max_score > last_max_score and i > 0:
max_scores[long_index] = last_max_score
tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum()
max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1)
last_max_score = max_scores[long_index]
# print "iteration max_scores = ", max_scores
print("preeliminar max_scores = ", max_scores)
# Compute explained variance with all features
indices_all_x_selection = random_subindices(x.shape[0], max_num_samples_for_ev)
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
indices_all_y_selection = random_subindices(y.shape[0], max_test_samples_for_ev)
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
if operation == "linear_rec":
y_app_sel = approximate_linearly(x_sel, x_exp_sel, y_exp_sel)
else:
y_app_sel = approximate_kNN_op(x_sel, x_exp_sel, y_exp_sel, k=k, ignore_closest_match=True,
operation=operation) # invert from taken variables
explained_var_all_feats = compute_explained_var(y_sel, y_app_sel)
print("last_explained_var =", last_explained_var)
print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
# max_scores *= (last_explained_var / max_scores.sum())**0.5
# NOTE: last_explained_var is not the data variance. It is the variance up to max_comp components
# 3 options: all scores, max_comp scores, output_dim scores (usually all scores)
if offsetting_mode == "max_comp features":
max_scores *= (last_explained_var / max_scores.sum())
elif offsetting_mode == "all features":
print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance)
max_scores *= (explained_var_all_feats / max_scores.sum())
elif offsetting_mode == "all features smart":
max_scores *= (last_explained_var / max_scores.sum())
print("scaled max_scores=", max_scores)
max_scores += (explained_var_all_feats - last_explained_var) / max_scores.shape[0]
print("offsetted max_scores=", max_scores)
elif offsetting_mode == "democratic":
max_scores = numpy.ones_like(max_scores) * explained_var_all_feats / max_scores.shape[0]
print("democractic max_scores=", max_scores)
elif offsetting_mode == "linear":
# Code fixed!!!
max_scores = numpy.arange(dim_out, 0, -1) * explained_var_all_feats / (dim_out * (dim_out + 1) / 2)
print("linear max_scores=", max_scores)
elif offsetting_mode == "sensitivity_based":
sens = sensivity_of_linearly_approximation(x_sel, x_exp_sel)
max_scores = sens * explained_var_all_feats / sens.sum()
print("sensitivity_based max_scores=", max_scores)
else:
ex = "offsetting_mode unknown", offsetting_mode
raise Exception(ex)
print("final max_scores = ", max_scores)
if (max_scores == 0.0).any():
print("WARNING, removing 0.0 max_scores!")
max_score_min = (max_scores[max_scores > 0.0]).min()
max_scores += max_score_min * 0.001
# TODO:Find reasonable way to fix this, is this causing the distorted reconstructions???
# max_scores += (max_scores[max_scores>0.0])
return max_scores
# TODO: Improve: if max_comp < output_dim choose remaining features from the last evaluation of explained variances.
def rank_expanded_signals(x, x_exp, y, y_exp, max_comp=10, k=1, linear=False, max_num_samples_for_ev=None,
max_test_samples_for_ev=None, verbose=False):
""" Computes a scores vector that measures the importance of each expanded component at reconstructing a signal
x, x_exp are training data, y and y_exp are test data
    At most max_comp components are evaluated exhaustively; the remaining ones are scored based on the last evaluation
"""
dim_out = x_exp.shape[1]
all_indices = numpy.arange(dim_out)
indices_all_x = numpy.arange(x.shape[0])
indices_all_y = numpy.arange(y.shape[0])
scores = numpy.zeros(dim_out)
available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored
    taken = []  # list of the indices already selected, in selection order
# Compute maximum explainable variance (taking all components)
total_variance = data_variance(y)
last_explained_var = 0.0
last_score = 0.0
for iteration in range(min(max_comp, dim_out)):
# find individual contribution to expl var, from not taken
indices_available = all_indices[available_mask] # mapping from index_short to index_long
        temp_explained_vars = numpy.zeros(dim_out - iteration)  # explained variances for each available index
# On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed
if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]:
indices_all_x_selection = indices_all_x + 0
numpy.random.shuffle(indices_all_x_selection)
indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev]
x_sel = x[indices_all_x_selection]
x_exp_sel = x_exp[indices_all_x_selection]
else:
x_sel = x
x_exp_sel = x_exp
if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]:
indices_all_y_selection = indices_all_y + 0
numpy.random.shuffle(indices_all_y_selection)
indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev]
y_sel = y[indices_all_y_selection]
y_exp_sel = y_exp[indices_all_y_selection]
else:
y_sel = y
y_exp_sel = y_exp
if verbose:
print("indices available=", indices_available)
for index_short, index_long in enumerate(indices_available):
taken_tmp = list(taken) # Copy the taken list
taken_tmp.append(index_long) # Add index_long to it
x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables
y_exp_tmp_sel = y_exp_sel[:, taken_tmp]
y_app_sel = approximate_kNN(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True,
label_avg=True) # invert from taken variables
# print "QQQ=", compute_explained_var(y_sel, y_app_sel)
temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var
if verbose:
print("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (
index_short, index_long, temp_explained_vars[index_short]))
# select maximum
# print "temp_explained_vars=", temp_explained_vars
max_explained_var_index_short = temp_explained_vars.argmax()
# print "max_explained_var_index_short=", max_explained_var_index_short
# print "indices_available=",indices_available
max_explained_var_index_long = indices_available[max_explained_var_index_short]
if verbose:
print("Selecting index short:", max_explained_var_index_short)
print(" and index_ long:", max_explained_var_index_long)
# update total explained var & scores
# Add logic to robustly handle strange contributions: 3, 2, 1, 4 => 5, 2.5, 1.25, 1.25 ?
# TODO:FIX NORMALIZATION WHEN FIRST SCORES ARE ZERO OR NEGATIVE!
# TODO:NORMALIZATION SHOULD BE OPTIONAL, SINCE IT WEAKENS THE INTERPRETATION OF THE SCORES
explained_var = max(temp_explained_vars[max_explained_var_index_short], 0.0)
new_score = explained_var - last_explained_var
if verbose:
print("new_score raw = ", new_score)
new_score = max(new_score, 0.0)
if new_score > last_score and iteration > 0:
new_score = last_score # Here some options are available to favour components taken first
scores[max_explained_var_index_long] = new_score
if verbose:
print("tmp scores = ", scores)
        # normalize scores, so that they sum up to explained_var
sum_scores = scores.sum()
residual = max(explained_var, 0.0) - sum_scores
if residual > 0.0:
correction = residual / (iteration + 1)
scores[taken] += correction
scores[max_explained_var_index_long] += correction
# scores = scores * explained_var / (sum_scores+1e-6) #TODO:CORRECT THIS; INSTEAD OF FACTOR USE ADDITIVE TERM
if verbose:
print("normalized scores = ", scores, "sum to:", scores.sum(), "explained_var =", explained_var)
# mark as taken and update temporal variables
taken.append(max_explained_var_index_long)
available_mask[max_explained_var_index_long] = False
last_score = scores[max_explained_var_index_long]
last_explained_var = explained_var
# handle variables not used, assign equal scores to all of them
preserve_last_evaluation = True
if preserve_last_evaluation and max_comp < dim_out:
# The score of the last feature found will be modified, as well as of not yet found features
# TODO: Take care of negative values
if last_score <= 0.0:
last_score = 0.01 # Just some value is needed here
remaining_output_features = len(temp_explained_vars) # including feature already processed
remaining_ordered_explained_variances_short_index = numpy.argsort(temp_explained_vars)[::-1]
remaining_ordered_explained_variances_long_index = indices_available[
remaining_ordered_explained_variances_short_index]
remaining_ordered_explained_variances = temp_explained_vars[
remaining_ordered_explained_variances_short_index] + 0.0
remaining_total_contribution = last_score
print("last_score=", last_score)
beta = 0.95
remaining_ordered_explained_variances[
remaining_ordered_explained_variances <= 0.0] = 0.0001 # To avoid division over zero, numerical hack
# numpy.clip(remaining_ordered_explained_variances, 0.0, None) fails here!!!!
print("remaining_ordered_explained_variances=", remaining_ordered_explained_variances)
minimum = remaining_ordered_explained_variances.min() # first element
ev_sum = remaining_ordered_explained_variances.sum()
normalized_scores = (remaining_total_contribution / (ev_sum - remaining_output_features * minimum) * beta) * \
(remaining_ordered_explained_variances - minimum) + \
((1.0 - beta) / remaining_output_features) * remaining_total_contribution
print("normalized_scores=", normalized_scores)
print("remaining_ordered_explained_variances_long_index=", remaining_ordered_explained_variances_long_index)
print(scores.dtype)
print(normalized_scores.dtype)
scores[remaining_ordered_explained_variances_long_index] = normalized_scores
else:
# rest_explained_variance = total_variance-last_explained_var
sum_scores = scores.sum()
rest_explained_variance = total_variance - sum_scores
if verbose:
print("rest_explained_variance=", rest_explained_variance)
correction = rest_explained_variance / dim_out
scores += correction
if (scores == 0.0).any():
print("WARNING, removing 0.0 scores!")
scores += 0.0001
# num_unused = dim_out - max_comp
# scores[available_mask] = min(rest_explained_variance / num_unused, last_score)
# sum_scores = scores.sum()
# scores = scores * explained_var / (sum_scores+1e-6)
if verbose:
print("final scores: ", scores)
if verbose and linear and False:
for i in indices_available:
taken.append(i)
scores[taken] = numpy.arange(dim_out - 1, -1, -1) # **2 #WARNING!!! QUADRATIC SCORES!!!
scores = scores * total_variance / scores.sum()
print("Overriding with linear scores:", scores)
return scores
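# Illustrative numeric check (made-up explained variances; not a reproduction of
# the full scoring routine): the beta-weighted redistribution above hands out
# exactly last_score among the remaining features, splitting it into a
# proportional part (weight beta) and a uniform part (weight 1 - beta).
def _demo_beta_redistribution(last_score=0.05, beta=0.95):
    ev = numpy.array([0.4, 0.3, 0.2, 0.1])  # hypothetical remaining explained variances (descending)
    n = len(ev)
    minimum, ev_sum = ev.min(), ev.sum()
    scores = (last_score / (ev_sum - n * minimum) * beta) * (ev - minimum) + \
             ((1.0 - beta) / n) * last_score
    return scores, scores.sum()  # the sum equals last_score (0.05) up to rounding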
# TODO: Remove this node, it is now obsolete
class IEVMNode(mdp.Node):
""" Node implementing simple Incremental Explained Variance Maximization.
    Extracted features are moderately useful for reconstruction, although this node does not
    itself provide reconstruction.
The expansion function is optional, as well as performing PCA on the scores.
The added variance of the first k-outputs is equal to the explained variance of such k-outputs.
"""
def __init__(self, input_dim=None, output_dim=None, expansion_funcs=None, k=5, max_comp=None,
max_num_samples_for_ev=None, max_test_samples_for_ev=None, use_pca=False, use_sfa=False,
max_preserved_sfa=2.0, second_weighting=False, operation="average", out_sfa_filter=False, **argv):
super(IEVMNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
if expansion_funcs is not None:
self.exp_node = GeneralExpansionNode(funcs=expansion_funcs)
else:
self.exp_node = None
self.sfa_node = None
self.second_weighting = second_weighting
self.use_pca = use_pca
self.use_sfa = use_sfa
if use_sfa and not use_pca:
er = "Combination of use_sfa and use_pca not considered. Please activate use_pca or deactivate use_sfa"
raise Exception(er)
self.k = k
self.max_comp = max_comp
self.max_num_samples_for_ev = max_num_samples_for_ev
self.max_test_samples_for_ev = max_test_samples_for_ev
self.feature_scaling_factor = 0.5 # Factor that prevents amplitudes of features from growing across the network
self.exponent_variance = 0.5
self.operation = operation
self.max_preserved_sfa = max_preserved_sfa
self.out_sfa_filter = out_sfa_filter
@staticmethod
def is_trainable():
return True
def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None,
n_parallel=None, **argv):
num_samples, self.input_dim = x.shape
if self.output_dim is None:
self.output_dim = self.input_dim
if self.max_comp is None:
self.max_comp = min(self.input_dim, self.output_dim)
else:
self.max_comp = min(self.max_comp, self.input_dim, self.output_dim)
print("Training IEVMNode...")
self.x_mean = x.mean(axis=0) # Remove mean before expansion
x = x - self.x_mean
if self.exp_node is not None: # Expand data
print("expanding x...")
exp_x = self.exp_node.execute(x)
else:
exp_x = x
self.expanded_dim = exp_x.shape[1]
self.exp_x_mean = exp_x.mean(axis=0)
self.exp_x_std = exp_x.std(axis=0)
print("self.exp_x_mean=", self.exp_x_mean)
print("self.exp_x_std=", self.exp_x_std)
if (self.exp_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
        n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std  # Remove mean and variance from expansion
print("ranking n_exp_x ...")
rankings = rank_expanded_signals_max(x, n_exp_x, x, n_exp_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=True)
rankings *= self.feature_scaling_factor
print("rankings=", rankings)
if (rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1 = numpy.argsort(rankings)[::-1] # Sort in decreasing ranking
self.magn1 = rankings
print("self.perm1=", self.perm1)
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
print("ranking s_x_1 ...")
rankings_B = rank_expanded_signals_max(x, s_x_1, x, s_x_1, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=False)
print("rankings_B=", rankings_B)
if (rankings_B == 0).any():
er = "zero-component detected"
raise Exception(er)
self.perm1_B = numpy.argsort(rankings_B)[::-1] # Sort in decreasing ranking
self.magn1_B = rankings_B
print("self.perm1_B=", self.perm1_B)
# WARNING, this only works for normalized s_x_1
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
else:
s_x_1B = s_x_1
if self.use_sfa:
self.sfa_node = mdp.nodes.SFANode()
# TODO: Preserve amplitude
self.sfa_node.train(s_x_1B, block_size=block_size, train_mode=train_mode)
# , node_weights=None, edge_weights=None, scheduler = None, n_parallel=None)
self.sfa_node.stop_training()
print("self.sfa_node.d", self.sfa_node.d)
# Adaptive mechanism based on delta values
if isinstance(self.max_preserved_sfa, float):
self.num_sfa_features_preserved = (self.sfa_node.d <= self.max_preserved_sfa).sum()
elif isinstance(self.max_preserved_sfa, int):
self.num_sfa_features_preserved = self.max_preserved_sfa
else:
ex = "Cannot handle type of self.max_preserved_sfa"
print(ex)
raise Exception(ex)
# self.num_sfa_features_preserved = 10
sfa_x = self.sfa_node.execute(s_x_1B)
# TODO: Change internal variables of SFANode, so that we do not need to zero some components
# TODO: Is this equivalent to truncation of the matrices??? PERHAPS IT IS NOT !!!
sfa_x[:, self.num_sfa_features_preserved:] = 0.0
proj_sfa_x = self.sfa_node.inverse(sfa_x)
sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved]
# Notice that sfa_x has WEIGHTED zero-mean, thus we correct this here?
self.sfa_x_mean = sfa_x.mean(axis=0)
self.sfa_x_std = sfa_x.std(axis=0)
print("self.sfa_x_mean=", self.sfa_x_mean)
print("self.sfa_x_std=", self.sfa_x_std)
sfa_x -= self.sfa_x_mean
sfa_removed_x = s_x_1B - proj_sfa_x # Remove sfa projection of data
else:
self.num_sfa_features_preserved = 0
sfa_x = numpy.ones((num_samples, 0))
sfa_removed_x = s_x_1B
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
self.pca_node = mdp.nodes.PCANode(output_dim=pca_out_dim)
self.pca_node.train(sfa_removed_x)
# TODO:check that pca_out_dim > 0
pca_x = self.pca_node.execute(sfa_removed_x)
self.pca_x_mean = pca_x.mean(axis=0)
self.pca_x_std = pca_x.std(axis=0)
print("self.pca_x_std=", self.pca_x_std)
if (self.pca_x_std == 0).any():
er = "zero-component detected"
raise Exception(er)
# TODO: Is this step needed? if heuristic works well this weakens algorithm
n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std
else:
n_pca_x = sfa_removed_x[:, 0:pca_out_dim]
# Concatenate SFA and PCA signals and rank them preserving SFA components in ordering
if self.use_pca or self.use_sfa:
# TODO: Either both signals conserve magnitudes or they are both normalized
sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1)
sfa_pca_rankings = rank_expanded_signals_max(x, sfa_pca_x, x, sfa_pca_x, max_comp=self.max_comp, k=self.k,
operation=self.operation,
max_num_samples_for_ev=self.max_num_samples_for_ev,
max_test_samples_for_ev=self.max_test_samples_for_ev,
verbose=False)
sfa_pca_rankings *= self.feature_scaling_factor
# Only one magnitude normalization by node, but where should it be done? I guess after last transformation
print("sfa_pca_rankings=", sfa_pca_rankings)
if (sfa_pca_rankings == 0).any():
er = "zero-component detected"
raise Exception(er)
self.magn2 = sfa_pca_rankings
perm2a = numpy.arange(self.num_sfa_features_preserved, dtype="int")
perm2b = numpy.argsort(sfa_pca_rankings[self.num_sfa_features_preserved:])[::-1]
self.perm2 = numpy.concatenate((perm2a, perm2b + self.num_sfa_features_preserved))
print("second permutation=", self.perm2)
# WARNING, this only works for normalized sfa_pca_x
s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking
s_x_2 = s_x_2[:, self.perm2] # Permute with slow features first, and then most important signal first
else:
s_x_2 = n_pca_x
        # Truncating to output_dim components
s_x_2_truncated = s_x_2[:, 0:self.output_dim]
# Filtering output through SFA
if self.out_sfa_filter:
self.out_sfa_node = mdp.nodes.SFANode()
self.out_sfa_node.train(s_x_2_truncated, block_size=block_size, train_mode=train_mode)
self.out_sfa_node.stop_training()
sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated)
else:
sfa_filtered = s_x_2_truncated
self.stop_training()
# def __init__(self, funcs, input_dim = None, dtype = None, \
# use_pseudoinverse=True, use_hint=False, max_steady_factor=1.5, \
# delta_factor=0.6, min_delta=0.00001):
#
#
#
# self.sfa_node.train(x, **argv)
def _is_invertible(self):
return True
def _execute(self, x):
x_orig = x + 0.0
num_samples = x.shape[0]
zm_x = x - self.x_mean
if self.exp_node:
exp_x = self.exp_node.execute(zm_x)
else:
exp_x = zm_x
n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std
if numpy.isnan(n_exp_x).any() or numpy.isinf(n_exp_x).any():
print("n_exp_x=", n_exp_x)
quit()
n_exp_x[numpy.isnan(n_exp_x)] = 0.0
if numpy.isnan(self.magn1).any():
print("self.magn1=", self.magn1)
quit()
s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking
s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first
if self.second_weighting:
s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking_B
s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first
else:
s_x_1B = s_x_1
if numpy.isnan(s_x_1B).any():
print("s_x_1B=", s_x_1B)
quit()
if self.use_sfa:
sfa_x = self.sfa_node.execute(s_x_1B)
# TODO: Change internal variables of SFANode, so that we do not need to zero some components
sfa_x[:, self.num_sfa_features_preserved:] = 0.0
proj_sfa_x = self.sfa_node.inverse(sfa_x)
sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved]
sfa_x -= self.sfa_x_mean
sfa_removed_x = s_x_1B - proj_sfa_x
else:
sfa_x = numpy.ones((num_samples, 0))
sfa_removed_x = s_x_1B
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
pca_x = self.pca_node.execute(sfa_removed_x)
n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std
else:
n_pca_x = sfa_removed_x[:, 0:pca_out_dim]
if self.use_pca or self.use_sfa:
sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1)
s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking
s_x_2 = s_x_2[:, self.perm2] # Permute with most important signal first
else:
s_x_2 = n_pca_x
if numpy.isnan(s_x_2).any():
print("s_x_2=", s_x_2)
quit()
        # Truncating to output_dim components
s_x_2_truncated = s_x_2[:, 0:self.output_dim]
# Filtering output through SFA
if self.out_sfa_filter:
sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated)
else:
sfa_filtered = s_x_2_truncated
verbose = False
if verbose:
print("x[0]=", x_orig[0])
print("x_zm[0]=", x[0])
print("exp_x[0]=", exp_x[0])
print("s_x_1[0]=", s_x_1[0])
print("sfa_removed_x[0]=", sfa_removed_x[0])
print("proj_sfa_x[0]=", proj_sfa_x[0])
print("pca_x[0]=", pca_x[0])
print("n_pca_x[0]=", n_pca_x[0])
print("sfa_x[0]=", sfa_x[0] + self.sfa_x_mean)
print("s_x_2_truncated[0]=", s_x_2_truncated[0])
print("sfa_filtered[0]=", sfa_filtered[0])
return sfa_filtered
# TODO:Code inverse with SFA
def _inverse(self, y):
num_samples = y.shape[0]
if y.shape[1] != self.output_dim:
er = "Serious dimensionality inconsistency:", y.shape[0], self.output_dim
raise Exception(er)
# input_dim = self.input_dim
# De-Filtering output through SFA
sfa_filtered = y
if self.out_sfa_filter:
s_x_2_truncated = self.out_sfa_node.inverse(sfa_filtered)
else:
s_x_2_truncated = sfa_filtered
        # De-truncating output_dim components
s_x_2_full = numpy.zeros((num_samples, self.expanded_dim))
s_x_2_full[:, 0:self.output_dim] = s_x_2_truncated
if self.use_pca or self.use_sfa:
perm_2_inv = numpy.zeros(self.expanded_dim, dtype="int")
# print "input_dim", input_dim
# print "self.perm2", self.perm2
# print "len(self.perm2)", len(self.perm2)
perm_2_inv[self.perm2] = numpy.arange(self.expanded_dim, dtype="int")
# print perm_2_inv
sfa_pca_x = s_x_2_full[:, perm_2_inv]
sfa_pca_x /= self.magn2 ** self.exponent_variance
sfa_x = sfa_pca_x[:, 0:self.num_sfa_features_preserved]
n_pca_x = sfa_pca_x[:, self.num_sfa_features_preserved:]
else:
# sfa_x = ...?
n_pca_x = s_x_2_full
pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved
if self.use_pca and pca_out_dim > 0:
pca_x = n_pca_x * self.pca_x_std + self.pca_x_mean
sfa_removed_x = self.pca_node.inverse(pca_x)
else:
sfa_removed_x = n_pca_x
if self.use_sfa:
sfa_x += self.sfa_x_mean
sfa_x_full = numpy.zeros((num_samples, self.expanded_dim))
sfa_x_full[:, 0:self.num_sfa_features_preserved] = sfa_x
proj_sfa_x = self.sfa_node.inverse(sfa_x_full)
s_x_1B = sfa_removed_x + proj_sfa_x
else:
s_x_1B = sfa_removed_x
if self.second_weighting:
perm_1B_inv = numpy.zeros(self.expanded_dim, dtype="int")
perm_1B_inv[self.perm1_B] = numpy.arange(self.expanded_dim, dtype="int")
s_x_1 = s_x_1B[:, perm_1B_inv]
s_x_1 /= self.magn1_B ** self.exponent_variance
else:
s_x_1 = s_x_1B
perm_1_inv = numpy.zeros(self.expanded_dim, dtype="int")
perm_1_inv[self.perm1] = numpy.arange(self.expanded_dim, dtype="int")
n_exp_x = s_x_1[:, perm_1_inv]
n_exp_x /= self.magn1 ** self.exponent_variance
exp_x = n_exp_x * self.exp_x_std + self.exp_x_mean
if self.exp_node:
zm_x = self.exp_node.inverse(exp_x)
else:
zm_x = exp_x
x = zm_x + self.x_mean
verbose = False
if verbose:
print("x[0]=", x[0])
print("zm_x[0]=", zm_x[0])
print("exp_x[0]=", exp_x[0])
print("s_x_1[0]=", s_x_1[0])
print("proj_sfa_x[0]=", proj_sfa_x[0])
print("sfa_removed_x[0]=", sfa_removed_x[0])
print("pca_x[0]=", pca_x[0])
print("n_pca_x[0]=", n_pca_x[0])
print("sfa_x[0]=", sfa_x[0])
return x
def export_to_libsvm(labels_classes, features, filename):
dim_features = features.shape[1]
    filehandle = open(filename, "w")
if len(features) != len(labels_classes):
er = "number of labels_classes %d does not match number of samples %d!" % (len(labels_classes), len(features))
raise Exception(er)
for i in range(len(features)):
filehandle.write("%d" % labels_classes[i])
for j in range(dim_features):
filehandle.write(" %d:%f" % (j + 1, features[i, j]))
filehandle.write("\n")
filehandle.close()
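# Minimal usage sketch for export_to_libsvm (illustrative only; the data below is
# made up and the filename is caller-supplied): each sample becomes one line in
# the standard "label index:value ..." libsvm format, with indices starting at 1.
def _demo_export_to_libsvm(filename):
    labels_classes = numpy.array([0, 1, 1])
    features = numpy.array([[0.5, 1.0], [2.0, 0.0], [1.5, 3.0]])
    export_to_libsvm(labels_classes, features, filename)
    # First line written: "0 1:0.500000 2:1.000000"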
def is_monotonic_increasing(x):
prev = x[0]
for curr in x[1:]:
if curr <= prev:
return False
prev = curr
return True
def compute_average_labels_for_each_class(classes, labels):
all_classes = numpy.unique(classes)
avg_labels = numpy.zeros(len(all_classes))
for i, cl in enumerate(all_classes):
avg_label = labels[classes == cl].mean()
avg_labels[i] = avg_label
return avg_labels
def map_class_numbers_to_avg_label(all_classes, avg_labels, class_numbers):
if not (is_monotonic_increasing(all_classes)):
er = "Array of class numbers should be monotonically increasing:" + str(all_classes)
raise Exception(er)
if not (is_monotonic_increasing(avg_labels)):
er = "SEVERE WARNING! Array of labels should be monotonically increasing:" + str(avg_labels)
raise Exception(er)
if len(all_classes) != len(avg_labels):
er = "SEVERE WARNING! Array of classes should have the same length as the array of labels: %d vs. %d" % \
(len(all_classes), len(avg_labels))
raise Exception(er)
indices = numpy.searchsorted(all_classes, class_numbers)
return avg_labels[indices]
def map_labels_to_class_number(all_classes, avg_labels, labels):
if not (is_monotonic_increasing(all_classes)):
er = "Array of class numbers should be monotonically increasing:", all_classes
raise Exception(er)
if not (is_monotonic_increasing(avg_labels)):
er = "Array of labels should be monotonically increasing:", avg_labels
raise Exception(er)
if len(all_classes) != len(avg_labels):
er = "Array of classes should have the same length as the array of labels:" + str(len(all_classes)) + \
" vs. " + str(len(avg_labels))
raise Exception(er)
interval_midpoints = (avg_labels[1:] + avg_labels[:-1]) / 2.0
indices = numpy.searchsorted(interval_midpoints, labels)
return all_classes[indices]
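# Illustrative sketch (made-up classes and labels) of how the helpers above fit
# together: per-class average labels are computed first, and new labels are then
# mapped back to the class whose average is nearest via the interval midpoints.
def _demo_class_label_mapping():
    classes = numpy.array([0, 0, 1, 1, 2, 2])
    labels = numpy.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2])
    all_classes = numpy.unique(classes)                                  # [0, 1, 2]
    avg_labels = compute_average_labels_for_each_class(classes, labels)  # [1.1, 2.1, 3.1]
    new_labels = numpy.array([1.0, 2.05, 3.3])
    return map_labels_to_class_number(all_classes, avg_labels, new_labels)  # [0, 1, 2]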
def random_boolean_array(size):
return numpy.random.randint(2, size=size) == 1
def generate_random_sigmoid_weights(input_dim, num_features):
# scale_factor = 8.0 / numpy.sqrt(input_dim)
scale_factor = 1.0
c = numpy.random.normal(loc=0.0, scale=scale_factor, size=(input_dim, num_features))
c2 = (numpy.abs(c) ** 1.5)
# print "c2=", c2
# print "c2[0]=", c2[0]
c = 4.0 * numpy.sign(c) * c2 / c2.max()
# print "c=", c
# print "c[0]=", c[0]
l = numpy.random.normal(loc=0.0, scale=1.0, size=num_features)
return c, l
def extract_sigmoid_features(x, c1, l1, scale=1.0, offset=0.0, use_special_features=False):
if x.shape[1] != c1.shape[0] or c1.shape[1] != len(l1):
er = "Array dimensions mismatch: x.shape =" + str(x.shape) + ", c1.shape =" + str(
c1.shape) + ", l1.shape=" + str(l1.shape)
print(er)
raise Exception(er)
s = numpy.dot(x, c1) + l1
f = numpy.tanh(s)
if use_special_features:
# replace features with l1 = -1.0 to x^T * c1[i]
# replace features with l1 = 0.8 to 0.8 expo(x^T * c1[i])
# print "f.shape=", f.shape
# print "numpy.dot(x,c1[:,0]).shape=", numpy.dot(x,c1[:,0]).shape
fixed = 0
for i in range(c1.shape[1]):
if l1[i] == 0.8:
f[:, i] = numpy.abs(numpy.dot(x, c1[:, i])) ** 0.8
fixed += 1
elif l1[i] == 1.0: # identity
f[:, i] = numpy.dot(x, c1[:, i])
fixed += 1
print("Number of features adapted to either identity or 08Expo:", fixed)
return f * scale + offset
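# Minimal usage sketch (random data; the dimensions are arbitrary): the sigmoid
# weights are drawn once and can then expand any batch with the same input
# dimension. With use_special_features=True, columns whose l1 entry is exactly
# 1.0 or 0.8 are replaced by the identity or 08Expo features handled above.
def _demo_sigmoid_expansion(input_dim=4, num_features=16, num_samples=10):
    c1, l1 = generate_random_sigmoid_weights(input_dim, num_features)
    x = numpy.random.normal(size=(num_samples, input_dim))
    f = extract_sigmoid_features(x, c1, l1, use_special_features=True)
    return f.shape  # (num_samples, num_features)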
# sf_matrix has shape input_dim x output_dim
def evaluate_coefficients(sf_matrix):
# Exponentially decaying weights
# weighting = numpy.e ** -numpy.arange(sf_matrix.shape[1])
weighting = 2.0 ** -numpy.arange(sf_matrix.shape[1])
weighted_relevances = numpy.abs(sf_matrix) * weighting
relevances = weighted_relevances.sum(axis=1)
return relevances
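# Illustrative sketch: with the 2**-j weighting, a feature's relevance is
# dominated by the weight it carries on the first (slowest) output components.
def _demo_evaluate_coefficients():
    sf = numpy.array([[1.0, 0.0, 0.0],   # feature loading only on the 1st output
                      [0.0, 0.0, 1.0]])  # feature loading only on the 3rd output
    return evaluate_coefficients(sf)     # array([1.0, 0.25]): the first feature scores higher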
class SFAAdaptiveNLNode(mdp.Node):
"""Node that implements SFA with an adaptive non-linearity.
"""
def __init__(self, input_dim=None, output_dim=None, pre_expansion_node_class=None, final_expanded_dim=None,
initial_expansion_size=None, starting_point=None, expansion_size_decrement=None,
expansion_size_increment=None,
number_iterations=2, **argv):
super(SFAAdaptiveNLNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv)
self.pre_expansion_node_class = pre_expansion_node_class
self.pre_expansion_node = None
self.final_expanded_dim = final_expanded_dim
self.initial_expansion_size = initial_expansion_size
self.starting_point = starting_point
self.expansion_size_decrement = expansion_size_decrement
self.expansion_size_increment = expansion_size_increment
self.number_iterations = number_iterations
self.sfa_node = None
self.f1_mean = None
self.f1_std = None
@staticmethod
def is_trainable():
return True
# sfa_block_size, sfa_train_mode, etc. would be preferred
# max_preserved_sfa=1.995
def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None,
n_parallel=None, **argv):
self.input_dim = x.shape[1]
if self.output_dim is None:
self.output_dim = self.input_dim
print("Training SFAAdaptiveNLNode...")
print("block_size =", block_size, ", train_mode =", train_mode)
print("x.shape=", x.shape, "self.starting_point=", self.starting_point)
# TODO: Remove mean and normalize variance before expansion
# self.x_mean = x.mean(axis=0)
# x_zm=x-self.x_mean
# TODO:Make this code more pretty (refactoring)
if self.starting_point == "Identity":
print("wrong1")
c0 = numpy.identity(self.input_dim)
l0 = numpy.ones(self.input_dim) * -1.0 # Code identity
elif self.starting_point == "08Exp":
print("good 1")
c0 = numpy.concatenate((numpy.identity(self.input_dim), numpy.identity(self.input_dim)), axis=1)
l0 = numpy.concatenate((numpy.ones(self.input_dim) * 1.0, numpy.ones(self.input_dim) * 0.8), axis=0)
if self.starting_point == "Identity" or self.starting_point == "08Exp":
print("good 2")
remaining_feats = self.initial_expansion_size - c0.shape[1]
print("remaining_feats =", remaining_feats)
if remaining_feats < 0:
er = "Error, features needed for identity or 08Exp exceeds number of features availabe" + \
"remaining_feats=%d < 0" % remaining_feats + \
". self.initial_expansion_size=%d" % self.initial_expansion_size + \
"c0.shape[1]%d" % c0.shape[1]
raise Exception(er)
c2, l2 = generate_random_sigmoid_weights(self.input_dim, remaining_feats)
c1 = numpy.concatenate((c0, c2), axis=1)
l1 = numpy.concatenate((l0, l2), axis=0)
else:
print("wrong wrong")
c1, l1 = generate_random_sigmoid_weights(self.input_dim,
self.initial_expansion_size - self.expansion_size_increment)
for num_iter in range(self.number_iterations):
print("**************** Iteration %d of %d ********************" % (num_iter, self.number_iterations))
if num_iter > 0: # Only add additional features after first iteration
cp, lp = generate_random_sigmoid_weights(self.input_dim, self.expansion_size_increment)
c1 = numpy.append(c1, cp, axis=1)
l1 = numpy.append(l1, lp, axis=0)
# print "c1=", c1
# print "l1=", l1
f1 = extract_sigmoid_features(x, c1, l1, use_special_features=True)
f1_mean = f1.mean(axis=0)
f1 = f1 - f1_mean
f1_std = f1.std(axis=0)
f1 = f1 / f1_std
# print "Initial features f1=", f1
print("f1.shape=", f1.shape)
print("f1[0]=", f1[0])
print("f1[-1]=", f1[-1])
sfa_node = mdp.nodes.SFANode(output_dim=self.output_dim)
sfa_node.train(f1, block_size=block_size, train_mode=train_mode, node_weights=node_weights,
edge_weights=edge_weights, scheduler=scheduler, n_parallel=n_parallel)
sfa_node.stop_training()
print("self.sfa_node.d (full expanded) =", sfa_node.d)
# Evaluate features based on sfa coefficient
coeffs = evaluate_coefficients(sfa_node.sf)
print("Scores of each feature from SFA coefficients:", coeffs)
# find indices of best features. Largest scores first
best_feat_indices = coeffs.argsort()[::-1]
print("indices of best features:", best_feat_indices)
# remove worst expansion_size_decrement features
if num_iter < self.number_iterations - 1: # Except during last iteration
best_feat_indices = best_feat_indices[:-self.expansion_size_decrement]
c1 = c1[:, best_feat_indices]
l1 = l1[best_feat_indices]
# print "cc=", cc
# print "ll=", ll
if c1.shape[1] > self.final_expanded_dim:
c1 = c1[:, :self.final_expanded_dim]
l1 = l1[:self.final_expanded_dim]
self.c1 = c1
self.l1 = l1
print("self.c1.shape=,", self.c1.shape, "self.l1.shape=,", self.l1.shape)
print("Learning of non-linear features finished")
f1 = extract_sigmoid_features(x, self.c1, self.l1, use_special_features=True)
self.f1_mean = f1.mean(axis=0)
f1 -= self.f1_mean
self.f1_std = f1.std(axis=0)
f1 /= self.f1_std
self.sfa_node = mdp.nodes.SFANode(output_dim=self.output_dim)
self.sfa_node.train(f1, block_size=block_size, train_mode=train_mode, node_weights=node_weights,
edge_weights=edge_weights, scheduler=scheduler, n_parallel=n_parallel)
self.sfa_node.stop_training()
print("self.sfa_node.d (final features) =", self.sfa_node.d)
# Evaluate features based on sfa coefficient
coeffs = evaluate_coefficients(self.sfa_node.sf)
print("evaluation of each features from SFA coefficients: ", coeffs)
# find indices of best features. Largest scores first
best_feat_indices = coeffs.argsort()[::-1]
print("indices of best features:", best_feat_indices)
print("f1.shape=", f1.shape)
# Train linear regression node for a linear approximation to inversion
self.lr_node = mdp.nodes.LinearRegressionNode()
y = self.sfa_node.execute(f1)
self.lr_node.train(y, x)
self.lr_node.stop_training()
x_app = self.lr_node.execute(y)
ev_linear_inverse = compute_explained_var(x, x_app) / data_variance(x)
print("EV_linear_inverse (train)=", ev_linear_inverse)
self.stop_training()
def _is_invertible(self):
return True
def _execute(self, x):
num_samples = x.shape[0]
f1 = extract_sigmoid_features(x, self.c1, self.l1, use_special_features=True)
f1 -= self.f1_mean
f1 /= self.f1_std
return self.sfa_node.execute(f1)
def _inverse(self, y, linear_inverse=True):
x_app = self.lr_node.execute(y)
return x_app
# TODO:Finish this and correct it
def indices_training_graph_split(num_samples, train_mode="regular", block_size=None, num_parts=1):
if train_mode == "regular":
        indices = numpy.arange(num_samples)
import collections
import dataclasses
import enum
import functools
from copy import deepcopy
from itertools import chain
import deap
import numpy as np
import pyDOE2
from deap.base import Fitness
def listify(fn=None, wrapper=list):
"""
From https://github.com/shazow/unstdlib.py/blob/master/unstdlib/standard/list_.py#L149
A decorator which wraps a function's return value in ``list(...)``.
Useful when an algorithm can be expressed more cleanly as a generator but
the function should return an list.
Example::
>>> @listify
... def get_lengths(iterable):
... for i in iterable:
... yield len(i)
>>> get_lengths(["spam", "eggs"])
[4, 4]
>>>
>>> @listify(wrapper=tuple)
... def get_lengths_tuple(iterable):
... for i in iterable:
... yield len(i)
>>> get_lengths_tuple(["foo", "bar"])
(3, 3)
"""
def listify_return(fn):
@functools.wraps(fn)
def listify_helper(*args, **kw):
return wrapper(fn(*args, **kw))
return listify_helper
if fn is None:
return listify_return
return listify_return(fn)
@dataclasses.dataclass(frozen=True)
class VariableProperties:
discrete: bool
bounded: bool
ordered: bool
class VariableType(VariableProperties, enum.Enum):
CONTINUOUS = (False, True, True)
INTEGER = (True, True, True)
ORDINAL = (True, False, True)
NOMINAL = (True, False, False)
@dataclasses.dataclass(frozen=True)
class ObjectiveValueWithConstraintViolation:
objectives: tuple
constraint_violation: float
def __iter__(self):
yield from self.objectives
class ConstraintDominatedFitness(Fitness):
feasibility_tolerance = 1e-12
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.constraint_violation = None
def __deepcopy__(self, memo):
# The base Fitness class uses an optimized deepcopy that throws away attributes
copy_ = super().__deepcopy__(memo)
copy_.constraint_violation = self.constraint_violation
return copy_
@property
def feasible(self):
return self.valid and self.constraint_violation <= self.feasibility_tolerance
def set_values(self, values):
if isinstance(values, ObjectiveValueWithConstraintViolation):
self.constraint_violation = values.constraint_violation
values = values.objectives
Fitness.setValues(self, values)
def del_values(self):
self.constraint_violation = None
Fitness.delValues(self)
values = property(Fitness.getValues, set_values, del_values)
def dominates(self, other, obj=slice(None)):
if self.feasible and other.feasible:
return super().dominates(other, obj)
else:
return self.constraint_violation < other.constraint_violation
class Individual(list):
def __init__(self, *args, fitness_class, **kwargs):
super().__init__(*args, **kwargs)
self.fitness = fitness_class()
# Metadata
self.generation = None
def __repr__(self):
return f"Individual({super().__repr__()})"
@dataclasses.dataclass(frozen=True)
class IndividualBounds:
lower: tuple
upper: tuple
@classmethod
def from_design_var_meta(cls, design_var_meta):
lower = {
name: (meta["lower"] if meta["type"].bounded else np.zeros(meta["shape"]))
for name, meta in design_var_meta.items()
}
upper = {
name: (
meta["upper"]
if meta["type"].bounded
else np.vectorize(len)(meta["values"]) - 1
)
for name, meta in design_var_meta.items()
}
return cls(
lower=tuple(individual_sequence(lower, design_var_meta)),
upper=tuple(individual_sequence(upper, design_var_meta)),
)
def individual_sequence(design_vars, design_var_meta):
return chain.from_iterable(
np.broadcast_to(design_vars[name], meta["shape"]).flat
for name, meta in design_var_meta.items()
)
def individual_types_sequence(design_var_meta):
return chain.from_iterable(
[meta["type"]] * np.product(meta["shape"] or (1,))
for meta in design_var_meta.values()
)
def stretch_array(array, shape):
    array = np.asarray(array)
    try:
        return np.broadcast_to(array, shape)
    except ValueError:
        pass
    try:
        return np.reshape(array, shape)
    except ValueError:
        pass
    # Last resort: add trailing singleton dimensions, then broadcast.
    missing_dims = len(shape) - array.ndim
    indexer = (...,) + (np.newaxis,) * missing_dims
    array = array[indexer]
    return np.broadcast_to(array, shape)
def random_ints(shape, lower, upper):
    ret = np.empty(shape, dtype=int)
lower = np.broadcast_to(lower, shape)
upper = np.broadcast_to(upper, shape)
for i in np.ndindex(*shape):
        ret[i] = np.random.randint(lower[i], upper[i], dtype=int)
return ret
def random_floats(shape, lower, upper):
ret = np.empty(shape)
lower = stretch_array(lower, shape)
upper = stretch_array(upper, shape)
for i in np.ndindex(*shape):
ret[i] = np.random.rand() * (upper[i] - lower[i]) + lower[i]
return ret
def random_choice(shape: tuple, values: set):
ret = np.empty(shape, dtype=object)
values = np.broadcast_to(values, shape)
for i in np.ndindex(*shape):
ret[i] = np.random.choice(list(values[i]))
return ret
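# Illustrative sketch (arbitrary shapes and bounds): the three helpers above draw
# element-wise random values, broadcasting scalar or array bounds to `shape`.
def _demo_random_helpers():
    ints = random_ints((2, 3), lower=0, upper=5)          # integers in [0, 5)
    floats = random_floats((2, 3), lower=0.0, upper=1.0)  # uniform floats in [0, 1)
    choices = random_choice((2,), values={"red", "green", "blue"})
    return ints, floats, choices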
def convert_design_vars_to_individual(design_var_meta, fitness_class, design_vars):
"""
Converts a dict of OpenMDAO design variables into a DEAP individual.
"""
return Individual(
individual_sequence(design_vars, design_var_meta), fitness_class=fitness_class
)
def convert_individual_to_design_vars(
individual, design_var_meta, discrete_value_mappings
):
"""
Converts a DEAP individual into a dict of OpenMDAO design variables.
"""
ind = deepcopy(individual)
design_vars = {}
for name, meta in design_var_meta.items():
shape = meta["shape"]
type_ = meta["type"]
ind_items = np.prod(shape, dtype=int)
values = np.array(
ind[:ind_items],
            dtype=(float if type_ is VariableType.CONTINUOUS else int),
).reshape(shape)
if type_.discrete and not type_.bounded:
values = values.astype("O")
for arr_index, index in np.ndenumerate(values):
a = discrete_value_mappings[name][arr_index][index]
values[arr_index] = a
assert values.shape == shape
design_vars[name] = values if shape else values.item()
ind = ind[ind_items:]
return design_vars
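# Round-trip sketch (illustrative; the metadata, fitness weights and values are
# made up): a dict of design variables is flattened into an Individual and then
# recovered. Only bounded variable types are used here, so no discrete value
# mappings are needed.
def _demo_design_var_round_trip():
    meta = {
        "x": {"shape": (2,), "type": VariableType.CONTINUOUS},
        "n": {"shape": (), "type": VariableType.INTEGER},
    }
    design_vars = {"x": np.array([0.5, 1.5]), "n": 3}
    fitness_class = type(
        "DemoFitness", (ConstraintDominatedFitness,), {"weights": (-1.0,)}
    )
    ind = convert_design_vars_to_individual(meta, fitness_class, design_vars)
    back = convert_individual_to_design_vars(ind, meta, discrete_value_mappings={})
    return ind, back  # back == {"x": array([0.5, 1.5]), "n": 3}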
def random_individual_value(type_, lower, upper):
if type_ is VariableType.CONTINUOUS:
return np.random.rand() * (upper - lower) + lower
else:
return np.random.randint(lower, upper + 1)
def init_population(count, individual_types, individual_bounds, fitness_class):
for i in range(count):
yield Individual(
[
random_individual_value(type_, lower, upper)
for (type_, lower, upper) in zip(
individual_types, individual_bounds.lower, individual_bounds.upper
)
],
fitness_class=fitness_class,
)
def init_population_gsd(
min_count,
individual_types,
individual_bounds,
fitness_class,
cont_levels,
gsd_reduction,
):
# Mad experiment with GSD
gsd_levels = [
int(upper - lower + 1) if type_ is not VariableType.CONTINUOUS else cont_levels
for (type_, lower, upper) in zip(
individual_types, individual_bounds.lower, individual_bounds.upper
)
]
gsd_reduction = int(np.prod(gsd_levels) // min_count)
gsd_designs = pyDOE2.gsd(gsd_levels, gsd_reduction)
designs = gsd_designs.astype("O")
cont_mask = np.array(individual_types) == VariableType.CONTINUOUS
noncont_adder = np.array(individual_bounds.lower)
cont = np.linspace(individual_bounds.lower, individual_bounds.upper, cont_levels)
designs[cont_mask] = np.choose(gsd_designs[:, cont_mask], cont.T)
np.copyto(designs, gsd_designs + noncont_adder, where=~cont_mask)
for d in designs:
yield Individual(
d,
fitness_class=fitness_class,
)
def disassemble_individuals(types, individuals):
for ind in individuals:
yield {
type_: [attr for attr, attr_type in zip(ind, types) if attr_type is type_]
for type_ in VariableType
}
def types_index(types):
counter = collections.Counter()
for type_ in types:
yield counter[type_]
counter[type_] += 1
def reassemble_individuals(types, individual_pairs):
for (index, (type_, sub_index)) in enumerate(zip(types, types_index(types))):
for ind, targ_ind in individual_pairs:
targ_ind[index] = ind[type_][sub_index]
def mate_disassembled(
ind1,
ind2,
individual_types,
individual_bounds,
cont_eta=30,
int_eta=30,
ord_indpb=1.0,
nom_indpb=1.0,
):
(da_ind1, da_ind2, da_lower, da_upper) = disassemble_individuals(
individual_types, (ind1, ind2, individual_bounds.lower, individual_bounds.upper)
)
deap.tools.cxSimulatedBinaryBounded(
da_ind1[VariableType.CONTINUOUS],
da_ind2[VariableType.CONTINUOUS],
eta=cont_eta,
low=da_lower[VariableType.CONTINUOUS],
up=da_upper[VariableType.CONTINUOUS],
)
deap.tools.cxSimulatedBinaryBounded(
da_ind1[VariableType.INTEGER],
da_ind2[VariableType.INTEGER],
eta=int_eta,
low=da_lower[VariableType.INTEGER],
up=da_upper[VariableType.INTEGER],
)
da_ind1[VariableType.INTEGER] = np.round(da_ind1[VariableType.INTEGER]).tolist()
da_ind2[VariableType.INTEGER] = np.round(da_ind2[VariableType.INTEGER]).tolist()
deap.tools.cxUniform(
da_ind1[VariableType.ORDINAL], da_ind2[VariableType.ORDINAL], indpb=ord_indpb
)
deap.tools.cxUniform(
da_ind1[VariableType.NOMINAL], da_ind2[VariableType.NOMINAL], indpb=nom_indpb
)
reassemble_individuals(individual_types, [(da_ind1, ind1), (da_ind2, ind2)])
return ind1, ind2
def mutate_disassembled(
ind,
individual_types,
individual_bounds,
cont_eta=20,
cont_indpb=1.0,
int_eta=20,
int_indpb=1.0,
ord_indpb=1.0,
nom_indpb=1.0,
):
(da_ind, da_lower, da_upper) = disassemble_individuals(
individual_types, (ind, individual_bounds.lower, individual_bounds.upper)
)
deap.tools.mutPolynomialBounded(
da_ind[VariableType.CONTINUOUS],
eta=cont_eta,
indpb=cont_indpb,
low=da_lower[VariableType.CONTINUOUS],
up=da_upper[VariableType.CONTINUOUS],
)
deap.tools.mutPolynomialBounded(
da_ind[VariableType.INTEGER],
eta=int_eta,
indpb=int_indpb,
low=da_lower[VariableType.INTEGER],
up=da_upper[VariableType.INTEGER],
)
da_ind[VariableType.INTEGER] = np.round(da_ind[VariableType.INTEGER]).tolist()
deap.tools.mutUniformInt(
da_ind[VariableType.ORDINAL],
indpb=ord_indpb,
low=da_lower[VariableType.ORDINAL],
up=da_upper[VariableType.ORDINAL],
)
deap.tools.mutUniformInt(
da_ind[VariableType.NOMINAL],
indpb=nom_indpb,
low=da_lower[VariableType.NOMINAL],
up=da_upper[VariableType.NOMINAL],
)
reassemble_individuals(individual_types, [(da_ind, ind)])
return (ind,)
def _make_discrete_value_mapping(values_set, type_):
if type_.ordered:
values_set = sorted(values_set)
enum_values = enumerate(values_set)
return {index: value for index, value in enum_values}
def make_discrete_value_mappings(design_var_meta):
for name, meta in design_var_meta.items():
type_ = meta["type"]
if type_.bounded:
continue
yield name, np.vectorize(lambda x: _make_discrete_value_mapping(x, type_))(
meta["values"]
)
def epsilonify(value, eps=np.finfo(float).eps):
value = np.copy(value)
if isinstance(value, np.ndarray):
value[value == 0.0] = eps
else:
value = value or eps
return value
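# Illustrative sketch: epsilonify only replaces exact zeros, which makes it a
# safe denominator guard for the relative constraint violations computed below.
def _demo_epsilonify():
    bounds = np.array([0.0, 2.0, -1.0])
    return epsilonify(bounds)  # array([eps, 2.0, -1.0]); the input array is not modified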
def constraint_violation(values, meta):
keys = set(values.keys())
assert keys == set(meta.keys())
total_violation = 0.0
for key in keys:
var_meta = meta[key]
val = values[key]
lower = var_meta["lower"]
lower_eps = epsilonify(lower)
upper = var_meta["upper"]
upper_eps = epsilonify(upper)
equals = var_meta["equals"]
equals_eps = epsilonify(equals)
total_violation += np.sum(np.abs(np.fmax(upper, val) / upper_eps - 1))
        total_violation += np.sum(np.abs(np.fmin(lower, val) / lower_eps - 1))  # lower-bound counterpart of the term above
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from collections import defaultdict
from typing import Any, Callable, Dict
import attr
import magnum as mn
import numpy as np
import habitat_sim
from habitat.core.registry import registry
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.tasks.rearrange.obj_loaders import (
add_obj,
load_articulated_objs,
load_objs,
place_viz_objs,
)
from habitat.tasks.rearrange.utils import (
convert_legacy_cfg,
get_aabb,
get_nav_mesh_settings,
)
from habitat_sim.gfx import LightInfo, LightPositionModel
from habitat_sim.physics import MotionType
from habitat_sim.robots import FetchRobot
# (unique id, Filename, BB size, BB offset, Robot base offset [can be none])
ART_BBS = [
("fridge", "fridge.urdf", [0.33, 0.9, 0.33], [0, 0.2, 0], [1.033, 0.0]),
(
"counter",
"kitchen_counter.urdf",
[0.28, 0.5, 1.53],
[-0.067, 0.5, 0.0],
None,
),
# Counter R
(
"counter_R",
"kitchen_counter.urdf",
[0.28, 0.5, 0.55],
[-0.067, 0.5, 1.0],
[0.526, 1.107],
),
# Counter L
(
"counter_L",
"kitchen_counter.urdf",
[0.28, 0.5, 0.75],
[-0.067, 0.5, -0.7],
[0.524, -0.896],
),
]
# temp workflow for loading lights into Habitat scene
def load_light_setup_for_glb(json_filepath):
with open(json_filepath) as json_file:
data = json.load(json_file)
lighting_setup = []
for light in data["lights"].values():
t = light["position"]
light_w = 1.0
position = [float(t[0]), float(t[1]), float(t[2]), light_w]
color_scale = float(light["intensity"])
color = [float(c * color_scale) for c in light["color"]]
lighting_setup.append(
LightInfo(
vector=position,
color=color,
model=LightPositionModel.Global,
)
)
return lighting_setup
@attr.s(auto_attribs=True, slots=True)
class SimEvent:
is_ready: Callable[[], bool]
run: Callable[[], None]
# Distance from the base of the end-effector to the actual end-effector
# position, which should be in the center of the gripper.
EE_GRIPPER_OFFSET = mn.Vector3(0.08, 0, 0)
@registry.register_simulator(name="RearrangeSim-v0")
class RearrangeSim(HabitatSim):
def __init__(self, config):
super().__init__(config)
agent_config = self.habitat_config
self.navmesh_settings = get_nav_mesh_settings(self._get_agent_config())
self.first_setup = True
self.is_render_obs = False
self.pov_mode = agent_config.POV
self.update_i = 0
self.h_offset = 0.3
self.ep_info = None
self.do_grab_using_constraint = True
self.snap_to_link_on_grab = True
self.snapped_obj_id = None
self.snapped_obj_constraint_id = []
self.prev_loaded_navmesh = None
self.prev_scene_id = None
self.robot_name = agent_config.ROBOT_URDF.split("/")[-1].split(".")[0]
self._force_back_pos = None
# Number of physics updates per action
self.ac_freq_ratio = agent_config.AC_FREQ_RATIO
# The physics update time step.
self.ctrl_freq = agent_config.CTRL_FREQ
# Effective control speed is (ctrl_freq/ac_freq_ratio)
self.art_objs = []
self.start_art_states = {}
self.cached_art_obj_ids = []
self.scene_obj_ids = []
self.viz_obj_ids = []
self.event_callbacks = []
# Used to get data from the RL environment class to sensors.
self.track_markers = []
self._goal_pos = None
self.viz_ids: Dict[Any, Any] = defaultdict(lambda: None)
# Disables arm control. Useful if you are hiding the arm to perform
# some scene sensing.
self.ctrl_arm = True
self._light_setup = load_light_setup_for_glb(
"data/replica_cad/configs/lighting/frl_apartment_stage.lighting_config.json"
)
obj_attr_mgr = self.get_object_template_manager()
obj_attr_mgr.load_configs("data/objects/ycb")
self.concur_render = self.habitat_config.get(
"CONCUR_RENDER", True
) and hasattr(self, "get_sensor_observations_async_start")
def _create_art_bbs(self):
"""
Creates transformed bounding boxes for the articulated objects.
"""
self.art_bbs = []
for _, (name, urdf_name, bb_size, bb_pos, robo_pos) in enumerate(
ART_BBS
):
if urdf_name not in self.art_name_to_id:
continue
ao = self.art_name_to_id[urdf_name]
art_T = ao.transformation
if robo_pos is not None:
robo_pos_vec = mn.Vector3([robo_pos[0], 0.5, robo_pos[1]])
robo_pos_vec = art_T.transform_point(robo_pos_vec)
robo_pos = np.array(robo_pos_vec)[[0, 2]]
bb = mn.Range3D.from_center(
mn.Vector3(*bb_pos), mn.Vector3(*bb_size)
)
bb = habitat_sim.geo.get_transformed_bb(bb, art_T)
self.art_bbs.append((name, bb, robo_pos))
def _get_target_trans(self):
"""
This is how the target transforms should be accessed since
multiprocessing does not allow pickling.
"""
# Preprocess the ep_info making necessary datatype conversions.
target_trans = []
for i in range(len(self.ep_info["targets"])):
targ_idx, trans = self.ep_info["targets"][i]
if len(trans) == 3:
# Legacy position only format.
trans = mn.Matrix4.translation(mn.Vector3(*trans))
else:
trans = mn.Matrix4(trans)
target_trans.append((targ_idx, trans))
return target_trans
def find_robo_for_art_name(self, art_name):
"""
Gets the desired robot starting position for interacting with an
articulated object.
"""
for name, bb, robo_pos in self.art_bbs:
if name == art_name and robo_pos is not None:
return (bb, robo_pos)
return None, None
def get_nav_pos(self, pos, set_back: bool = False):
"""
Gets the desired robot base position for an object. If the object is in
an articulated object, a proper offset is applied.
- set_back: (bool): Pushes the robot even further back if specified by
offset. Used when spawning the robot in front of an open cabinet.
"""
pos = mn.Vector3(*pos)
force_spawn_pos = self.ep_info["force_spawn_pos"]
if set_back and force_spawn_pos is not None:
rel_art_bb_id, offset = force_spawn_pos
_, urdf_name, _, _, robo_pos = ART_BBS[rel_art_bb_id]
art_id = self.art_name_to_id[urdf_name]
art_T = self.get_articulated_object_root_state(art_id)
robo_pos = [robo_pos[0] + offset[0], 0.5, robo_pos[1] + offset[1]]
robo_pos = art_T.transform_point(mn.Vector3(*robo_pos))
robo_pos = np.array(robo_pos)
robo_pos = robo_pos[[0, 2]]
            return np.array([robo_pos[0], 0.5, robo_pos[1]])
'''
usage: python pypsd.py [-h] -b BINSFILE -i INPUTFILE [-o OUTPUTDIR]
A Python script for calculating the particle size distribution (PSD) of any
sample. Please read the adjoining README.md file for more information.
Written by <NAME> and <NAME>.
'''
import os
import argparse
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
# Argparse stuff
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-b", dest='binsfile', help="file with particle diameter bins", type=str, required=True)
PARSER.add_argument("-i", dest='inputfile', help="file with particle areas", type=str, required=True)
PARSER.add_argument("-o", dest='outputdir', help="output directory", type=str, required=False)
ARGS = PARSER.parse_args()
# Read input file from command line, create arrays
INFILE = ARGS.inputfile
BASENAME = INFILE.split('.txt')[0]
BINS = np.loadtxt(ARGS.binsfile, unpack=True)
PARTICLES = np.loadtxt(INFILE)
if ARGS.outputdir:
OUTPATH = ARGS.outputdir + '/'
os.makedirs(OUTPATH, exist_ok=True)
else:
OUTPATH = BASENAME + '_'
def distribution(values, cutoff):
""" a function for creating and then solving a linear equation """
counter = np.argmax(values >= cutoff)
point2 = np.array([BINS[counter], values[counter]])
point1 = np.array([BINS[counter-1], values[counter-1]])
slope = (point2[1] - point1[1])/(point2[0] - point1[0])
intercept = point2[1] - slope*point2[0]
dist = (cutoff - intercept) * (1/slope)
return dist
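# Worked example with made-up numbers: distribution() linearly interpolates the
# cumulative curve between the two bins that bracket the cutoff. For bins
# [1, 2, 4, 8] and cumulative values [10, 30, 70, 100], the 50 % point lies
# between 2 and 4: slope = (70 - 30) / (4 - 2) = 20, intercept = 70 - 20*4 = -10,
# so d50 = (50 - intercept) / slope = 3.0.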
# Rolling mean for significant sample size
AVGCUM = np.cumsum(PARTICLES, dtype=float)
"""
Thompson Sampling strategies for continuous context.
"""
from argparse import Namespace
import numpy as np
from scipy.stats import norm as normal_distro
from scipy.optimize import minimize
from OCBO.cstrats.cts_opt import ContinuousOpt
from dragonfly.utils.option_handler import get_option_specs
from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient
prof_args = [\
get_option_specs('num_profiles', False, 50,
'Number of contexts to consider picking from.'),
get_option_specs('profile_evals', False, 100,
'Number of evaluations for each context to determine max.'),
get_option_specs('xi', False, 0.0,
'expected improvement hyperparameter, which controls the exploitation and exploration trade-off'),
get_option_specs('opt_sampling', False, True, 'whether to perform the optimization by finite sampling strategy')
]
class ProfileOpt(ContinuousOpt):
def _child_set_up(self, function, domain, ctx_dim, options):
self.num_profiles = options.num_profiles
self.profile_evals = options.profile_evals
self.xi = options.xi
self.opt_sampling = options.opt_sampling
def _determine_next_query(self):
# Get the contexts to test out.
ctxs = self._get_ctx_candidates(self.num_profiles)
# For each context...
best_pt, best_imp = None, float('-inf')
for ctx in ctxs:
# Find the best context and give its improvement.
pt, imp = self._get_ctx_improvement(ctx)
if imp > best_imp:
best_pt, best_imp = pt, imp
# Return the best context and action.
return best_pt
def _get_ctx_improvement(self, ctx):
"""Get the improvement for the context.
Args:
ctx: ndarray characterizing the context.
Returns: Best action and the improvement it provides.
"""
raise NotImplementedError('Abstract Method')
class ProfileEI(ProfileOpt):
@staticmethod
def get_strat_name():
"""Get the name of the strategies."""
return 'pei'
def _get_ctx_improvement(self, ctx, predict=False):
"""Get expected improvement over best posterior mean capped by
the best seen reward so far.
"""
if self.opt_sampling is False:
return self._get_ctx_improvement_no_sampling(ctx, predict=predict)
#if predict:
# _, act = self.get_maximal_mean(ctx)
# return np.hstack((ctx, act))
act_set = sample_grid([list(ctx)], self.act_domain, self.profile_evals)
means, covmat = self.gp.eval(act_set, include_covar=True)
        best_post = np.min([np.max(means)
## This is python reference code for the opencl kernel _hbk_lamb_grid_sampler.
## Imports
import numpy as np
root_2 = 1.4142135623730951
tau = 6.283185307179586
medium_wavelength = 0.008575
def hbk_lamb_grid_sampler_ref(
required_resolution,
radius,
N,
x0,
y0,
z0
):
'''
---------------------------------------------
hbk_lamb_grid_sampler_ref( required_resolution,radius,N,x0,y0,z0)
---------------------------------------------
This method generates a hemispherical grid of sampling points
using the lambert projection.
Parameters
-----------
required_resolution : float
Distance between sampling points in the grid.
x0 : float
x-coordinate of the origin of the grid.
y0 : float
y-coordinate of the origin of the grid.
z0 : float
z-coordinate of the origin of the grid.
radius : float
Radius of the hemisphere defining the lambert projection.
'''
no_points_required = np.ceil((tau*radius)/required_resolution)
density = (2*root_2) / no_points_required
N = len(np.arange(-1,1,density))
# Initialise lists to store the sampling grid coordinates.
x_points = []
y_points = []
z_points = []
# Perform the lambert equi-area projection to generate hemispherical
# sampling points.
for idx_x in range(N):
for idx_y in range(N):
x_base = (-1 + density * idx_x ) * root_2
y_base = (-1 + density * idx_y ) * root_2
rho = np.sqrt(x_base * x_base + y_base * y_base)
c = 2 * np.arcsin(0.5*rho)
phi = np.arcsin(np.cos(c)) / rho
l = np.arctan2( (x_base * np.sin(c)), -y_base*np.sin(c))
            cos_phi = np.cos(phi)
import math
import numpy as np
import pandas as pd
from multiprocessing import Pool
from scipy.special import expit
from scipy.stats import beta
from opaque.beta_regression import BetaRegressor
from opaque.stats import equal_tailed_interval, KL_beta
from opaque.simulations.prevalence import run_trial_for_theta
class EndtoEndSimulator:
def __init__(
self,
sens_coefs_mean,
sens_coefs_disp,
spec_coefs_mean,
spec_coefs_disp,
sens_noise_mean=0.0,
sens_noise_disp=0.0,
spec_noise_mean=0.0,
spec_noise_disp=0.0,
cov=None,
n_mean=6.0,
n_sigma=1.0,
random_state=None,
n_jobs=1,
):
if cov is None:
cov = np.diag(np.full(len(sens_coefs_mean) - 1, 1.0))
else:
cov = np.array(cov)
if random_state is None:
self.random_state = np.random.RandomState()
elif isinstance(random_state, int):
self.random_state = np.random.RandomState(random_state)
else:
self.random_state = random_state
assert len(sens_coefs_mean) == len(sens_coefs_disp) == cov.shape[0] + 1
assert len(spec_coefs_mean) == len(spec_coefs_disp) == cov.shape[0] + 1
self.sens_coefs_mean = np.array(sens_coefs_mean)
self.sens_coefs_disp = np.array(sens_coefs_disp)
self.spec_coefs_mean = np.array(spec_coefs_mean)
self.spec_coefs_disp = np.array(spec_coefs_disp)
self.sens_noise_mean = sens_noise_mean
self.sens_noise_disp = sens_noise_disp
self.spec_noise_mean = spec_noise_mean
self.spec_noise_disp = spec_noise_disp
self.cov = cov
self.num_covariates = cov.shape[0]
self.n_mean = n_mean
self.n_sigma = n_sigma
self.n_jobs = n_jobs
def generate_data(self, size):
X = self.random_state.multivariate_normal(
np.zeros(self.cov.shape[0]), self.cov, size=size
)
X = np.hstack([np.full((X.shape[0], 1), 1), X])
sens_mu = expit(
X.dot(self.sens_coefs_mean)
+ np.random.normal(0, self.sens_noise_mean, size=size)
)
sens_nu = np.exp(
X.dot(self.sens_coefs_disp)
+ self.random_state.normal(0, self.sens_noise_disp, size=size)
)
sens_prior = beta(sens_mu * sens_nu, (1 - sens_mu) * sens_nu)
sens_prior.random_state = self.random_state
sens = sens_prior.rvs()
spec_mu = expit(
X.dot(self.spec_coefs_mean)
+ np.random.normal(0, self.spec_noise_mean, size=size)
)
spec_nu = np.exp(
X.dot(self.spec_coefs_disp)
+ np.random.normal(0, self.spec_noise_disp, size=size)
)
spec_prior = beta(spec_mu * spec_nu, (1 - spec_mu) * spec_nu)
spec_prior.random_state = self.random_state
spec = spec_prior.rvs()
sens.shape = sens_mu.shape = sens_nu.shape = (size, 1)
spec.shape = spec_mu.shape = spec_nu.shape = (size, 1)
data = np.hstack(
[
X[:, 1:],
sens,
spec,
sens_mu,
sens_nu,
spec_mu,
spec_nu,
sens_mu * sens_nu,
(1 - sens_mu) * sens_nu,
spec_mu * spec_nu,
(1 - spec_mu) * spec_nu,
]
)
data = pd.DataFrame(
data,
columns=[f"X{i}" for i in range(self.num_covariates)]
+ [
"sens",
"spec",
"sens_mu",
"sens_nu",
"spec_mu",
"spec_nu",
"sens_a",
"sens_b",
"spec_a",
"spec_b",
],
)
return data
def simulate_anomaly_detection(self, sens_list, spec_list):
points = (
(
self.random_state.random_sample(),
sens,
spec,
math.floor(
self.random_state.lognormal(
mean=self.n_mean,
sigma=self.n_sigma
)
),
np.random.RandomState(self.random_state.randint(10 ** 6)),
)
for sens, spec in zip(sens_list, spec_list)
)
with Pool(self.n_jobs) as pool:
results = pool.starmap(run_trial_for_theta, points)
return results
def run(self, size_train=1000, size_test=200):
data_train = self.generate_data(size=size_train)
data_test = self.generate_data(size=size_test)
X_train = data_train.iloc[:, : self.num_covariates].values
X_test = data_test.iloc[:, : self.num_covariates].values
sens_train = data_train["sens"].values
spec_train = data_train["spec"].values
sens_test = data_test["sens"].values
spec_test = data_test["spec"].values
br = BetaRegressor()
br.fit(X_train, sens_train)
sens_shape = br.predict_shape_params(X_test)
br.fit(X_train, spec_train)
spec_shape = br.predict_shape_params(X_test)
ad = self.simulate_anomaly_detection(sens_test, spec_test)
points = []
rows = []
for i in range(len(ad)):
n, t, theta = ad[i]
sens_a_est, sens_b_est = sens_shape[i, :]
spec_a_est, spec_b_est = spec_shape[i, :]
sens_a, sens_b = data_test.iloc[i, -4], data_test.iloc[i, -3]
spec_a, spec_b = data_test.iloc[i, -2], data_test.iloc[i, -1]
point = [n, t, sens_a_est, sens_b_est, spec_a_est, spec_b_est]
points.append(point)
rows.append(
point
+ [
sens_a,
sens_b,
spec_a,
spec_b,
KL_beta(sens_a, sens_b, sens_a_est, sens_b_est),
KL_beta(spec_a, spec_b, spec_a_est, spec_b_est),
theta,
]
)
with Pool(self.n_jobs) as pool:
intervals = pool.starmap(equal_tailed_interval, points)
data = np.array(rows)
intervals = np.array(intervals)
        data = np.hstack([data, intervals])
import unittest
import sys
from os.path import dirname, abspath
import numpy as np
from scipy.stats import spearmanr, pearsonr
from halla.utils import stats
from utils import compare_numpy_array
class TestStatsUtils(unittest.TestCase):
'''Tests the p-value permutation test function; extreme cases
'''
def test_compute_permutation_test_pvalue_significant(self):
np.random.seed(1)
eps = 0.001
# source: https://machinelearningmastery.com/how-to-calculate-nonparametric-rank-correlation-in-python/
x = np.random.normal(size=1000) * 20
y = x + np.random.normal(size=1000) * 10
# get expected p-value
_, expected_pvalue = spearmanr(x, y)
test_pvalue = stats.compute_permutation_test_pvalue(x, y, pdist_metric='spearman',
permute_func='ecdf', speedup=False, iters=1000, seed=123)
self.assertLessEqual(abs(test_pvalue - expected_pvalue), eps)
def test_compute_permutation_test_pvalue_significant_speedup(self):
np.random.seed(1)
eps = 0.001
# source: https://machinelearningmastery.com/how-to-calculate-nonparametric-rank-correlation-in-python/
x = np.random.normal(size=1000) * 20
y = x + np.random.normal(size=1000) * 10
# get expected p-value
_, expected_pvalue = spearmanr(x, y)
test_pvalue = stats.compute_permutation_test_pvalue(x, y, pdist_metric='spearman',
permute_func='ecdf', speedup=True, alpha=0.05, iters=1000, seed=123)
self.assertLessEqual(abs(test_pvalue - expected_pvalue), eps)
def test_compute_permutation_test_pvalue_insignificant(self):
np.random.seed(2)
eps = 0.02
x = np.random.normal(size=1000)
y = np.random.normal(size=1000)
_, expected_pvalue = pearsonr(x,y)
test_pvalue = stats.compute_permutation_test_pvalue(x, y, pdist_metric='pearson',
permute_func='ecdf', speedup=False, iters=1000, seed=123)
self.assertLessEqual(abs(test_pvalue - expected_pvalue), eps)
def test_compute_permutation_test_pvalue_insignificant_speedup(self):
np.random.seed(2)
x = | np.random.normal(size=1000) | numpy.random.normal |
"""
Parallax fitting and computation of distances
"""
import os
import warnings
import collections.abc
from bisect import bisect_left
import h5py
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d
from astropy.coordinates import SkyCoord
from healpy import ang2pix
from dustmaps.sfd import SFDQuery
from dustmaps.bayestar import BayestarWebQuery
from astropy.utils.exceptions import AstropyWarning
import basta.utils_distances as udist
import basta.constants as cnsts
import basta.stats as stats
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Don't print Astropy warnings (catch error caused by mock'ing astropy in Sphinx)
try:
warnings.filterwarnings("ignore", category=AstropyWarning)
except AssertionError:
pass
try:
from basta._dustpath import __dustpath__
except ModuleNotFoundError:
print("\nCannot find path to dustmaps. Did you run 'setup.py'?\n")
raise
def LOS_reddening(distanceparams):
"""
Returns color excess E(B-V) for a line of sight using a
pre-downloaded 3D extinction map provided by Green et al. 2015/2018 - see
http://argonaut.skymaps.info/.
The extinction map is only computed for distance modulus between
:math:`4 < m-M < 19` in units of magnitude.
Parameters
----------
distanceparams : dictionary
Dictionary with distance parameters
Returns
-------
EBV : function
excess color function
"""
if "EBV" in distanceparams:
return lambda x: np.asarray(
np.random.normal(
distanceparams["EBV"][1],
distanceparams["EBV"][2] - distanceparams["EBV"][1],
size=[
                    len(i) if isinstance(i, collections.abc.Iterable) else 1 for i in [x]
][0],
)
)
frame = distanceparams["dustframe"]
# Convert to galactic coordinates
if frame == "icrs":
ra = distanceparams["RA"]
dec = distanceparams["DEC"]
c = SkyCoord(ra=ra, dec=dec, frame="icrs", unit="deg")
elif frame == "galactic":
lon = distanceparams["lon"]
lat = distanceparams["lat"]
c = SkyCoord(l=lon, b=lat, frame="galactic", unit="deg")
else:
raise ValueError("Unknown dust map frame for computing reddening!")
# Load extinction data cube
pathmap = os.path.join(__dustpath__, "bayestar/bayestar2019.h5")
dcube = h5py.File(pathmap, "r")
# Distance modulus bins
bin_edges = dcube["/pixel_info"].attrs["DM_bin_edges"]
dmbin = bin_edges + (bin_edges[1] - bin_edges[0]) / 2.0
# If webquery fails use local copy of dustmap
try:
bayestar = BayestarWebQuery(version="bayestar2019")
Egr_samples = bayestar(c, mode="samples")
except Exception:
# contains positional info
pinfo = dcube["/pixel_info"][:]
nsides = np.unique(dcube["/pixel_info"][:]["nside"])
# Convert coordinates to galactic frame
lon = c.galactic.l.deg
lat = c.galactic.b.deg
# Convert l,b[deg] to theta,phi[rad]
theta = np.pi / 2.0 - lat * np.pi / 180.0
phi = lon * np.pi / 180.0
# To check if we are within the maps coordinates
Egr_samples = np.array([np.nan])
# Run through nsides
for ncont in reversed(nsides):
healpixNside = ang2pix(ncont, theta, phi, nest=True)
indNside = np.where(np.asarray([x[0] for x in pinfo]) == ncont)[0]
dcubepixNside = [x[1] for x in pinfo[indNside]]
kNside = int(bisect_left(dcubepixNside, healpixNside)) + indNside[0]
if healpixNside == dcubepixNside[kNside - indNside[0]]:
index = kNside
Egr_samples = dcube["/samples"][index]
break
# If coordinates outside dust map, use Schegel
if np.isnan(Egr_samples).any():
print("WARNING: Coordinates outside dust map boundaries!")
print("Default to Schegel 1998 dust map")
sfd = SFDQuery()
EBV_fun = lambda x: np.full_like(x, sfd(c))
return EBV_fun
Egr_med, Egr_err = [], []
for i in range(len(dmbin)):
Egr_med.append(np.nanmedian(Egr_samples[:, i]))
Egr_err.append(np.nanstd(Egr_samples[:, i]))
Egr_med_fun = interp1d(
dmbin, Egr_med, bounds_error=False, fill_value=(0, np.max(Egr_med))
)
Egr_err_fun = interp1d(
dmbin, Egr_err, bounds_error=False, fill_value=np.max(Egr_err)
)
dcube.close()
def EBV_fun(dm):
Egr = np.asarray(np.random.normal(Egr_med_fun(dm), Egr_err_fun(dm)))
EBV = cnsts.extinction.Conv_Bayestar * Egr
return EBV
return EBV_fun
def get_EBV(dist, LOS_EBV, debug=False, outfilename=""):
"""
Estimate E(B-V) by drawing distances from a normal parallax
distribution with EDSD prior.
Parameters
-----
dist : array
The drawn distances
LOS_EBV : func
EBV function.
debug : bool, optional
Debug flag.
If True, this function outputs two plots, one of distance modulus
vs. E(B-V) and a histogram of the E(B-V).
outfilename : str, optional
Name of directory of where to put plots outputted if debug is True.
Returns
-------
EBVs : array
E(B-V) at distances
"""
dmod = 5 * np.log10(dist / 10)
EBVs = LOS_EBV(dmod)
if debug:
plt.figure()
plt.plot(dmod, EBVs, ".")
plt.xlabel("dmod")
plt.ylabel("E(B-V)")
plt.savefig(outfilename + "_DEBUG_dmod_EBVs.png")
plt.close()
return EBVs
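# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how LOS_reddening and get_EBV are meant to compose. It relies only on
# the "EBV" shortcut branch of LOS_reddening (16th/50th/84th percentiles given
# directly), so no dust-map files or sky coordinates are needed. The percentile
# values and distances below are made up purely for illustration.
def _example_reddening_usage():
    distanceparams = {"EBV": [0.02, 0.03, 0.04]}  # hypothetical [16th, 50th, 84th] percentiles
    LOS_EBV = LOS_reddening(distanceparams)       # callable: E(B-V) as a function of dmod
    dist = np.array([100.0, 500.0, 1000.0])       # hypothetical distances in pc
    return get_EBV(dist, LOS_EBV)                 # one E(B-V) draw per distance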
def get_absorption(EBV, fitparams, filt):
"""
Compute extinction coefficient Rzeta for band zeta.
Using parameterized law from Casagrande & VandenBerg 2014.
Valid for:
logg = 4.1
    Teff = 5250 to 7000 K
    [Fe/H] = -2.0 to +0.25
    [alpha/Fe] = -0.4 to +0.4
Assume nominal reddening law with RV=3.1. In a band zeta, Azeta = Rzeta*E(B-V).
Parameters
----------
EBV : array
E(B-V) values
fitparams : dict
The fitting params in inputparams.
filt : str
Name of the given filter
Returns
-------
R*EBV : array
Extinction coefficient times E(B-V)
"""
N = len(EBV)
table = cnsts.extinction.R
i_filter = table["Filter"] == filt
if not any(i_filter) or table["RZ_mean"][i_filter] == 0:
print("WARNING: Unknown extinction coefficient for filter: " + filt)
print(" Using reddening law coefficient R = 0.")
return np.zeros(N)
metal = "MeH" if "MeH" in fitparams else "FeH"
if "Teff" not in fitparams or metal not in fitparams:
R = np.ones_like(EBV) * table["RZ_mean"][i_filter].item()
else:
Teff_val, Teff_err = fitparams["Teff"]
metal_val, metal_err = fitparams[metal]
Teff = np.random.normal(Teff_val, Teff_err, size=N)
FeH = np.random.normal(metal_val, metal_err, size=N)
a0 = table["a0"][i_filter].item()
a1 = table["a1"][i_filter].item()
a2 = table["a2"][i_filter].item()
a3 = table["a3"][i_filter].item()
T4 = 1e-4 * Teff
R = a0 + T4 * (a1 + a2 * T4) + a3 * FeH
return R * EBV
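# --- Illustrative sketch of the extinction-coefficient parameterization ---
# get_absorption evaluates R_zeta = a0 + T4*(a1 + a2*T4) + a3*[Fe/H] with
# T4 = 1e-4*Teff and band-dependent coefficients from the constants table.
# The coefficients used here are placeholders, not the real table values.
def _example_extinction_coefficient(Teff=5777.0, FeH=0.0):
    a0, a1, a2, a3 = 3.0, 0.5, -0.1, 0.02  # hypothetical band coefficients
    T4 = 1e-4 * Teff
    R = a0 + T4 * (a1 + a2 * T4) + a3 * FeH
    return R  # multiply by E(B-V) to obtain the absorption A_zeta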
def add_absolute_magnitudes(
inputparams, n=1000, k=1000, outfilename="", debug=False, use_gaussian_priors=False
):
"""
Convert apparent magnitudes to absolute magnitudes using the distance
    Reddening E(B-V) is estimated from the Green et al. (2015) dust map.
    Reddening is converted to extinction in each band using Casagrande & VandenBerg (2014).
    The converted colors and magnitudes are added to fitparams.
Parameters
----------
inputparams : dict
Inputparams used in BASTA run.
n : int
Number of samples from parallax range
k : int
Number of samples from apparent magnitude range.
outfilename : str, optional
        Output path prefix for plots written when debug is True.
debug : bool, optional
Debug flag. If True, debugging plots will be outputted.
use_gaussian_priors : bool, optional
If True, gaussian priors will be used for apparent magnitude in
the distance computation.
Returns
-------
inputparams : dict
Modified version of inputparams including absolute magnitudes.
"""
if "parallax" not in inputparams["fitparams"]:
return inputparams
print("\nPreparing distance/parallax/magnitude input ...", flush=True)
qs = [0.158655, 0.5, 0.841345]
fitparams = inputparams["fitparams"]
distanceparams = inputparams["distanceparams"]
if use_gaussian_priors:
inputparams["fitparams"][filt] = [val, std]
return inputparams
# Get apparent magnitudes from input data
mobs = distanceparams["m"]
mobs_err = distanceparams["m_err"]
if len(mobs.keys()) == 0:
raise ValueError("No filters were given")
# Convert the inputted parallax in mas to as
plxobs = fitparams["parallax"][0] * 1e-3
plxobs_err = fitparams["parallax"][1] * 1e-3
L = udist.EDSD(None, None) * 1e3
fitparams.pop("parallax")
# Sample distances more densely around the mode of the distance distribution
# See Bailer-Jones 2015, Eq 19
coeffs = [1 / L, -2, plxobs / (plxobs_err**2), -1 / (plxobs_err**2)]
roots = np.roots(coeffs)
if np.sum((np.isreal(roots))) == 1:
(mode,) = np.real(roots[np.isreal(roots)])
else:
assert np.sum((np.isreal(roots))) == 3
if plxobs >= 0:
mode = np.amin(np.real(roots[np.isreal(roots)]))
else:
(mode,) = np.real(roots[roots > 0])
    # By sampling linearly in quantiles, the probability mass is equal for the samples
    qlow = scipy.stats.norm.cdf(0, loc=mode, scale=1000) + 0.01
    dist = scipy.stats.norm.ppf(
        np.linspace(qlow, 0.96, n - n // 2), loc=mode, scale=1000
    )
# We also want to sample across the entire range.
lindist = 10 ** np.linspace(-0.4, 4.4, n // 2)
assert np.all(np.isfinite(dist))
assert np.all(dist > 0)
dist = np.concatenate([dist, lindist])
dist = np.sort(dist)
lldist = udist.compute_distlikelihoods(
dist, plxobs, plxobs_err, L, outfilename=outfilename, debug=debug
)
dists = np.repeat(dist, k)
lldists = np.repeat(lldist, k)
# Get EBV values
LOS_EBV = LOS_reddening(distanceparams)
EBV = get_EBV(dist, LOS_EBV, debug=debug, outfilename=outfilename)
EBVs = np.repeat(EBV, k)
distanceparams["As"] = {}
llabsms_joined = np.zeros(n * k)
for filt in mobs.keys():
# Sample apparent magnitudes over the entire parameter range
if filt in cnsts.distanceranges.filters:
m = np.linspace(
cnsts.distanceranges.filters[filt]["min"],
cnsts.distanceranges.filters[filt]["max"],
k - k // 2,
)
else:
m = np.linspace(-10, 25, k - k // 2)
m = np.concatenate(
[
m,
scipy.stats.norm.ppf(
| np.linspace(0.04, 0.96, k // 2) | numpy.linspace |
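# --- Illustrative sketch: distance-posterior mode with an EDSD prior ---
# The block above locates the mode of the distance posterior (Bailer-Jones 2015,
# Eq. 19) by solving the cubic r^3/L - 2*r^2 + (plx/sigma^2)*r - 1/sigma^2 = 0.
# This standalone sketch repeats that root selection; the default argument
# values are made up for illustration, not taken from the code above.
def _example_edsd_mode(plxobs=1e-3, plxobs_err=2e-4, L=1.35e3):
    coeffs = [1 / L, -2, plxobs / (plxobs_err**2), -1 / (plxobs_err**2)]
    roots = np.roots(coeffs)
    real_roots = np.real(roots[np.isreal(roots)])
    if len(real_roots) == 1:
        return real_roots[0]
    # with three real roots and a non-negative parallax, take the smallest positive one
    if plxobs >= 0:
        return np.amin(real_roots[real_roots > 0])
    return real_roots[real_roots > 0][0]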
import numpy as np
from numpy.linalg import inv, norm
from math import sqrt, acos, cos, tan
def stereo_proj(p, z):
r'''
For a given zone axis z in Z^3 of a crystal,
it calculates the stereoprojection of vector p.
    Note that if p denotes a plane of the crystal,
    it should be written in its reciprocal basis.
It always computes the projection from the south pole.
'''
R = norm(z)
zhat = z/norm(z)
Q = -np.eye(3) + 2*np.outer(zhat,zhat)
if z[0]**2+z[1]**2>1e-6:
z_1 = np.array([z[1],-z[0],0])
z_1 = z_1/norm(z_1)
z_2 = np.cross(z, z_1)
z_2 = z_2/norm(z_2)
else:
z_1 = np.array([1,0,0])
z_2 = np.array([0,1,0])
if | norm(p) | numpy.linalg.norm |
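# --- Illustrative sketch (the original stereo_proj is cut off above) ---
# A minimal south-pole stereographic projection: express p in an orthonormal
# frame (z_1, z_2, zhat) attached to the zone axis z, scale p onto the sphere
# of radius |z|, then project from the south pole onto the equatorial plane.
# This is an independent illustration of the idea described in the docstring,
# not the author's full routine.
def _stereo_proj_sketch(p, z):
    R = norm(z)
    zhat = z/norm(z)
    if z[0]**2 + z[1]**2 > 1e-6:
        z_1 = np.array([z[1], -z[0], 0.0])
        z_1 = z_1/norm(z_1)
    else:
        z_1 = np.array([1.0, 0.0, 0.0])
    z_2 = np.cross(zhat, z_1)
    P = R*np.asarray(p, dtype=float)/norm(p)       # point on the sphere of radius R
    x, y, h = np.dot(P, z_1), np.dot(P, z_2), np.dot(P, zhat)
    s = R/(R + h)                                  # projection factor from the south pole
    return np.array([s*x, s*y])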
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 08:48:56 2020
@author: fuchsca
"""
import cv2
import numpy as np
#img_gray=cv2.imread('images/fond-gris.jpg',0)
#img_bgr=cv2.imread('images/voiture.jpg',1)
img_8k=cv2.imread('images/8k.jpg',1)
#print("Gray levels image shape ="+ str(img_gray.shape))
#print("BGR image shape= "+str(img_bgr.shape))
#cv2.imshow("gra levels image",img_gray)
#cv2.imshow("BGR image",img_bgr)
#cv2.waitKey()
def invert_colors_manual(input_img: np.ndarray):
image = np.asarray(input_img)
nb_lignes,nb_colonnes,_ = image.shape
image_sortie = | np.copy(image) | numpy.copy |
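# --- Illustrative sketch (the original invert_colors_manual is cut off above) ---
# The likely intent of the function is a manual colour inversion, replacing
# every 8-bit value v by 255 - v; this vectorised version is a sketch under
# that assumption, equivalent to cv2.bitwise_not for uint8 images.
def _invert_colors_sketch(input_img: np.ndarray) -> np.ndarray:
    image = np.asarray(input_img, dtype=np.uint8)
    return 255 - image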
#!/usr/bin/env python3
import cv2
# from cv2 import aruco
from tqdm import trange
import numpy as np
import os, os.path
from glob import glob
from collections import defaultdict
import pandas as pd
## TODO: rewrite this whole file with aniposelib
from .common import \
get_calibration_board, get_board_type, \
find_calibration_folder, make_process_fun, \
get_cam_name, get_video_name, load_intrinsics, load_extrinsics
from .triangulate import triangulate_optim, triangulate_simple, \
reprojection_error, reprojection_error_und
from .calibrate_extrinsics import detect_aruco, estimate_pose, fill_points
def expand_matrix(mtx):
z = np.zeros((4,4))
z[0:3,0:3] = mtx[0:3,0:3]
z[3,3] = 1
return z
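# --- Illustrative usage sketch for expand_matrix ---
# Embeds the rotation block of a matrix into a 4x4 homogeneous matrix; note
# that only the upper-left 3x3 block is copied, so any translation column of
# the input is deliberately dropped. The identity below is a stand-in value.
def _example_expand_matrix():
    rot = np.eye(3)            # stands in for a real 3x3 extrinsic rotation block
    hom = expand_matrix(rot)   # 4x4 with hom[0:3, 0:3] = rot and hom[3, 3] = 1
    return hom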
def process_trig_errors(config, fname_dict, cam_intrinsics, extrinsics, skip=20):
minlen = np.inf
caps = dict()
for cam_name, fname in fname_dict.items():
cap = cv2.VideoCapture(fname)
caps[cam_name] = cap
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
minlen = min(length, minlen)
cam_names = sorted(fname_dict.keys())
board = get_calibration_board(config)
cam_mats = []
cam_mats_dist = []
for cname in cam_names:
mat = np.array(extrinsics[cname])
left = np.array(cam_intrinsics[cname]['camera_mat'])
cam_mats.append(mat)
cam_mats_dist.append(left)
cam_mats = np.array(cam_mats)
cam_mats_dist = np.array(cam_mats_dist)
go = skip
all_points = []
framenums = []
all_rvecs = []
all_tvecs = []
for framenum in trange(minlen, desc='detecting', ncols=70):
row = []
rvecs = []
tvecs = []
for cam_name in cam_names:
intrinsics = cam_intrinsics[cam_name]
cap = caps[cam_name]
ret, frame = cap.read()
if framenum % skip != 0 and go <= 0:
continue
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# corners, ids = detect_aruco(gray, intrinsics)
detected, stuff = estimate_pose(gray, intrinsics, board)
if detected:
corners, ids, rvec, tvec = stuff
rvec = rvec.flatten()
tvec = tvec.flatten()
else:
corners = ids = None
rvec = np.zeros(3)*np.nan
tvec = np.zeros(3)*np.nan
points = fill_points(corners, ids, board)
points_flat = points.reshape(-1, 1, 2)
points_new = cv2.undistortPoints(
points_flat,
np.array(intrinsics['camera_mat']),
| np.array(intrinsics['dist_coeff']) | numpy.array |
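# --- Illustrative sketch: DLT triangulation of one point from N cameras ---
# The detection loop above collects undistorted 2D board points per camera;
# these are then triangulated against the camera matrices. The helper below is
# an independent direct-linear-transform sketch operating on (N, 2) image points
# and (N, 3, 4) projection matrices; it is not the imported triangulate_simple.
def _triangulate_dlt_sketch(points_2d, camera_mats):
    rows = []
    for (x, y), P in zip(points_2d, camera_mats):
        rows.append(x * P[2] - P[0])
        rows.append(y * P[2] - P[1])
    A = np.stack(rows)
    _, _, vh = np.linalg.svd(A)
    X = vh[-1]
    return X[:3] / X[3]  # homogeneous coordinates -> 3D point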
"""
Tests for the BNMTF Gibbs sampler.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
import numpy, math, pytest, itertools
from BNMTF.code.models.bnmtf_gibbs_optimised import bnmtf_gibbs_optimised
""" Test constructor """
def test_init():
# Test getting an exception when R and M are different sizes, and when R is not a 2D array.
R1 = numpy.ones(3)
M = numpy.ones((2,3))
I,J,K,L = 5,3,1,2
lambdaF = numpy.ones((I,K))
lambdaS = numpy.ones((K,L))
lambdaG = numpy.ones((J,L))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R1,M,K,L,priors)
assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional."
R2 = numpy.ones((4,3,2))
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R2,M,K,L,priors)
assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional."
R3 = numpy.ones((3,2))
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R3,M,K,L,priors)
assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively."
# Similarly for lambdaF, lambdaS, lambdaG
I,J,K,L = 2,3,1,2
R4 = numpy.ones((2,3))
lambdaF = numpy.ones((2+1,1))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R4,M,K,L,priors)
assert str(error.value) == "Prior matrix lambdaF has the wrong shape: (3, 1) instead of (2, 1)."
lambdaF = numpy.ones((2,1))
lambdaS = numpy.ones((1+1,2+1))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R4,M,K,L,priors)
assert str(error.value) == "Prior matrix lambdaS has the wrong shape: (2, 3) instead of (1, 2)."
lambdaS = numpy.ones((1,2))
lambdaG = numpy.ones((3,2+1))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R4,M,K,L,priors)
assert str(error.value) == "Prior matrix lambdaG has the wrong shape: (3, 3) instead of (3, 2)."
# Test getting an exception if a row or column is entirely unknown
lambdaF = numpy.ones((I,K))
lambdaS = numpy.ones((K,L))
lambdaG = numpy.ones((J,L))
M1 = [[1,1,1],[0,0,0]]
M2 = [[1,1,0],[1,0,0]]
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R4,M1,K,L,priors)
assert str(error.value) == "Fully unobserved row in R, row 1."
with pytest.raises(AssertionError) as error:
bnmtf_gibbs_optimised(R4,M2,K,L,priors)
assert str(error.value) == "Fully unobserved column in R, column 2."
# Finally, a successful case
I,J,K,L = 3,2,2,2
R5 = 2*numpy.ones((I,J))
lambdaF = numpy.ones((I,K))
lambdaS = numpy.ones((K,L))
lambdaG = numpy.ones((J,L))
M = numpy.ones((I,J))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
BNMTF = bnmtf_gibbs_optimised(R5,M,K,L,priors)
assert numpy.array_equal(BNMTF.R,R5)
assert numpy.array_equal(BNMTF.M,M)
assert BNMTF.I == I
assert BNMTF.J == J
assert BNMTF.K == K
assert BNMTF.L == L
assert BNMTF.size_Omega == I*J
assert BNMTF.alpha == alpha
assert BNMTF.beta == beta
assert numpy.array_equal(BNMTF.lambdaF,lambdaF)
assert numpy.array_equal(BNMTF.lambdaS,lambdaS)
assert numpy.array_equal(BNMTF.lambdaG,lambdaG)
# Test when lambdaF S G are integers
I,J,K,L = 3,2,2,2
R5 = 2*numpy.ones((I,J))
lambdaF = 3
lambdaS = 4
lambdaG = 5
M = numpy.ones((I,J))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
BNMTF = bnmtf_gibbs_optimised(R5,M,K,L,priors)
assert numpy.array_equal(BNMTF.R,R5)
assert numpy.array_equal(BNMTF.M,M)
assert BNMTF.I == I
assert BNMTF.J == J
assert BNMTF.K == K
assert BNMTF.L == L
assert BNMTF.size_Omega == I*J
assert BNMTF.alpha == alpha
assert BNMTF.beta == beta
assert numpy.array_equal(BNMTF.lambdaF,3*numpy.ones((I,K)))
assert numpy.array_equal(BNMTF.lambdaS,4*numpy.ones((K,L)))
assert numpy.array_equal(BNMTF.lambdaG,5*numpy.ones((J,L)))
""" Test initialing parameters """
def test_initialise():
I,J,K,L = 5,3,2,4
R = numpy.ones((I,J))
M = numpy.ones((I,J))
lambdaF = 2*numpy.ones((I,K))
lambdaS = 3*numpy.ones((K,L))
lambdaG = 4*numpy.ones((J,L))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
# First do a random initialisation - we can then only check whether values are correctly initialised
init_S = 'random'
init_FG = 'random'
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
assert BNMTF.tau >= 0.0
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMTF.F[i,k] >= 0.0
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert BNMTF.S[k,l] >= 0.0
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert BNMTF.G[j,l] >= 0.0
# Initialisation of S using random draws from prior
init_S, init_FG = 'random', 'exp'
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMTF.F[i,k] == 1./lambdaF[i,k]
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert BNMTF.S[k,l] != 1./lambdaS[k,l] # test whether we overwrote the expectation
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert BNMTF.G[j,l] == 1./lambdaG[j,l]
# Initialisation of F and G using random draws from prior
init_S, init_FG = 'exp', 'random'
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMTF.F[i,k] != 1./lambdaF[i,k] # test whether we overwrote the expectation
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert BNMTF.S[k,l] == 1./lambdaS[k,l]
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert BNMTF.G[j,l] != 1./lambdaG[j,l] # test whether we overwrote the expectation
# Initialisation of F and G using Kmeans
init_S, init_FG = 'exp', 'kmeans'
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMTF.F[i,k] == 0.2 or BNMTF.F[i,k] == 1.2
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert BNMTF.G[j,l] == 0.2 or BNMTF.G[j,l] == 1.2
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert BNMTF.S[k,l] == 1./lambdaS[k,l]
""" Test computing values for alpha, beta, mu, tau. """
I,J,K,L = 5,3,2,4
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0
lambdaF = 2*numpy.ones((I,K))
lambdaS = 3*numpy.ones((K,L))
lambdaG = 5*numpy.ones((J,L))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
init_S, init_FG = 'exp', 'exp'
# F = 1/2, S = 1/3, G = 1/5
# R - FSG.T = [[1]] - [[4/15]] = [[11/15]]
def test_alpha_s():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
alpha_s = alpha + 6.
assert BNMTF.alpha_s() == alpha_s
def test_beta_s():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
beta_s = beta + .5*(12*(11./15.)**2) #F*S = [[1/6+1/6=1/3,..]], F*S*G^T = [[1/15*4=4/15,..]]
assert abs(BNMTF.beta_s() - beta_s) < 0.00000000000001
def test_tauF():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
# S*G.T = [[4/15]], (S*G.T)^2 = [[16/225]], sum_j S*G.T = [[32/225,32/225],[48/225,48/225],[32/225,32/225],[32/225,32/225],[48/225,48/225]]
tauF = 3.*numpy.array([[32./225.,32./225.],[48./225.,48./225.],[32./225.,32./225.],[32./225.,32./225.],[48./225.,48./225.]])
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert abs(BNMTF.tauF(k)[i] - tauF[i,k]) < 0.000000000000001
def test_muF():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
tauF = 3.*numpy.array([[32./225.,32./225.],[48./225.,48./225.],[32./225.,32./225.],[32./225.,32./225.],[48./225.,48./225.]])
# Rij - Fi*S*Gj + Fik(Sk*Gj) = 11/15 + 1/2 * 4/15 = 13/15
# (Rij - Fi*S*Gj + Fik(Sk*Gj)) * (Sk*Gj) = 13/15 * 4/15 = 52/225
muF = 1./tauF * ( 3. * numpy.array([[2*(52./225.),2*(52./225.)],[3*(52./225.),3*(52./225.)],[2*(52./225.),2*(52./225.)],[2*(52./225.),2*(52./225.)],[3*(52./225.),3*(52./225.)]]) - lambdaF )
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert abs(BNMTF.muF(tauF[:,k],k)[i] - muF[i,k]) < 0.000000000000001
def test_tauS():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
# F outer G = [[1/10]], (F outer G)^2 = [[1/100]], sum (F outer G)^2 = [[12/100]]
tauS = 3.*numpy.array([[3./25.,3./25.,3./25.,3./25.],[3./25.,3./25.,3./25.,3./25.]])
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert abs(BNMTF.tauS(k,l) - tauS[k,l]) < 0.000000000000001
def test_muS():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
tauS = 3.*numpy.array([[3./25.,3./25.,3./25.,3./25.],[3./25.,3./25.,3./25.,3./25.]])
# Rij - Fi*S*Gj + Fik*Skl*Gjk = 11/15 + 1/2*1/3*1/5 = 23/30
# (Rij - Fi*S*Gj + Fik*Skl*Gjk) * Fik*Gjk = 23/30 * 1/10 = 23/300
muS = 1./tauS * ( 3. * numpy.array([[12*23./300.,12*23./300.,12*23./300.,12*23./300.],[12*23./300.,12*23./300.,12*23./300.,12*23./300.]]) - lambdaS )
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert abs(BNMTF.muS(tauS[k,l],k,l) - muS[k,l]) < 0.000000000000001
def test_tauG():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
# F*S = [[1/3]], (F*S)^2 = [[1/9]], sum_i F*S = [[4/9]]
tauG = 3.*numpy.array([[4./9.,4./9.,4./9.,4./9.],[4./9.,4./9.,4./9.,4./9.],[4./9.,4./9.,4./9.,4./9.]])
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert BNMTF.tauG(l)[j] == tauG[j,l]
def test_muG():
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init_S,init_FG)
BNMTF.tau = 3.
tauG = 3.*numpy.array([[4./9.,4./9.,4./9.,4./9.],[4./9.,4./9.,4./9.,4./9.],[4./9.,4./9.,4./9.,4./9.]])
# Rij - Fi*S*Gj + Gjl*(Fi*Sl)) = 11/15 + 1/5 * 1/3 = 12/15 = 4/5
# (Rij - Fi*S*Gj + Gjl*(Fi*Sl)) * (Fi*Sl) = 4/5 * 1/3 = 4/15
muG = 1./tauG * ( 3. * numpy.array([[4.*4./15.,4.*4./15.,4.*4./15.,4.*4./15.],[4.*4./15.,4.*4./15.,4.*4./15.,4.*4./15.],[4.*4./15.,4.*4./15.,4.*4./15.,4.*4./15.]]) - lambdaG )
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert abs(BNMTF.muG(tauG[:,l],l)[j] - muG[j,l]) < 0.000000000000001
""" Test some iterations, and that the values have changed in U and V. """
def test_run():
I,J,K,L = 10,5,3,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0
lambdaF = 2*numpy.ones((I,K))
lambdaS = 3*numpy.ones((K,L))
lambdaG = 4*numpy.ones((J,L))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
init = 'exp' #F=1/2,S=1/3,G=1/4
F_prior = numpy.ones((I,K))/2.
S_prior = numpy.ones((K,L))/3.
G_prior = numpy.ones((J,L))/4.
iterations = 15
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.initialise(init)
(Fs,Ss,Gs,taus) = BNMTF.run(iterations)
assert BNMTF.all_F.shape == (iterations,I,K)
assert BNMTF.all_S.shape == (iterations,K,L)
assert BNMTF.all_G.shape == (iterations,J,L)
assert BNMTF.all_tau.shape == (iterations,)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert Fs[0,i,k] != F_prior[i,k]
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
assert Ss[0,k,l] != S_prior[k,l]
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
assert Gs[0,j,l] != G_prior[j,l]
assert taus[1] != alpha/float(beta)
""" Test approximating the expectations for F, S, G, tau """
def test_approx_expectation():
burn_in = 2
thinning = 3 # so index 2,5,8 -> m=3,m=6,m=9
(I,J,K,L) = (5,3,2,4)
Fs = [numpy.ones((I,K)) * 3*m**2 for m in range(1,10+1)]
Ss = [numpy.ones((K,L)) * 2*m**2 for m in range(1,10+1)]
Gs = [numpy.ones((J,L)) * 1*m**2 for m in range(1,10+1)] #first is 1's, second is 4's, third is 9's, etc.
taus = [m**2 for m in range(1,10+1)]
expected_exp_tau = (9.+36.+81.)/3.
expected_exp_F = numpy.array([[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.]])
expected_exp_S = numpy.array([[(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.)],[(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.)]])
expected_exp_G = numpy.array([[(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.)],[(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.)],[(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.),(9.+36.+81.)*(1./3.)]])
R = numpy.ones((I,J))
M = numpy.ones((I,J))
lambdaF = 2*numpy.ones((I,K))
lambdaS = 3*numpy.ones((K,L))
lambdaG = 4*numpy.ones((J,L))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF, 'lambdaS':lambdaS, 'lambdaG':lambdaG }
BNMTF = bnmtf_gibbs_optimised(R,M,K,L,priors)
BNMTF.all_F = Fs
BNMTF.all_S = Ss
BNMTF.all_G = Gs
BNMTF.all_tau = taus
(exp_F, exp_S, exp_G, exp_tau) = BNMTF.approx_expectation(burn_in,thinning)
assert expected_exp_tau == exp_tau
assert numpy.array_equal(expected_exp_F,exp_F)
assert numpy.array_equal(expected_exp_S,exp_S)
assert | numpy.array_equal(expected_exp_G,exp_G) | numpy.array_equal |
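# --- Illustrative sketch of the burn-in/thinning estimate tested above ---
# approx_expectation is expected to average every `thinning`-th draw after
# discarding the first `burn_in` iterations; with burn_in=2, thinning=3 and ten
# draws this selects indices 2, 5, 8 (m = 3, 6, 9), matching the expected values.
def _approx_expectation_sketch(draws, burn_in, thinning):
    selected = draws[burn_in::thinning]
    return sum(selected) / float(len(selected))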
'''
This file contains the regression model used to fit surrogate models
to sampled data. More specifically, the class below defines both the
Pyomo model used for surrogate fitting, the information criteria used
to perform model selection, and helper methods to interact with these.
Note that this is closely related to the file `surrogate.py`, which
defines the Surrogate class used to instantiate Regression objects.
'''
from pyomo.environ import *
from pyomo.opt import *
import numpy as np
import sys
class Regression:
'''
This object is used to:
* Generate penalized regression models for surrogate construction;
* Fit these regression models to previously obtained sample data;
* Automatically select the penalty based on information criteria.
Note that the information criteria used for penalty selection are
defined at the end of this class, and new ones can easily be added.
'''
def __init__(self, surrogate):
'''
Argument:
Surrogate model object containing all the model details.
        Result:
            Stores an abstract Pyomo model (self.model) which can be used for regression.
'''
# Create an abstract optimization model (concrete data can be loaded later)
self.model = AbstractModel()
# Declare the sets used for variable indices etc.
self.model.SampleSet = Set(dimen=1, ordered=True)
self.model.IndvarSet = RangeSet(0, surrogate.dim - 1, 1)
self.model.RegressorSet = RangeSet(0, len(surrogate.index) - 1, 1)
# Set without first element (representing constant term θ₀)
self.model.ConstraintSet = RangeSet(1, len(surrogate.index) - 1, 1)
# Scaled versions of the independent variables
self.model.x = Param(self.model.IndvarSet, self.model.SampleSet)
# Actual output model/plant output
self.model.z = Param(self.model.SampleSet)
# LASSO regularization parameter λ (Lagrange multiplier)
self.model.penalty = Param(domain=Reals, mutable=True)
        # Regression parameters (initialized to 0; currently unbounded)
self.model.theta = Var(self.model.RegressorSet, domain=Reals, initialize=0, bounds=(None,None))
# Reformulate L1 terms as constraints and auxiliary variables
        # (The reformulation is |θ| = θ+ + θ- s.t. θ = θ+ - θ- with θ+, θ- >= 0)
self.model.theta_p = Var(self.model.ConstraintSet, domain=NonNegativeReals)
self.model.theta_m = Var(self.model.ConstraintSet, domain=NonNegativeReals)
self.model.constraint = \
Constraint(self.model.ConstraintSet, rule=lambda m, i:
m.theta_p[i] - m.theta_m[i] == m.theta[i])
# Save a copy of the surrogate object inside the model
self.surrogate = surrogate
# Define the objective function used for LASSO regression
# (Note that for penalty=0, we get ordinary least squares.)
self.model.objective = \
Objective(sense=minimize, rule=lambda m:
(sum((m.z[i] - self.surrogate(m.x, m.theta, pos=i))**2 for i in m.SampleSet) \
+ m.penalty * sum(m.theta_p[j] + m.theta_m[j] for j in m.ConstraintSet)))
# Check what information criterion to use to set the penalty
key = 'regpen_crit'
val = surrogate.conf[key]
try:
self.criterion = getattr(self, 'criterion_' + val)
        except AttributeError:
raise ValueError('"{}" cannot be set to "{}".'.format(key, val))
# Load miscellaneous config options
self.penalty_lim = surrogate.conf['regpen_lim']
self.penalty_num = surrogate.conf['regpen_num']
self.penalty_lb = surrogate.conf['regpen_lb']
self.penalty_ub = surrogate.conf['regpen_ub']
def fit(self, coords, results, penalty):
'''
This function performs a regression to a Pyomo model using provided
sampling coordinates and sampling results for a given penalty value.
Arguments:
coords:
2D Numpy array containing all sampling coordinates.
results:
Values of the true function at those coordinates.
penalty:
Size of the regression penalty factor.
'''
# Pack sampling data into a Pyomo-compatible format
data = {}
data['penalty'] = {None: penalty}
data['SampleSet'] = {None: [i for i, _ in enumerate(coords)]}
data['x'] = {(i,j) : v for (j,i), v in np.ndenumerate(coords)}
data['z'] = { i : v for i, v in np.ndenumerate(results)}
# Instantiate the Pyomo model using this data
instance = self.model.create_instance({None: data})
# Perform the regression using ipopt
ipopt = SolverFactory('ipopt', warmstart=True)
output = ipopt.solve(instance, load_solutions=False)
# Check if the regression succeeded
if output.solver.termination_condition == TerminationCondition.optimal:
# Extract the successful results
instance.solutions.load_from(output)
# Extract the regression parameters
self.theta = [value(instance.theta[i]) for i in instance.RegressorSet]
# Eliminate parameters below cutoff
pmax = max(np.abs(self.theta))
for i, p in enumerate(self.theta):
if np.abs(p/pmax) < self.penalty_lim:
self.theta[i] = 0
# Calculate the model error. The infinitesimal constant added
# at the end prevents crashes when calculating information criteria
# for models that are *exactly* correct (which can happen for trivial
# test functions such as f(x)=1-x, but rarely happens in real life).
self.error = sum((results[i] - self.surrogate(xi, self.theta))**2
for i, xi in enumerate(coords))/len(results) + 1e-256
else:
# Use fallback values in case of failure
self.theta = [np.nan for i in instance.RegressorSet]
self.error = np.nan
def autofit(self, coords, results):
'''
This function tries to find an optimal penalized regression
model using an information criterion to compare penalties.
In other words, while Regression.fit requires the user to
manually set a regression penalty, Regression.autofit will
autodetect an optimal penalty using an information criterion.
What information criterion is used (AICC, AIC, BIC, etc.)
can be specified by the user at runtime using `config.ini`.
TODO:
* In order to speed up the implementation, we might want to look into
whether we can reuse one Pyomo instance instead of recreating it.
(This might be possible if we mark all parameters as 'Mutable'.)
* We might also want to change the logspace-search to a more efficient
optimization algorithm if determining λ is a performance bottleneck.
'''
# Define the penalty search space
domain = np.logspace(np.log10(self.penalty_lb),
np.log10(self.penalty_ub),
self.penalty_num)
# Storage for information criteria
values = np.zeros(np.size(domain))
# Perform regressions for each penalty value
print('Penalty \tCriterion')
for i, penalty in enumerate(domain):
# Perform a new regression for this penalty
self.fit(coords, results, penalty)
# Count the number of finite parameters
params = len(np.nonzero(self.theta)[0])
# Count the total sample size
samples = len(results)
# Evaluate information criterion
values[i] = self.criterion(samples, params, self.error)
# Print status information
if np.isnan(values[i]):
# Failed results
values[i] = np.inf
print('% 8.4e\t [diverged]' % penalty)
else:
# Valid results
print('% 8.4e\t% 8.4e' % (penalty, values[i]))
# Find the penalty that minimizes the information criterion
penalty = domain[np.argmin(values)]
# Perform one last fit using that result
self.fit(coords, results, penalty)
# Confirm that the final results are usable
if | np.isnan(self.error) | numpy.isnan |
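# --- Illustrative sketch of the L1 reformulation used in the Pyomo model ---
# Each penalized coefficient is split as theta = theta_p - theta_m with
# theta_p, theta_m >= 0, so |theta| can be written as the linear expression
# theta_p + theta_m inside the objective. A tiny numeric illustration:
def _l1_split_sketch(theta):
    theta_p = max(theta, 0.0)
    theta_m = max(-theta, 0.0)
    assert abs((theta_p - theta_m) - theta) < 1e-12
    return theta_p + theta_m  # equals abs(theta)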
"""Main module."""
import time as _time
import datetime as _datetime
from copy import deepcopy as _dcopy
import numpy as _np
from scipy.optimize import least_squares as _least_squares
import matplotlib.pyplot as _plt
import matplotlib.gridspec as _mpl_gs
import matplotlib.cm as _cmap
from siriuspy.namesys import SiriusPVName as _PVName
from siriuspy.devices import SOFB as _SOFB, PowerSupply as _PowerSupply, \
CurrInfoSI as _CurrInfoSI
from siriuspy.clientconfigdb import ConfigDBClient
from siriuspy.epics import PV as _PV
import pyaccel as _pyacc
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class BBAParams(_ParamsBaseClass):
"""."""
BPMNAMES = (
'SI-01M2:DI-BPM', 'SI-01C1:DI-BPM-1',
'SI-01C1:DI-BPM-2', 'SI-01C2:DI-BPM',
'SI-01C3:DI-BPM-1', 'SI-01C3:DI-BPM-2',
'SI-01C4:DI-BPM', 'SI-02M1:DI-BPM',
'SI-02M2:DI-BPM', 'SI-02C1:DI-BPM-1',
'SI-02C1:DI-BPM-2', 'SI-02C2:DI-BPM',
'SI-02C3:DI-BPM-1', 'SI-02C3:DI-BPM-2',
'SI-02C4:DI-BPM', 'SI-03M1:DI-BPM',
'SI-03M2:DI-BPM', 'SI-03C1:DI-BPM-1',
'SI-03C1:DI-BPM-2', 'SI-03C2:DI-BPM',
'SI-03C3:DI-BPM-1', 'SI-03C3:DI-BPM-2',
'SI-03C4:DI-BPM', 'SI-04M1:DI-BPM',
'SI-04M2:DI-BPM', 'SI-04C1:DI-BPM-1',
'SI-04C1:DI-BPM-2', 'SI-04C2:DI-BPM',
'SI-04C3:DI-BPM-1', 'SI-04C3:DI-BPM-2',
'SI-04C4:DI-BPM', 'SI-05M1:DI-BPM',
'SI-05M2:DI-BPM', 'SI-05C1:DI-BPM-1',
'SI-05C1:DI-BPM-2', 'SI-05C2:DI-BPM',
'SI-05C3:DI-BPM-1', 'SI-05C3:DI-BPM-2',
'SI-05C4:DI-BPM', 'SI-06M1:DI-BPM',
'SI-06M2:DI-BPM', 'SI-06C1:DI-BPM-1',
'SI-06C1:DI-BPM-2', 'SI-06C2:DI-BPM',
'SI-06C3:DI-BPM-1', 'SI-06C3:DI-BPM-2',
'SI-06C4:DI-BPM', 'SI-07M1:DI-BPM',
'SI-07M2:DI-BPM', 'SI-07C1:DI-BPM-1',
'SI-07C1:DI-BPM-2', 'SI-07C2:DI-BPM',
'SI-07C3:DI-BPM-1', 'SI-07C3:DI-BPM-2',
'SI-07C4:DI-BPM', 'SI-08M1:DI-BPM',
'SI-08M2:DI-BPM', 'SI-08C1:DI-BPM-1',
'SI-08C1:DI-BPM-2', 'SI-08C2:DI-BPM',
'SI-08C3:DI-BPM-1', 'SI-08C3:DI-BPM-2',
'SI-08C4:DI-BPM', 'SI-09M1:DI-BPM',
'SI-09M2:DI-BPM', 'SI-09C1:DI-BPM-1',
'SI-09C1:DI-BPM-2', 'SI-09C2:DI-BPM',
'SI-09C3:DI-BPM-1', 'SI-09C3:DI-BPM-2',
'SI-09C4:DI-BPM', 'SI-10M1:DI-BPM',
'SI-10M2:DI-BPM', 'SI-10C1:DI-BPM-1',
'SI-10C1:DI-BPM-2', 'SI-10C2:DI-BPM',
'SI-10C3:DI-BPM-1', 'SI-10C3:DI-BPM-2',
'SI-10C4:DI-BPM', 'SI-11M1:DI-BPM',
'SI-11M2:DI-BPM', 'SI-11C1:DI-BPM-1',
'SI-11C1:DI-BPM-2', 'SI-11C2:DI-BPM',
'SI-11C3:DI-BPM-1', 'SI-11C3:DI-BPM-2',
'SI-11C4:DI-BPM', 'SI-12M1:DI-BPM',
'SI-12M2:DI-BPM', 'SI-12C1:DI-BPM-1',
'SI-12C1:DI-BPM-2', 'SI-12C2:DI-BPM',
'SI-12C3:DI-BPM-1', 'SI-12C3:DI-BPM-2',
'SI-12C4:DI-BPM', 'SI-13M1:DI-BPM',
'SI-13M2:DI-BPM', 'SI-13C1:DI-BPM-1',
'SI-13C1:DI-BPM-2', 'SI-13C2:DI-BPM',
'SI-13C3:DI-BPM-1', 'SI-13C3:DI-BPM-2',
'SI-13C4:DI-BPM', 'SI-14M1:DI-BPM',
'SI-14M2:DI-BPM', 'SI-14C1:DI-BPM-1',
'SI-14C1:DI-BPM-2', 'SI-14C2:DI-BPM',
'SI-14C3:DI-BPM-1', 'SI-14C3:DI-BPM-2',
'SI-14C4:DI-BPM', 'SI-15M1:DI-BPM',
'SI-15M2:DI-BPM', 'SI-15C1:DI-BPM-1',
'SI-15C1:DI-BPM-2', 'SI-15C2:DI-BPM',
'SI-15C3:DI-BPM-1', 'SI-15C3:DI-BPM-2',
'SI-15C4:DI-BPM', 'SI-16M1:DI-BPM',
'SI-16M2:DI-BPM', 'SI-16C1:DI-BPM-1',
'SI-16C1:DI-BPM-2', 'SI-16C2:DI-BPM',
'SI-16C3:DI-BPM-1', 'SI-16C3:DI-BPM-2',
'SI-16C4:DI-BPM', 'SI-17M1:DI-BPM',
'SI-17M2:DI-BPM', 'SI-17C1:DI-BPM-1',
'SI-17C1:DI-BPM-2', 'SI-17C2:DI-BPM',
'SI-17C3:DI-BPM-1', 'SI-17C3:DI-BPM-2',
'SI-17C4:DI-BPM', 'SI-18M1:DI-BPM',
'SI-18M2:DI-BPM', 'SI-18C1:DI-BPM-1',
'SI-18C1:DI-BPM-2', 'SI-18C2:DI-BPM',
'SI-18C3:DI-BPM-1', 'SI-18C3:DI-BPM-2',
'SI-18C4:DI-BPM', 'SI-19M1:DI-BPM',
'SI-19M2:DI-BPM', 'SI-19C1:DI-BPM-1',
'SI-19C1:DI-BPM-2', 'SI-19C2:DI-BPM',
'SI-19C3:DI-BPM-1', 'SI-19C3:DI-BPM-2',
'SI-19C4:DI-BPM', 'SI-20M1:DI-BPM',
'SI-20M2:DI-BPM', 'SI-20C1:DI-BPM-1',
'SI-20C1:DI-BPM-2', 'SI-20C2:DI-BPM',
'SI-20C3:DI-BPM-1', 'SI-20C3:DI-BPM-2',
'SI-20C4:DI-BPM', 'SI-01M1:DI-BPM',
)
QUADNAMES = (
'SI-01M2:PS-QS', 'SI-01C1:PS-Q1',
'SI-01C1:PS-QS', 'SI-01C2:PS-QS',
'SI-01C3:PS-Q4', 'SI-01C3:PS-QS',
'SI-01C4:PS-Q1', 'SI-02M1:PS-QDB2',
'SI-02M2:PS-QDB2', 'SI-02C1:PS-Q1',
'SI-02C1:PS-QS', 'SI-02C2:PS-QS',
'SI-02C3:PS-Q4', 'SI-02C3:PS-QS',
'SI-02C4:PS-Q1', 'SI-03M1:PS-QDP2',
'SI-03M2:PS-QDP2', 'SI-03C1:PS-Q1',
'SI-03C1:PS-QS', 'SI-03C2:PS-QS',
'SI-03C3:PS-Q4', 'SI-03C3:PS-QS',
'SI-03C4:PS-Q1', 'SI-04M1:PS-QDB2',
'SI-04M2:PS-QDB2', 'SI-04C1:PS-Q1',
'SI-04C1:PS-QS', 'SI-04C2:PS-QS',
'SI-04C3:PS-Q4', 'SI-04C3:PS-QS',
'SI-04C4:PS-Q1', 'SI-05M1:PS-QS',
'SI-05M2:PS-QS', 'SI-05C1:PS-Q1',
'SI-05C1:PS-QS', 'SI-05C2:PS-QS',
'SI-05C3:PS-Q4', 'SI-05C3:PS-QS',
'SI-05C4:PS-Q1', 'SI-06M1:PS-QDB2',
'SI-06M2:PS-QDB2', 'SI-06C1:PS-Q1',
'SI-06C1:PS-QS', 'SI-06C2:PS-QS',
'SI-06C3:PS-Q4', 'SI-06C3:PS-QS',
'SI-06C4:PS-Q1', 'SI-07M1:PS-QDP2',
'SI-07M2:PS-QDP2', 'SI-07C1:PS-Q1',
'SI-07C1:PS-QS', 'SI-07C2:PS-QS',
'SI-07C3:PS-Q4', 'SI-07C3:PS-QS',
'SI-07C4:PS-Q1', 'SI-08M1:PS-QDB2',
'SI-08M2:PS-QDB2', 'SI-08C1:PS-Q1',
'SI-08C1:PS-QS', 'SI-08C2:PS-QS',
'SI-08C3:PS-Q4', 'SI-08C3:PS-QS',
'SI-08C4:PS-Q1', 'SI-09M1:PS-QS',
'SI-09M2:PS-QS', 'SI-09C1:PS-Q1',
'SI-09C1:PS-QS', 'SI-09C2:PS-QS',
'SI-09C3:PS-Q4', 'SI-09C3:PS-QS',
'SI-09C4:PS-Q1', 'SI-10M1:PS-QDB2',
'SI-10M2:PS-QDB2', 'SI-10C1:PS-Q1',
'SI-10C1:PS-QS', 'SI-10C2:PS-QS',
'SI-10C3:PS-Q4', 'SI-10C3:PS-QS',
'SI-10C4:PS-Q1', 'SI-11M1:PS-QDP2',
'SI-11M2:PS-QDP2', 'SI-11C1:PS-Q1',
'SI-11C1:PS-QS', 'SI-11C2:PS-QS',
'SI-11C3:PS-Q4', 'SI-11C3:PS-QS',
'SI-11C4:PS-Q1', 'SI-12M1:PS-QDB2',
'SI-12M2:PS-QDB2', 'SI-12C1:PS-Q1',
'SI-12C1:PS-QS', 'SI-12C2:PS-QS',
'SI-12C3:PS-Q4', 'SI-12C3:PS-QS',
'SI-12C4:PS-Q1', 'SI-13M1:PS-QS',
'SI-13M2:PS-QS', 'SI-13C1:PS-Q1',
'SI-13C1:PS-QS', 'SI-13C2:PS-QS',
'SI-13C3:PS-Q4', 'SI-13C3:PS-QS',
'SI-13C4:PS-Q1', 'SI-14M1:PS-QDB2',
'SI-14M2:PS-QDB2', 'SI-14C1:PS-Q1',
'SI-14C1:PS-QS', 'SI-14C2:PS-QS',
'SI-14C3:PS-Q4', 'SI-14C3:PS-QS',
'SI-14C4:PS-Q1', 'SI-15M1:PS-QDP2',
'SI-15M2:PS-QDP2', 'SI-15C1:PS-Q1',
'SI-15C1:PS-QS', 'SI-15C2:PS-QS',
'SI-15C3:PS-Q4', 'SI-15C3:PS-QS',
'SI-15C4:PS-Q1', 'SI-16M1:PS-QDB2',
'SI-16M2:PS-QDB2', 'SI-16C1:PS-Q1',
'SI-16C1:PS-QS', 'SI-16C2:PS-QS',
'SI-16C3:PS-Q4', 'SI-16C3:PS-QS',
'SI-16C4:PS-Q1', 'SI-17M1:PS-QS',
'SI-17M2:PS-QS', 'SI-17C1:PS-Q1',
'SI-17C1:PS-QS', 'SI-17C2:PS-QS',
'SI-17C3:PS-Q4', 'SI-17C3:PS-QS',
'SI-17C4:PS-Q1', 'SI-18M1:PS-QDB2',
'SI-18M2:PS-QDB2', 'SI-18C1:PS-Q1',
'SI-18C1:PS-QS', 'SI-18C2:PS-QS',
'SI-18C3:PS-Q4', 'SI-18C3:PS-QS',
'SI-18C4:PS-Q1', 'SI-19M1:PS-QDP2',
'SI-19M2:PS-QDP2', 'SI-19C1:PS-Q1',
'SI-19C1:PS-QS', 'SI-19C2:PS-QS',
'SI-19C3:PS-Q4', 'SI-19C3:PS-QS',
'SI-19C4:PS-Q1', 'SI-20M1:PS-QDB2',
'SI-20M2:PS-QDB2', 'SI-20C1:PS-Q1',
'SI-20C1:PS-QS', 'SI-20C2:PS-QS',
'SI-20C3:PS-Q4', 'SI-20C3:PS-QS',
'SI-20C4:PS-Q1', 'SI-01M1:PS-QS',
)
BPMNAMES = tuple([_PVName(bpm) for bpm in BPMNAMES])
QUADNAMES = tuple([_PVName(quad) for quad in QUADNAMES])
def __init__(self):
"""."""
super().__init__()
self.deltaorbx = 100 # [um]
self.deltaorby = 100 # [um]
self.meas_nrsteps = 8
self.quad_deltakl = 0.01 # [1/m]
self.quad_nrcycles = 1
self.wait_sofb = 0.3 # [s]
self.wait_correctors = 2 # [s]
self.wait_quadrupole = 2 # [s]
self.timeout_wait_sofb = 3 # [s]
self.sofb_nrpoints = 10
self.sofb_maxcorriter = 5
self.sofb_maxorberr = 5 # [um]
def __str__(self):
"""."""
ftmp = '{0:24s} = {1:9.3f} {2:s}\n'.format
dtmp = '{0:24s} = {1:9d} {2:s}\n'.format
st = ftmp('deltaorbx [um]', self.deltaorbx, '')
st += ftmp('deltaorby [um]', self.deltaorby, '')
st += dtmp('meas_nrsteps', self.meas_nrsteps, '')
st += ftmp('quad_deltakl [1/m]', self.quad_deltakl, '')
st += ftmp('quad_nrcycles', self.quad_nrcycles, '')
st += ftmp('wait_sofb [s]', self.wait_sofb, '(time to process calcs)')
st += ftmp('wait_correctors [s]', self.wait_correctors, '')
st += ftmp('wait_quadrupole [s]', self.wait_quadrupole, '')
st += ftmp(
'timeout_wait_sofb [s]', self.timeout_wait_sofb, '(get orbit)')
st += dtmp('sofb_nrpoints', self.sofb_nrpoints, '')
st += dtmp('sofb_maxcorriter', self.sofb_maxcorriter, '')
st += ftmp('sofb_maxorberr [um]', self.sofb_maxorberr, '')
return st
class DoBBA(_BaseClass):
"""."""
def __init__(self, isonline=True):
"""."""
super().__init__(
params=BBAParams(), target=self._do_bba, isonline=isonline)
self._bpms2dobba = list()
self.clt_confdb = ConfigDBClient(config_type='si_bbadata')
self.clt_confdb._TIMEOUT_DEFAULT = 20
self.data['bpmnames'] = list(BBAParams.BPMNAMES)
self.data['quadnames'] = list(BBAParams.QUADNAMES)
self.data['scancenterx'] = _np.zeros(len(BBAParams.BPMNAMES))
self.data['scancentery'] = _np.zeros(len(BBAParams.BPMNAMES))
self.data['measure'] = dict()
if self.isonline:
self.devices['sofb'] = _SOFB(_SOFB.DEVICES.SI)
self.devices['currinfosi'] = _CurrInfoSI()
self.connect_to_quadrupoles()
def __str__(self):
"""."""
stn = 'Params\n'
stp = self.params.__str__()
stp = ' ' + stp.replace('\n', '\n ')
stn += stp + '\n'
stn += 'Connected? ' + str(self.connected) + '\n\n'
stn += ' {:^20s} {:^20s} {:6s} {:6s}\n'.format(
'BPM', 'Quad', 'Xc [um]', 'Yc [um]')
tmplt = '{:03d}: {:^20s} {:^20s} {:^6.1f} {:^6.1f}\n'
dta = self.data
for bpm in self.bpms2dobba:
idx = dta['bpmnames'].index(bpm)
stn += tmplt.format(
idx, dta['bpmnames'][idx], dta['quadnames'][idx],
dta['scancenterx'][idx], dta['scancentery'][idx])
return stn
@property
def havebeam(self):
"""."""
haveb = self.devices['currinfosi']
return haveb.connected and haveb.storedbeam
@property
def measuredbpms(self):
"""."""
return sorted(self.data['measure'])
@property
def bpms2dobba(self):
"""."""
if self._bpms2dobba:
return _dcopy(self._bpms2dobba)
return sorted(
set(self.data['bpmnames']) - self.data['measure'].keys())
@bpms2dobba.setter
def bpms2dobba(self, bpmlist):
"""."""
self._bpms2dobba = sorted([_PVName(bpm) for bpm in bpmlist])
def connect_to_quadrupoles(self):
"""."""
for bpm in self.bpms2dobba:
idx = self.data['bpmnames'].index(bpm)
qname = self.data['quadnames'][idx]
if qname and qname not in self.devices:
self.devices[qname] = _PowerSupply(qname)
def get_orbit(self):
"""."""
sofb = self.devices['sofb']
sofb.cmd_reset()
sofb.wait_buffer(self.params.timeout_wait_sofb)
return _np.hstack([sofb.orbx, sofb.orby])
@staticmethod
def get_cycling_curve():
"""."""
return [1/2, -1/2, 0]
def correct_orbit_at_bpm(self, bpmname, x0, y0):
"""."""
sofb = self.devices['sofb']
idxx = self.data['bpmnames'].index(bpmname)
refx, refy = sofb.refx, sofb.refy
refx[idxx], refy[idxx] = x0, y0
sofb.refx = refx
sofb.refy = refy
_time.sleep(self.params.wait_sofb)
idx, resx, resy = sofb.correct_orbit_manually(
nr_iters=self.params.sofb_maxcorriter,
residue=self.params.sofb_maxorberr)
return idx, _np.max([resx, resy])
def correct_orbit(self):
"""."""
sofb = self.devices['sofb']
sofb.correct_orbit_manually(
nr_iters=self.params.sofb_maxcorriter,
residue=self.params.sofb_maxorberr)
def process_data(
self, nbpms_linfit=None, thres=None, mode='symm',
discardpoints=None, nonlinear=False):
"""."""
for bpm in self.data['measure']:
self.analysis[bpm] = self.process_data_single_bpm(
bpm, nbpms_linfit=nbpms_linfit, thres=thres, mode=mode,
discardpoints=discardpoints, nonlinear=nonlinear)
def process_data_single_bpm(
self, bpm, nbpms_linfit=None, thres=None, mode='symm',
discardpoints=None, nonlinear=False):
"""."""
anl = dict()
idx = self.data['bpmnames'].index(bpm)
nbpms = len(self.data['bpmnames'])
orbini = self.data['measure'][bpm]['orbini']
orbpos = self.data['measure'][bpm]['orbpos']
orbneg = self.data['measure'][bpm]['orbneg']
usepts = set(range(orbini.shape[0]))
if discardpoints is not None:
usepts = set(usepts) - set(discardpoints)
usepts = sorted(usepts)
xpos = orbini[usepts, idx]
ypos = orbini[usepts, idx+nbpms]
if mode.lower().startswith('symm'):
dorb = orbpos - orbneg
elif mode.lower().startswith('pos'):
dorb = orbpos - orbini
else:
dorb = orbini - orbneg
dorbx = dorb[usepts, :nbpms]
dorby = dorb[usepts, nbpms:]
if '-QS' in self.data['quadnames'][idx]:
dorbx, dorby = dorby, dorbx
anl['xpos'] = xpos
anl['ypos'] = ypos
px = _np.polyfit(xpos, dorbx, deg=1)
py = _np.polyfit(ypos, dorby, deg=1)
nbpms_linfit = nbpms_linfit or len(self.data['bpmnames'])
sidx = _np.argsort(_np.abs(px[0]))
sidy = _np.argsort(_np.abs(py[0]))
sidx = sidx[-nbpms_linfit:][::-1]
sidy = sidy[-nbpms_linfit:][::-1]
pxc = px[:, sidx]
pyc = py[:, sidy]
if thres:
ax2 = pxc[0]*pxc[0]
ay2 = pyc[0]*pyc[0]
ax2 /= ax2[0]
ay2 /= ay2[0]
nx = _np.sum(ax2 > thres)
ny = _np.sum(ay2 > thres)
pxc = pxc[:, :nx]
pyc = pyc[:, :ny]
x0s = -pxc[1]/pxc[0]
y0s = -pyc[1]/pyc[0]
x0 = _np.dot(pxc[0], -pxc[1]) / _np.dot(pxc[0], pxc[0])
y0 = _np.dot(pyc[0], -pyc[1]) / _np.dot(pyc[0], pyc[0])
stdx0 = _np.sqrt(
_np.dot(pxc[1], pxc[1]) / _np.dot(pxc[0], pxc[0]) - x0*x0)
stdy0 = _np.sqrt(
_np.dot(pyc[1], pyc[1]) / _np.dot(pyc[0], pyc[0]) - y0*y0)
extrapx = not min(xpos) <= x0 <= max(xpos)
extrapy = not min(ypos) <= y0 <= max(ypos)
anl['linear_fitting'] = dict()
anl['linear_fitting']['dorbx'] = dorbx
anl['linear_fitting']['dorby'] = dorby
anl['linear_fitting']['coeffsx'] = px
anl['linear_fitting']['coeffsy'] = py
anl['linear_fitting']['x0s'] = x0s
anl['linear_fitting']['y0s'] = y0s
anl['linear_fitting']['extrapolatedx'] = extrapx
anl['linear_fitting']['extrapolatedy'] = extrapy
anl['linear_fitting']['x0'] = x0
anl['linear_fitting']['y0'] = y0
anl['linear_fitting']['stdx0'] = stdx0
anl['linear_fitting']['stdy0'] = stdy0
rmsx = _np.sum(dorbx*dorbx, axis=1) / dorbx.shape[1]
rmsy = _np.sum(dorby*dorby, axis=1) / dorby.shape[1]
if xpos.size > 3:
px, covx = _np.polyfit(xpos, rmsx, deg=2, cov=True)
py, covy = _np.polyfit(ypos, rmsy, deg=2, cov=True)
else:
px = _np.polyfit(xpos, rmsx, deg=2, cov=False)
py = _np.polyfit(ypos, rmsy, deg=2, cov=False)
covx = covy = | _np.zeros((3, 3)) | numpy.zeros |
# -*- coding: utf-8 -*-
"""SHERIFS
Seismic Hazard and Earthquake Rates In Fault Systems
Version 1.2
@author: <NAME>
"""
import numpy as np
import matplotlib.path as mplPath
from geometry_tools import *
def build(XMLfile,host_model_file,Lon_bg,Lat_bg):
Poly = []
    for x1,y1 in zip(Lon_bg,Lat_bg): # build the polygon of the background zone
Poly.append((x1,y1))
bbPath = mplPath.Path(Poly)
read_host_file = open(host_model_file,'r')
lines_of_the_host_file = read_host_file.readlines()
lines_of_the_host_file = [x.strip('L\n') for x in lines_of_the_host_file]
lines_of_the_host_file = [x.strip('\r\n') for x in lines_of_the_host_file]
lines_of_the_host_file = [x.strip('\n') for x in lines_of_the_host_file]
line_number = 0
source_read = False
simple_fault = False
complex_fault = False
area_source = False
point_source = False
subduction_source = False
for line in lines_of_the_host_file:
if '<simpleFaultSource' in line :
line_start = line_number
index_id = line.find('id="')+4
source_read = False
simple_fault = True
complex_fault = False
area_source = False
point_source = False
subduction_source = False
if 'Subduction' in line or 'subduction' in line:
subduction_source = True
if '<areaSource' in line :
Xing_bg = False
type_increment = False
type_mfd = False
zone_defined = False
line_start = line_number
index_id = line.find('id="')+4
source_read = False
simple_fault = False
complex_fault = False
area_source = True
point_source = False
subduction_source = False
if 'Subduction' in line or 'subduction' in line:
subduction_source = True
if '<complexFaultSource' in line :
line_start = line_number
index_id = line.find('id="')+4
source_read = False
simple_fault = False
complex_fault = True
area_source = False
point_source = False
subduction_source = False
if 'Subduction' in line or 'subduction' in line:
subduction_source = True
if '<pointSource' in line :
line_start = line_number
index_id = line.find('id="')+4
source_read = False
simple_fault = False
complex_fault = False
area_source = False
point_source = True
subduction_source = False
if 'Subduction' in line or 'subduction' in line:
subduction_source = True
if '</sourceModel' in line :
source_read = False
simple_fault = False
complex_fault = False
area_source = False
point_source = False
subduction_source = False
if simple_fault == True or complex_fault == True:
print_source = True
if '<gml:posList>' in line :
line_start_lonlat = line_number
if '</gml:posList>' in line :
line_stop_lonlat = line_number
lon_lat = ''
for line_lon_lat in lines_of_the_host_file[line_start_lonlat:line_stop_lonlat+1]:
line_lon_lat = line_lon_lat.replace('<gml:posList>','')
line_lon_lat = line_lon_lat.replace('</gml:posList>','')
lon_lat += ' ' + line_lon_lat
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.split(' ')
points = []
for i in range(len(lon_lat)-1):
if lon_lat[i] != '':
if len(points)!= 0:
if float(lon_lat[i]) != points[-1][1]:
points.append([float(lon_lat[i]), float(lon_lat[i+1])])
else :
points.append([float(lon_lat[i]), float(lon_lat[i+1])])
for point in points :
if bbPath.contains_point((point[0],point[1])) == True:
print_source = False
            if '</simpleFaultSource' in line or '</complexFaultSource' in line:
line_end = line_number
source_read = True
if print_source == True or subduction_source == True:
if source_read == True :
line_to_print = lines_of_the_host_file[line_start][:index_id]+'1111'+lines_of_the_host_file[line_start][index_id:]
XMLfile.write(line_to_print+'\n')
for line_to_print in lines_of_the_host_file[line_start+1:line_end+1] :
XMLfile.write(line_to_print+'\n')
if point_source == True:
print_source = True
if '<gml:posList>' in line :
line_start_lonlat = line_number
if '</gml:posList>' in line :
line_stop_lonlat = line_number
lon_lat = ''
for line_lon_lat in lines_of_the_host_file[line_start_lonlat:line_stop_lonlat+1]:
line_lon_lat = line_lon_lat.replace('<gml:posList>','')
line_lon_lat = line_lon_lat.replace('</gml:posList>','')
lon_lat += ' ' + line_lon_lat
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.split(' ')
points = []
for i in range(len(lon_lat)-1):
if lon_lat[i] != '':
if len(points)!= 0:
if float(lon_lat[i]) != points[-1][1]:
points.append([float(lon_lat[i]), float(lon_lat[i+1])])
else :
points.append([float(lon_lat[i]), float(lon_lat[i+1])])
for point in points :
if bbPath.contains_point((point[0],point[1])) == True:
print_source = False
            if '</pointSource' in line:
line_end = line_number
source_read = True
if print_source == True or subduction_source == True:
if source_read == True :
line_to_print = lines_of_the_host_file[line_start][:index_id]+'1111'+lines_of_the_host_file[line_start][index_id:]
XMLfile.write(line_to_print+'\n')
for line_to_print in lines_of_the_host_file[line_start+1:line_end+1] :
XMLfile.write(line_to_print+'\n')
if area_source == True:
if '<gml:posList>' in line :
line_start_lonlat = line_number
if '</gml:posList>' in line :
line_stop_lonlat = line_number
lon_lat = ''
for line_lon_lat in lines_of_the_host_file[line_start_lonlat:line_stop_lonlat+1]:
line_lon_lat = line_lon_lat.replace('<gml:posList>','')
line_lon_lat = line_lon_lat.replace('</gml:posList>','')
lon_lat += ' ' + line_lon_lat
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.replace(' ',' ')
lon_lat = lon_lat.split(' ')
points_zone = []
for i in range(len(lon_lat)-1):
if lon_lat[i] != '':
if len(points_zone)!= 0:
if float(lon_lat[i]) != points_zone[-1][1]:
points_zone.append([float(lon_lat[i]), float(lon_lat[i+1])])
else :
points_zone.append([float(lon_lat[i]), float(lon_lat[i+1])])
ColX = []
ColY = []
for point in points_zone :
ColX.append(point[0])
ColY.append(point[1])
if bbPath.contains_point((point[0],point[1])) == True:
Xing_bg = True
Poly = []
for x1,y1 in zip(ColX,ColY):
Poly.append((x1,y1))
bbPath_zone = mplPath.Path(Poly)
for lon,lat in zip(Lon_bg,Lat_bg):
if bbPath_zone.contains_point((lon,lat)) == True:
Xing_bg = True
if '</areaSource>' in line:
line_end = line_number
source_read = True
if Xing_bg == False or subduction_source == True: #doesn't change anything, just write the source down
if source_read == True :
line_to_print = lines_of_the_host_file[line_start][:index_id]+'1111'+lines_of_the_host_file[line_start][index_id:]
XMLfile.write(line_to_print+'\n')
for line_to_print in lines_of_the_host_file[line_start+1:line_end+1] :
XMLfile.write(line_to_print+'\n')
elif Xing_bg ==True and subduction_source == False:
if zone_defined == False :
zone_defined = True
#number_of_Xing=0
listpoint_in_bg = []
ColX = []
ColY = []
for point in points_zone :
# find the points that are in the zone and the couple of point that crosses the other zone limit
# Xsing_couple_zone = []
ColX.append(point[0])
ColY.append(point[1])
if bbPath.contains_point((point[0],point[1])) == True:
listpoint_in_bg.append(1)
else:
listpoint_in_bg.append(0)
#does the same for the background points
# Xsing_points_bg =[]
bg_point_inzone = []
Poly = []
for x1,y1 in zip(ColX,ColY):
Poly.append((x1,y1))
bbPath_zone = mplPath.Path(Poly)
for lon,lat in zip(Lon_bg,Lat_bg):
if bbPath_zone.contains_point((lon,lat)) == True:
bg_point_inzone.append(1)
else :
bg_point_inzone.append(0)
#find the number of crossing
nb_Xing_zone = 0
for index_pt_z in range(len(listpoint_in_bg)):
if index_pt_z != 0:
if listpoint_in_bg[index_pt_z]!=listpoint_in_bg[index_pt_z-1]:
nb_Xing_zone+=1
else:
if listpoint_in_bg[-1]!=listpoint_in_bg[index_pt_z]:
nb_Xing_zone+=1
nb_Xing_bg = 0
for index_pt_bg in range(len(bg_point_inzone)):
if index_pt_bg != 0:
if bg_point_inzone[index_pt_bg]!=bg_point_inzone[index_pt_bg-1]:
nb_Xing_bg+=1
else:
if bg_point_inzone[-1]!=bg_point_inzone[index_pt_bg]:
nb_Xing_bg+=1
number_of_Xing = max([nb_Xing_zone,nb_Xing_bg])
if sum(bg_point_inzone) == len(bg_point_inzone) and sum(listpoint_in_bg)==0: # if the background if completely included in the zone
lon_zone_modif = []
lat_zone_modif = []
for point in points_zone :
lon_zone = point[0]
lat_zone = point[1]
lon_zone_modif.append(lon_zone)
lat_zone_modif.append(lat_zone)
distances = []
for lon_bg,lat_bg in zip(Lon_bg,Lat_bg):
distances.append(distance(lon_bg,lat_bg,points_zone[-1][0],points_zone[-1][1]))
index_dist_min = np.argmin(distances)
lon_bg_modif = Lon_bg[index_dist_min:]+Lon_bg[:index_dist_min]
lat_bg_modif = Lat_bg[index_dist_min:]+Lat_bg[:index_dist_min]
if (distance(lon_bg_modif[-1],lat_bg_modif[-1],points_zone[0][0],points_zone[0][1])
>distance(lon_bg_modif[0],lat_bg_modif[0],points_zone[0][0],points_zone[0][1])):
lon_bg_modif = list(reversed(lon_bg_modif))
lat_bg_modif = list(reversed(lat_bg_modif))
for lon_bg,lat_bg in zip(lon_bg_modif,lat_bg_modif):
lon_zone_modif.append(lon_bg)
lat_zone_modif.append(lat_bg)
#avoid intersections of the bg and moves the point slightly so OQ doesn't freak out
line1 = [[points_zone[-1][0],points_zone[-1][1]],[lon_bg_modif[0],lat_bg_modif[0]]]
line2 = [[np.mean([lon_bg_modif[0],points_zone[-1][0]])+0.0001,np.mean([lat_bg_modif[0],points_zone[-1][1]])],
[np.mean([lon_bg_modif[0],points_zone[-1][0]]),np.mean([lat_bg_modif[-1],points_zone[-1][1]])]]
x,y = line_intersection(line1, line2)
if x != 'no_intesection':
if (points_aligned([np.mean([lon_bg_modif[0],points_zone[-1][0]])+0.0001,np.mean([lat_bg_modif[0],points_zone[-1][1]])],
[np.mean([lon_bg_modif[0],points_zone[-1][0]]), | np.mean([lat_bg_modif[-1],points_zone[-1][1]]) | numpy.mean |
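# --- Illustrative sketch of the point-in-polygon test used throughout build ---
# Both the background and each area source are turned into matplotlib Path
# objects, and a crossing is flagged whenever a vertex of one polygon lies
# inside the other. Minimal standalone version (the polygons passed in are
# plain lists of (lon, lat) tuples; the names are illustrative only):
def _zone_crossing_sketch(zone_lonlat, bg_lonlat):
    zone_path = mplPath.Path(zone_lonlat)
    bg_path = mplPath.Path(bg_lonlat)
    zone_in_bg = [bg_path.contains_point(pt) for pt in zone_lonlat]
    bg_in_zone = [zone_path.contains_point(pt) for pt in bg_lonlat]
    return any(zone_in_bg) or any(bg_in_zone)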
import tensorflow as tf
import numpy as np
import cv2
import argparse
from sklearn.utils import shuffle
def Dataset_preprocessing(dataset='mnist', image_type=True):
if dataset == 'mnist':
nch = 1
r = 32
(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()
elif dataset == 'fmnist':
(train_images, _), (test_images, _) = tf.keras.datasets.fashion_mnist.load_data()
r = 32
nch = 1
elif dataset == 'cifar10':
(train_images, _), (test_images, _) = tf.keras.datasets.cifar10.load_data()
r = 32
nch = 3
elif dataset == 'celeba':
celeba = np.load('/raid/konik/data/celeba_64_100k.npy')
celeba = shuffle(celeba)
train_images, test_images = np.split(celeba, [80000], axis=0)
print(type(train_images[0,0,0,0]))
nch = 3
r = 64
elif dataset == 'imagenet':
imagenet = np.load('/raid/Amir/Projects/datasets/Tiny_imagenet.npy')
imagenet = shuffle(imagenet)
train_images, test_images = np.split(imagenet, [80000], axis=0)
nch = 3
r = 64
elif dataset == 'rheo':
rheo = np.load('/raid/Amir/Projects/datasets/rheology.npy')
rheo = shuffle(rheo)
train_images, test_images = np.split(rheo, [1500], axis=0)
nch = 3
r = 64
elif dataset == 'chest':
chest = np.load('/raid/Amir/Projects/datasets/X_ray_dataset_128.npy')[:100000,:,:,0:1]
chest = shuffle(chest)
print(np.shape(chest))
train_images, test_images = np.split(chest, [80000], axis=0)
# print(type(train_images[0,0,0,0]))
nch = 1
r = 128
elif dataset == 'church':
church = np.load('/raid/Amir/Projects/datasets/church_outdoor_train_lmdb_color_64.npy')[:100000,:,:,:]
church = shuffle(church)
print(np.shape(church))
train_images, test_images = np.split(church, [80000], axis=0)
# print(type(train_images[0,0,0,0]))
nch = 3
r = 64
training_images = np.zeros((np.shape(train_images)[0], r, r, 1))
testing_images = np.zeros(( | np.shape(test_images) | numpy.shape |
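# --- Illustrative sketch of the resize step the allocations above prepare for ---
# For the 28x28 MNIST/FMNIST branches the images presumably get resized to
# r x r (32 x 32 here) and given an explicit channel axis; the normalisation
# applied afterwards is not shown, so this is only a plausible sketch.
def _resize_to_r_sketch(images, r=32):
    out = np.zeros((images.shape[0], r, r, 1), dtype=np.float32)
    for i, img in enumerate(images):
        out[i, :, :, 0] = cv2.resize(img.astype(np.float32), (r, r))
    return out / 255.0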
'''
Author: <NAME>, Bioscience Core Lab @ KAUST, KSA
Project Name: longQC.py
Start Date: 2017-10-10
Usage:
longQC.py [options]
Try 'longQC.py -h' for more information.
Purpose: LongQC enables you to assess the quality of sequence data
coming from third-generation sequencers (long read).
Bugs: Please contact <EMAIL>
'''
import sys, os, json, argparse, shlex, array
import logging
import numpy as np
import pandas as pd
from time import sleep
from scipy.stats import gamma
from jinja2 import Environment, FileSystemLoader
from collections import OrderedDict
from multiprocessing import Pool
from _version import __version__
import lq_nanopore
import lq_rs
import lq_sequel
from lq_gamma import estimate_gamma_dist_scipy, plot_length_dist
from lq_utils import (eprint, open_seq_chunk, get_N50, subsample_from_chunk,
write_fastq, get_Qx_bases, copytree, guess_format, enc_b64_str)
from lq_adapt import cut_adapter
from lq_gcfrac import LqGC
from lq_exec import LqExec
from lq_coverage import LqCoverage
from lq_mask import LqMask
def command_run(args):
if args.suf:
suf = args.suf
else:
suf = None
if args.platform == 'rs2':
lq_rs.run_platformqc(args.raw_data_dir, args.out, suffix=suf)
elif args.platform == 'sequel':
lq_sequel.run_platformqc(args.raw_data_dir, args.out, suffix=suf)
elif args.platform == 'minion':
lq_nanopore.run_platformqc(args.platform, args.raw_data_dir, args.out, suffix=suf, n_channel=512)
elif args.platform == 'gridion':
lq_nanopore.run_platformqc(args.platform, args.raw_data_dir, args.out, suffix=suf, n_channel=512)
else:
pass
def command_help(args):
print(parser.parse_args([args.command, '--help']))
def main(args):
if hasattr(args, 'handler'):
args.handler(args)
else:
parser.print_help()
def command_sample(args):
if args.ncpu < 4:
eprint("Error: -p/--ncpu needs to be 4 or higher.")
sys.exit(1)
if not os.path.exists(args.input):
eprint("Error: input file %s does not exist." % args.input)
sys.exit(1)
if args.mem < 0 or args.mem > 2:
eprint("Error: -m(--mem) option has an out-of-range ranged value.")
sys.exit(1)
if args.nsample < 0 or args.nsample > 10000:
eprint("Error: -n(--n_sample) option has an out-of-range ranged value.")
sys.exit(1)
if os.path.exists(args.out):
eprint("Error: output path %s already exists." % args.out)
sys.exit(1)
    if args.db and args.short and args.ncpu < 9:
        eprint("Error: -d/--db option with -b/--short flag requires at least nine cpus.")
        sys.exit(1)
    if args.db and args.ncpu < 6:
        eprint("Error: -d/--db option requires at least six cpus.")
        sys.exit(1)
if args.suf:
suffix = "_" + args.suf
else:
suffix = ""
ncpu = int(args.ncpu)
path_minimap2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "minimap2-coverage")
pb_control = None
merged_control = None
cov_path = os.path.join(args.out, "analysis", "minimap2", "coverage_out" + suffix + ".txt")
cov_path_e = os.path.join(args.out, "analysis", "minimap2", "coverage_err" + suffix + ".txt")
sample_path = os.path.join(args.out, "analysis", "subsample" + suffix + ".fastq")
if args.short:
length_threshold = 500
short_sample_path = os.path.join(args.out, "analysis", "short_subsample" + suffix + ".fastq")
short_cov_path = os.path.join(args.out, "analysis", "minimap2", "short_coverage_out" + suffix + ".txt")
short_cov_path_e = os.path.join(args.out, "analysis", "minimap2", "short_coverage_err" + suffix + ".txt")
merged_cov_path = os.path.join(args.out, "analysis", "minimap2", "merged_coverage_out" + suffix + ".txt")
log_path = os.path.join(args.out, "logs", "log_longQC_sampleqc" + suffix + ".txt")
fig_path = os.path.join(args.out, "figs", "fig_longQC_sampleqc_length" + suffix + ".png")
fig_path_rq = os.path.join(args.out, "figs", "fig_longQC_sampleqc_average_qv" + suffix + ".png")
fig_path_ma = os.path.join(args.out, "figs", "fig_longQC_sampleqc_masked_region" + suffix + ".png")
fig_path_gc = os.path.join(args.out, "figs", "fig_longQC_sampleqc_gcfrac" + suffix + ".png")
fig_path_cv = os.path.join(args.out, "figs", "fig_longQC_sampleqc_coverage" + suffix + ".png")
fig_path_qv = os.path.join(args.out, "figs", "fig_longQC_sampleqc_olp_qv" + suffix + ".png")
fig_path_ta = os.path.join(args.out, "figs", "fig_longQC_sampleqc_terminal_analysis" + suffix + ".png")
fig_path_cl = os.path.join(args.out, "figs", "fig_longQC_sampleqc_coverage_over_read_length" + suffix + ".png")
json_path = os.path.join(args.out, "QC_vals_longQC_sampleqc" + suffix + ".json")
fastx_path = ""
html_path = os.path.join(args.out, "web_summary" + suffix + ".html")
tempdb_path = ""
df_mask = None
minimap2_params = ''
minimap2_db_params = ''
minimap2_med_score_threshold = 0
# for BCL
'''
db_index_pb = os.path.join(os.path.dirname(os.path.abspath(__file__)), "db", "nt_pb.index")
db_index_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)), "db", "nt_ont.index")
db_paf = None
db_paf_err = None
'''
nonsense_read_error_threshold = 0.45
nonsense_read_warn_threshold = 0.25
very_low_coverage_threshold = 6
# output_path will be made too.
if not os.path.isdir(os.path.join(args.out, "analysis", "minimap2")):
os.makedirs(os.path.join(args.out, "analysis", "minimap2"), exist_ok=True)
if not os.path.isdir(os.path.join(args.out, "logs")):
os.makedirs(os.path.join(args.out, "logs"), exist_ok=True)
if not os.path.isdir(os.path.join(args.out, "figs")):
os.makedirs(os.path.join(args.out, "figs"), exist_ok=True)
### logging conf ###
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path, 'w')
sh = logging.StreamHandler()
formatter = logging.Formatter('%(module)s:%(asctime)s:%(lineno)d:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.addHandler(fh)
#####################
logger.info("Cmd: %s" % " ".join(sys.argv))
if args.preset:
p = args.preset
if p == 'pb-rs2':
args.pb = True
args.adp5 = "ATCTCTCTCTTTTCCTCCTCCTCCGTTGTTGTTGTTGAGAGAGAT" if not args.adp5 else args.adp5
args.adp3 = "ATCTCTCTCTTTTCCTCCTCCTCCGTTGTTGTTGTTGAGAGAGAT" if not args.adp3 else args.adp3
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 80
if args.short:
minimap2_med_score_threshold_short = 60
elif p == 'pb-sequel':
args.pb = True
args.sequel = True
args.adp5 = "ATCTCTCTCAACAACAACAACGGAGGAGGAGGAAAAGAGAGAGAT" if not args.adp5 else args.adp5
args.adp3 = "ATCTCTCTCAACAACAACAACGGAGGAGGAGGAAAAGAGAGAGAT" if not args.adp3 else args.adp3
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 80
if args.short:
minimap2_med_score_threshold_short = 60
elif p == 'pb-hifi':
args.pb = True
args.sequel = True
args.adp5 = "ATCTCTCTCAACAACAACAACGGAGGAGGAGGAAAAGAGAGAGAT" if not args.adp5 else args.adp5
args.adp3 = "ATCTCTCTCAACAACAACAACGGAGGAGGAGGAAAAGAGAGAGAT" if not args.adp3 else args.adp3
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 80
elif p == 'ont-ligation':
args.ont = True
args.adp5 = "AATGTACTTCGTTCAGTTACGTATTGCT" if not args.adp5 else args.adp5
#args.adp3 = "GCAATACGTAACTGAACGAAGT"
args.adp3 = "GCAATACGTAACTGAACG" if not args.adp3 else args.adp3
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 160
if args.short:
minimap2_med_score_threshold_short = 140
elif p == 'ont-rapid':
args.ont = True
args.adp5 = "GTTTTCGCATTTATCGTGAAACGCTTTCGCGTTTTTCGTGCGCCGCTTCA" if not args.adp5 else args.adp5
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 160
if args.short:
minimap2_med_score_threshold_short = 140
elif p == 'ont-1dsq':
args.ont = True
args.adp5 = "GGCGTCTGCTTGGGTGTTTAACCTTTTTGTCAGAGAGGTTCCAAGTCAGAGAGGTTCCT" if not args.adp5 else args.adp5
args.adp3 = "GGAACCTCTCTGACTTGGAACCTCTCTGACAAAAAGGTTAAACACCCAAGCAGACGCCAGCAAT" if not args.adp3 else args.adp3
minimap2_params = "-Y -l 0 -q 160"
minimap2_med_score_threshold = 160
if args.short:
minimap2_med_score_threshold_short = 140
if p == 'pb-hifi':
if args.fast:
minimap2_db_params = "-k 19 -w 10 -I %s" % args.inds
else:
minimap2_db_params = "-k 15 -w 5 -I %s" % args.inds
else:
if args.fast:
minimap2_db_params = "-k 15 -w 5 -I %s" % args.inds
else:
minimap2_db_params = "-k 12 -w 5 -I %s" % args.inds
logger.info("Preset \"%s\" was applied. Options --pb(--ont) is overwritten." % (p,))
file_format_code = guess_format(args.input)
if file_format_code == 0:
fastx_path = os.path.join(args.out, "analysis", "pbbam_converted_seq_file" + suffix + ".fastq")
logger.info('Temporary work file was made at %s' % fastx_path)
elif file_format_code == 4: #fast5
fastx_path = os.path.join(args.out, "analysis", "fast5_converted_seq_file" + suffix + ".fastq")
logger.info('Temporary work file was made at %s' % fastx_path)
elif file_format_code == -1 or file_format_code == 1:
        logger.error('Input file has an unsupported format: %s' % args.input)
sys.exit()
else:
fastx_path = args.input
if args.pb:
if args.sequel:
filter_ref = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), "refs"), "Sequel_control_reference.fasta")
else:
filter_ref = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), "refs"), "RS2_control_reference.fasta")
pb_control = os.path.join(args.out, "analysis", "minimap2", "spiked_in_control" + suffix + ".txt")
pb_control_err = os.path.join(args.out, "analysis", "minimap2", "spiked_in_control" + suffix + "_stderr.txt")
minimap2_filtering_params = "-Y -Hk15 -w 10 -c 1 -l 0 --filter"
nonsense_read_error_threshold = 0.2
nonsense_read_warn_threshold = 0.15
if args.short:
pb_control_short = os.path.join(args.out, "analysis", "minimap2", "short_spiked_in_control" + suffix + ".txt")
pb_control_short_err = os.path.join(args.out, "analysis", "minimap2", "short_spiked_in_control" + suffix + "_stderr.txt")
merged_control = os.path.join(args.out, "analysis", "minimap2", "merged_spiked_in_control" + suffix + ".txt")
if args.short:
minimap2_db_params_short = "-k 12 -w 5 -I %s" % args.inds
if args.db and file_format_code != 0:
ncpu -= 3 # subtract cpus for the minimap2 db
tempdb_path = os.path.join(args.out, "analysis", "minimap2", "t_db_minimap2")
le = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
le_args = shlex.split("%s -d %s %s" % (minimap2_db_params, tempdb_path, fastx_path))
le.exec(*le_args, out=cov_path, err=cov_path_e)
if args.short:
ncpu -= 3 # subtract cpus further for the minimap2 db
tempdb_short_path = os.path.join(args.out, "analysis", "minimap2", "t_db_minimap2_short")
le_short = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
le_args_short = shlex.split("%s -d %s %s" % (minimap2_db_params_short, tempdb_short_path, fastx_path))
le_short.exec(*le_args_short, out=short_cov_path, err=short_cov_path_e)
### initialization for chunked reads ###
pool = Pool(processes=2)
ncpu -= 2 # subtract cpus for the executor
pool_res = {}
lm = LqMask(os.path.join(path_minimap2, "sdust"), args.out, suffix=suffix, max_n_proc=10 if ncpu > 10 else ncpu)
lg = LqGC(chunk_size=150)
if args.adp5:
num_trim5 = 0
max_iden_adp5 = 0.0
adp_pos5 = array.array('i')
if args.adp3:
num_trim3 = 0
max_iden_adp3 = 0.0
adp_pos3 = array.array('i')
# vars for subsampling
cum_n_seq = 0
s_reads = []
#sample_random_fastq_list(reads, args.nsample, elist=exclude_seqs)
chunk_n = 0
for (reads, n_seqs, n_bases) in open_seq_chunk(args.input, file_format_code, chunk_size=args.mem*1024**3, is_upper=True):
### iterate over chunks
### 1. bam/fast5 to fastq conversion
if file_format_code == 0 or file_format_code == 4:
write_fastq(fastx_path, reads, is_chunk=True)
### 2. low-complexity region calc -> another process
logger.info("Computation of the low complexity region started for a chunk %d" % chunk_n)
lm.submit_sdust(reads, chunk_n)
### 3. adapter search -> another process
if args.adp5 or args.adp3:
logger.info("Adapter search is starting for a chunk %d." % chunk_n)
if args.adp5 and args.adp3:
#(tuple_5, tuple_3) = cut_adapter(reads, adp_t=args.adp5, adp_b=args.adp3, logger=logger)
pool_res['adapter'] = pool.apply_async(cut_adapter, args=(reads,), kwds={'adp_t':args.adp5, 'adp_b':args.adp3})
elif not args.adp5 and args.adp3:
#tuple_3 = cut_adapter(reads, adp_b=args.adp3, adp_t=None, logger=logger)
pool_res['adapter'] = pool.apply_async(cut_adapter, args=(reads,), kwds={'adp_b':args.adp3})
elif args.adp5 and not args.adp3:
#tuple_5 = cut_adapter(reads, adp_t=args.adp5, adp_b=None, logger=logger)
pool_res['adapter'] = pool.apply_async(cut_adapter, args=(reads,), kwds={'adp_t':args.adp5})
### 4. subsampling -> another process
pool_res['subsample'] = pool.apply_async(subsample_from_chunk, args=(reads, cum_n_seq, s_reads, args.nsample))
#pool_res['subsample'] = executor.submit(subsample_from_chunk, reads, cum_n_seq, s_reads, args.nsample, **{'minlen': 300})
### 5. GC fraction -> within this process as this is not pickable (class method)
logger.info("Computation of the GC fraction started for a chunk %d" % chunk_n)
lg.calc_read_and_chunk_gc_frac(reads)
if args.adp5 and args.adp3:
(tuple_5, tuple_3) = pool_res['adapter'].get()
logger.info("Adapter search has done for a chunk %d." % chunk_n)
elif not args.adp5 and args.adp3:
tuple_3 = pool_res['adapter'].get()
logger.info("Adapter search has done for a chunk %d." % chunk_n)
elif args.adp5 and not args.adp3:
tuple_5 = pool_res['adapter'].get()
logger.info("Adapter search has done for a chunk %d." % chunk_n)
### 6. termination of one chunk
s_reads = pool_res['subsample'].get()
logger.info('subsample finished for chunk %d.' % chunk_n)
# trimmed reads by edlib are saved as fastq
if args.trim:
write_fastq(args.trim, reads, is_chunk=True)
logger.info("Trimmed read added.")
if args.adp5 and tuple_5:
if tuple_5[0] > max_iden_adp5:
max_iden_adp5 = tuple_5[0]
num_trim5 += tuple_5[1]
adp_pos5.fromlist(tuple_5[2])
if args.adp3 and tuple_3:
if tuple_3[0] > max_iden_adp3:
max_iden_adp3 = tuple_3[0]
num_trim3 += tuple_3[1]
adp_pos3.fromlist(tuple_3[2])
chunk_n += 1
cum_n_seq += n_seqs
### file traverse is over now.
logger.info('Input file parsing was finished. #seqs:%d, #bases: %d' % (n_seqs, n_bases))
# wait for completion of DUST analysis
lm.close_pool()
logger.info("Summary table %s was made." % lm.get_outfile_path())
    # list up seqs that should be avoided
df_mask = pd.read_table(lm.get_outfile_path(), sep='\t', header=None)
exclude_seqs = df_mask[(df_mask[2] > 500000) & (df_mask[3] > 0.2)][0].values.tolist() # len > 0.5M and mask_region > 20%. k = 15
exclude_seqs = exclude_seqs + df_mask[(df_mask[2] > 10000) & (df_mask[3] > 0.4)][0].values.tolist() # len > 0.01M and mask_region > 40%. k = 12. more severe.
logger.debug("Highly masked seq list:\n%s" % "\n".join(exclude_seqs) )
# polishing subsampled seqs
s_reads = [i for i in s_reads if i != 0] # removing empty pos. this happens if numseq < numsample
ng_set = set(exclude_seqs)
ng_ovlp = 0
ng_ovlp_indices = []
for i, r in enumerate(s_reads):
if r[0] in ng_set:
ng_ovlp += 1
ng_ovlp_indices.append(i)
if ng_ovlp > 0:
logger.info('There are %d overlap reads between highly masked samples and subsampled reads. Start replacing.' % ng_ovlp)
temp = [0] * ng_ovlp
j = 0
for r in s_reads:
            ng_set.add(r[0]) # skip already picked-up ones
for (reads, n_seqs, n_bases) in open_seq_chunk(args.input, file_format_code, chunk_size=0.1*1024**3):
subsample_from_chunk(reads, j, temp, ng_ovlp, elist=ng_set)
j += n_seqs
if len([i for i in temp if i]) < ng_ovlp:
continue
else:
break
if len([i for i in temp if i]) < ng_ovlp:
            # an edge case, but it can happen.
            logger.warning('Replacing failed. Just removing highly masked ones.')
for i in ng_ovlp_indices:
s_reads[i] = 0
s_reads = [i for i in s_reads if i]
else:
for i, t in enumerate(temp):
logger.info('Replacing %s with %s.' % (s_reads[ng_ovlp_indices[i]][0], t[0]))
s_reads[ng_ovlp_indices[i]] = t # replacing bad ones with ok ones
s_n_seqs = len([i for i in s_reads if i])
if args.short:
ss_reads = [s for s in s_reads if len(s[1]) < length_threshold]
if write_fastq(short_sample_path, ss_reads):
            logger.info('Short subsampled seqs were written to a file. #seqs:%d' % len(ss_reads))
s_reads = [s for s in s_reads if len(s[1]) >= length_threshold]
if write_fastq(sample_path, s_reads):
            logger.info('Subsampled seqs were written to a file. #seqs:%d' % len(s_reads))
else:
if write_fastq(sample_path, s_reads):
logger.info('Subsampled seqs were written to a file. #seqs:%d' % s_n_seqs)
# waiting db make by minimap2
if args.db and file_format_code != 0:
while True:
if le.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le.get_pid(), le.get_bin_path()))
break
logger.info("Making a db of sampled reads...")
sleep(10)
if args.short:
while True:
if le_short.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le_short.get_pid(), le_short.get_bin_path()))
break
logger.info("Making a db of sampled short reads...")
sleep(10)
logger.info("Temp db %s was generated." % tempdb_path)
# asynchronized minimap2 starts
le = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
if args.db and file_format_code != 0:
le_args = shlex.split("%s -p %d -t %d %s %s" \
% (minimap2_params, int(minimap2_med_score_threshold), int(args.ncpu), tempdb_path, sample_path))
else:
le_args = shlex.split("%s %s -p %d -t %d %s %s" \
% (minimap2_params, minimap2_db_params, int(minimap2_med_score_threshold), int(args.ncpu), fastx_path, sample_path))
le.exec(*le_args, out=cov_path, err=cov_path_e)
logger.info("Overlap computation started. Process is %s" % le.get_pid())
# gc frac plot
gc_read_mean, gc_read_sd = lg.plot_unmasked_gc_frac(fp=fig_path_gc)
logger.info("Genarated the sample gc fraction plot.")
    q7 = np.sum(df_mask[5].values) # Q7 bases are computed by the C code (sdust summary table) for speed
#q10 = get_Qx_bases(reads, threshold=10) # too slow
logger.info("Q%d bases %d" % (7, q7))
if df_mask is not None:
lengths = df_mask[2].values
else:
logger.error("The reads summary table made by sdust does not exist!")
sys.exit(1)
tobe_json = {}
# reads does not exist anymore due to chunking
#if len(lengths) == 0:
# lengths = [len(r[1]) for r in reads]
throughput = np.sum(lengths)
longest = np.max(lengths)
mean_len = np.array(lengths).mean()
n50 = get_N50(lengths)
# exceptionally short case.
#if args.ont:
# if n50 < 1000 or float(len(np.where(np.asarray(lengths)< 1000)[0]))/len(lengths) > 0.25:
# minimap2_med_score_threshold = 60
if n50 < 3000:
lm.plot_qscore_dist(df_mask, 4, 2, interval=n50/2, fp=fig_path_rq)
else:
lm.plot_qscore_dist(df_mask, 4, 2, fp=fig_path_rq)
# plot masked fraction
lm.plot_masked_fraction(fig_path_ma)
# length distribution. a ~= 1.0 is usual (exponential dist).
(a, b) = estimate_gamma_dist_scipy(lengths)
plot_length_dist(fig_path, lengths, a, b, longest, mean_len, n50, True if args.pb else False)
logger.info("Genarated the sample read length plot.")
logger.info("Throughput: %d" % throughput)
logger.info("Length of longest read: %d" % longest)
logger.info("The number of reads: %d", len(lengths))
tobe_json["Yield"] = int(throughput)
tobe_json["Q7 bases"] = str("%.2f%%" % float(100*q7/throughput))
tobe_json["Longest_read"] = int(longest)
tobe_json["Num_of_reads"] = len(lengths)
tobe_json["Length_stats"] = {}
tobe_json["Length_stats"]["gamma_params"] = [float(a), float(b)]
tobe_json["Length_stats"]["Mean_read_length"] = float(mean_len)
tobe_json["Length_stats"]["N50_read_length"] = float(n50)
tobe_json["GC_stats"] = {}
tobe_json["GC_stats"]["Mean_GC_content"] = float(gc_read_mean)
tobe_json["GC_stats"]["SD_GC_content"] = float(gc_read_sd)
if args.adp5 and max_iden_adp5 >= 0.75:
tobe_json["Stats_for_adapter5"] = {}
tobe_json["Stats_for_adapter5"]["Num_of_trimmed_reads_5"] = num_trim5
tobe_json["Stats_for_adapter5"]["Max_identity_adp5"] = max_iden_adp5
tobe_json["Stats_for_adapter5"]["Average_position_from_5_end"] = np.mean(adp_pos5)
if args.adp3 and max_iden_adp3 >= 0.75:
tobe_json["Stats_for_adapter3"] = {}
tobe_json["Stats_for_adapter3"]["Num_of_trimmed_reads_3"] = num_trim3
tobe_json["Stats_for_adapter3"]["Max_identity_adp3"] = max_iden_adp3
tobe_json["Stats_for_adapter3"]["Average_position_from_3_end"] = np.mean(adp_pos3)
    # here wait until the minimap process finishes
while True:
if le.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le.get_pid(), le.get_bin_path()))
break
logger.info("Calculating overlaps of sampled reads...")
sleep(10)
logger.info("Overlap computation finished.")
if args.short:
le_short = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
if args.db and file_format_code != 0:
le_short_args = shlex.split("%s -p %d -t %d %s %s" \
% (minimap2_params, int(minimap2_med_score_threshold_short), int(args.ncpu), tempdb_short_path, short_sample_path))
else:
le_short_args = shlex.split("%s %s -p %d -t %d %s %s" \
% (minimap2_params, minimap2_db_params_short, int(minimap2_med_score_threshold_short), int(args.ncpu), fastx_path, short_sample_path))
le_short.exec(*le_short_args, out=short_cov_path, err=short_cov_path_e)
logger.info("Overlap computation for short reads started. Process is %s" % le.get_pid())
while True:
if le_short.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le.get_pid(), le.get_bin_path()))
break
logger.info("Calculating overlaps of short sampled reads...")
sleep(10)
logger.info("Overlap computation for short reads finished.")
with open(merged_cov_path, 'w') as outf:
with open(cov_path, 'r') as inf:
outf.write(inf.read())
with open(short_cov_path, 'r') as inf:
outf.write(inf.read())
logger.info("Outputs for normal and short reads were merged.")
    # filtering for spike-in control
if args.pb:
le_spike = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
le_spike_args = shlex.split("%s -t %d %s %s" \
% (minimap2_filtering_params, int(args.ncpu), filter_ref, sample_path))
le_spike.exec(*le_spike_args, out=pb_control, err=pb_control_err)
logger.info("Spike-in control filteration started. Process is %s" % le_spike.get_pid())
# here wait until the minimap procerss finishes
while True:
            if le_spike.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le_spike.get_pid(), le_spike.get_bin_path()))
break
logger.info("Filtering spike-in control in sampled reads...")
sleep(10)
if args.short:
le_spike_short = LqExec(os.path.join(path_minimap2, "minimap2-coverage"))
le_spike_short_args = shlex.split("%s -t %d %s %s" \
% (minimap2_filtering_params, int(args.ncpu), filter_ref, short_sample_path))
le_spike_short.exec(*le_spike_short_args, out=pb_control_short, err=pb_control_short_err)
logger.info("Spike-in control filteration started. Process is %s" % le_spike_short.get_pid())
# here wait until the minimap procerss finishes
while True:
                if le_spike_short.get_poll() is not None:
logger.info("Process %s for %s terminated." % (le_spike_short.get_pid(), le_spike_short.get_bin_path()))
break
logger.info("Filtering spike-in control in sampled reads...")
sleep(10)
logger.info("Filteration finished.")
sleep(10)
with open(merged_control, 'w') as outf:
with open(pb_control, 'r') as inf:
outf.write(inf.read())
with open(pb_control_short, 'r') as inf:
outf.write(inf.read())
logger.info("Outputs for normal and short reads were merged.")
logger.info("Filteration finished.")
    # for a laggy file system, we need to wait a bit; otherwise, a no-data exception will be raised.
sleep(10)
# execute minimap2_coverage
logger.info("Generating coverage related plots...")
if args.short:
lc = LqCoverage(merged_cov_path, isTranscript=args.transcript, control_filtering=merged_control)
else:
lc = LqCoverage(cov_path, isTranscript=args.transcript, control_filtering=pb_control)
lc.plot_coverage_dist(fig_path_cv)
lc.plot_unmapped_frac_terminal(fig_path_ta, \
adp5_pos=np.mean(adp_pos5) if args.adp5 and adp_pos5 and np.mean(adp_pos5) > 0 else None, \
adp3_pos=np.mean(adp_pos3) if args.adp3 and adp_pos3 and | np.mean(adp_pos3) | numpy.mean |
from sklearn.metrics import roc_auc_score, recall_score, precision_score
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
Rfast = importr('Rfast')
_ROC_AUC = 'roc_auc'
_PRECISION_OUTLIERS = 'precision_outliers'
_RECALL_OUTLIERS = 'recall_outliers'
_F1_SCORE_OUTLIERS = 'f1_score_outliers'
def metric_names():
return [_ROC_AUC, _PRECISION_OUTLIERS, _RECALL_OUTLIERS, _F1_SCORE_OUTLIERS]
def calculate_all_metrics(y_true, y_pred, metrics_to_calculate=None, run_R=False):
perfs = {}
for m in metric_names():
if metrics_to_calculate is not None and m not in metrics_to_calculate:
continue
perfs.update(calculate_metric(y_true, y_pred, m, run_R))
return perfs
def calculate_metric(y_true, y_pred, metric_id, run_R=False):
assert len(y_true.shape) == 1, len(y_true)
assert y_true.shape[0] == y_pred.shape[0]
if metric_id == _ROC_AUC:
return calculate_roc_auc(y_true, y_pred, run_R)
elif metric_id == _PRECISION_OUTLIERS:
return run_metric(y_true, y_pred, calculate_precision_outliers, metric_id)
elif metric_id == _RECALL_OUTLIERS:
return run_metric(y_true, y_pred, calculate_recall_outliers, metric_id)
elif metric_id == _F1_SCORE_OUTLIERS:
return run_metric(y_true, y_pred, calculate_f1_score_outliers, metric_id)
else:
assert False, 'Metric ' + metric_id + ' not found'
def run_metric(y_true, y_pred, metric_func, metric_id):
if predictions_have_mul_cols(y_pred):
vals = np.zeros(y_pred.shape[1])
for i in range(y_pred.shape[1]):
val = metric_func(y_true, y_pred[:, i])
            vals[i] = val[metric_id]
return {metric_id: vals}
else:
return metric_func(y_true, y_pred)
def calculate_roc_auc(y_true, y_pred, run_R=False):
if len(np.unique(y_true)) == 1:
return {_ROC_AUC: -1}
if run_R is True:
pandas2ri.activate()
y_true = ro.r.matrix(y_true, nrow=y_true.shape[0], ncol=1)
if len(y_pred.shape) == 2 and y_pred.shape[1] > 1:
y_pred = ro.r.matrix(y_pred, nrow=y_pred.shape[0], ncol=y_pred.shape[1])
return {_ROC_AUC: Rfast.colaucs(y_true, y_pred)}
else:
y_pred = ro.r.matrix(y_pred, nrow=y_pred.shape[0], ncol=1)
return {_ROC_AUC: Rfast.auc(y_true, y_pred)[0]}
else:
return {_ROC_AUC: roc_auc_score(y_true, y_pred)}
def calculate_precision_outliers(y_true, y_pred):
if len(np.unique(y_true)) == 1:
return {_PRECISION_OUTLIERS: -1}
y_pred = make_y_pred_is_binary(y_pred)
conf_mat = conf_matrix(y_true, y_pred)
if np.count_nonzero(y_pred == 1) == 0:
return {_PRECISION_OUTLIERS: 0.0}
else:
prec = conf_mat['tp'] / (conf_mat['tp'] + conf_mat['fp'])
return {_PRECISION_OUTLIERS: prec}
def calculate_recall_outliers(y_true, y_pred):
if len(np.unique(y_true)) == 1:
return {_RECALL_OUTLIERS: -1}
y_pred = make_y_pred_is_binary(y_pred)
conf_mat = conf_matrix(y_true, y_pred)
rec = conf_mat['tp'] / (conf_mat['tp'] + conf_mat['fn'])
return {_RECALL_OUTLIERS: rec}
def calculate_f1_score_outliers(y_true, y_pred):
if len(np.unique(y_true)) == 1:
return {_F1_SCORE_OUTLIERS: -1}
y_pred = make_y_pred_is_binary(y_pred)
recall = calculate_recall_outliers(y_true, y_pred)[_RECALL_OUTLIERS]
precision = calculate_precision_outliers(y_true, y_pred)[_PRECISION_OUTLIERS]
if recall == 0 and precision == 0:
return {_F1_SCORE_OUTLIERS: 0.0}
else:
f1 = 2 * ((precision * recall) / (precision + recall))
return {_F1_SCORE_OUTLIERS: f1}
def conf_matrix(y_true, y_pred):
return {
'tn': len(np.intersect1d( | np.where(y_true == 0) | numpy.where |
import numpy as np
from scipy import signal
from scipy import ndimage
import preprocessing.image2D.preprocess_2D as pre
import preprocessing.image2D.convolution as conv
import preprocessing.image2D.iterative_point_processing as itproc
def fl_linear(data, kernel):
return signal.convolve2d(data, kernel, 'same')
def fl_isotropic(data, c, its):
for _ in range(its):
grad_x = np.gradient(data, axis=0)
print("grad_x len:", len(grad_x), ", y [0]:", len(grad_x[0]))
gradient_x = itproc.iterate_matrix_by_const(grad_x, c, np.prod)
grad_y = np.gradient(data, axis=1)
print("grad_y len:", len(grad_y), ", y [0]:", len(grad_y[0]))
gradient_y = itproc.iterate_matrix_by_const(grad_y, c, np.prod)
print("gradient_x len:", len(gradient_x), ", y [0]:", len(gradient_x[0]))
print("gradient_y len:", len(gradient_y), ", y [0]:", len(gradient_y[0]))
data = itproc.iterate_matrices_by_matrix(gradient_x, gradient_y, np.sum)
return data
def fl_anisotropic(data, c, its):
for _ in range(its):
laplacian = ndimage.laplace(data)
claplacian = itproc.iterate_matrix_by_const(laplacian, c**2, np.divide, as_array=False)
claplacian = itproc.iterate_matrix_by_const(claplacian, -1, np.prod)
data = itproc.iterate_matrix_func(claplacian, np.exp)
return data
def fl_laplacian(data, c, its):
for _ in range(its):
laplacian = ndimage.laplace(data)
claplacian = itproc.iterate_matrix_by_const(laplacian, c, np.prod)
print("laplacian len:", len(laplacian), ", y [0]:", len(laplacian[0]))
print("claplacian len:", len(claplacian), ", y [0]:", len(claplacian[0]))
data = itproc.iterate_matrices_by_matrix(data, claplacian, np.sum)
return data
def example():
import matplotlib.pyplot as plt
def plotImg(data):
_ = plt.figure(1,figsize=(10, 10))
plt.imshow(data, interpolation='nearest', aspect='auto', cmap='gray')
plt.axis('off')
plt.show()
data = np.random.random(50*50)
data = data.reshape(50, 50)
data = pre.normalize(data)
orig_data = np.copy(data)
kernel3x3 = [[0.1, 5.0, 0.3],
[0.1, 0.7, 0.4],
[0.5, 5.0, 0.8]]
kernel5x5 = [[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]
kernel = []
for i in range(5):
kernel.append([])
for _ in range(7):
kernel[i].append(0)
#data = normalize(filterLinear(data, kernel))
#data = normalize(make_convolution(data, kernel3x3))
#data = normalize(fl_laplacian(data, 0.5, 4))
#data = normalize(fl_isotropic(data, 0.5, 4))
print("5x5 mean")
data = pre.normalize(conv.make_convolution_with_func(data, kernel5x5, np.mean))
#data = normalize(fl_anisotropic(data, 0.5, 4))
print("data len:", len(data), ", y [0]:", len(data[0]))
print("data mean: ", | np.mean(data) | numpy.mean |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines log curves.
:copyright: 2016 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import matplotlib.pyplot as plt
class CurveError(Exception):
"""
Generic error class.
"""
pass
class Curve(object):
"""
Class for log curves.
"""
def __init__(self, params, basis=None):
"""
Args:
params (dict or lasio.Curve)
basis (array-like): An array representing depth.
"""
if basis is None:
raise CurveError('you must provide a depth basis.')
try: # treating as a lasio CurveItem object
self.mnemonic = params.mnemonic
self.description = params.descr
self.units = params.unit
self.data = params.data
self.basis = basis
except:
for k, v in params.items():
if k and v:
setattr(self, k, v)
self.start, self.stop = self.basis[0], self.basis[-1]
self.step = self.basis[1] - self.basis[0]
def __str__(self):
"""
What to return for ``print(instance)``.
"""
if self.units:
s = "{} [{}]: {} samples"
return s.format(self.mnemonic, self.units, self.data.size)
else:
s = "{}: {} samples"
return s.format(self.mnemonic, self.data.size)
def plot(self):
"""
Plot a curve.
"""
fig = plt.figure(figsize=(2, 10))
ax = fig.add_subplot(111)
ax.plot(self.data, self.basis)
plt.title(self.mnemonic)
ax.set_ylim([self.stop, self.start])
return
def mean(self):
"""
Could have all sorts of helpful transforms etc.
"""
try:
return | np.mean(self.data) | numpy.mean |
# -*- coding: utf-8 -*-
#
# Authors: Swolf <<EMAIL>>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
raise ValueError("the current kernel is for 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
n_components should less than the number of channels
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
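# Minimal usage sketch (added for illustration, not part of the original module; the
# synthetic data and the helper name are assumptions): estimate CSP filters with
# csp_kernel on two classes of random EEG-like trials, then extract log-variance
# features with csp_feature. Shapes follow the docstrings above.
def _csp_usage_sketch(n_trials=20, n_channels=8, n_samples=256, n_components=4):
    rng = np.random.RandomState(42)
    X = rng.randn(2 * n_trials, n_channels, n_samples)
    # make the second class stronger on the first channel so CSP has contrast to find
    X[n_trials:, 0, :] *= 2.0
    y = np.concatenate([np.zeros(n_trials), np.ones(n_trials)])
    W, D, A = csp_kernel(X, y)  # spatial filters, eigenvalues, spatial patterns
    features = csp_feature(W, X, n_components=n_components)
    return features  # shape (2 * n_trials, n_components)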
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iterations to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
JADE. The code is a translation of the matlab code provided in the author
website.
References
----------
.. [1] Cardoso, Jean-Francois, and <NAME>. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
c = np.cos(theta)
s = np.sin(theta)
encore = encore | (np.abs(s) > eps)
if (np.abs(s) > eps):
tmp = A[:, Ip].copy()
A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
A[:, Iq] = c * A[:, Iq] - s * tmp
tmp = A[p, :].copy()
A[p, :] = c * A[p, :] + s * A[q, :]
A[q, :] = c * A[q, :] - s * tmp
tmp = V[:, p].copy()
V[:, p] = c * V[:, p] + s * V[:, q]
V[:, q] = c * V[:, q] - s * tmp
D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
return V, D
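# Sanity sketch (added for illustration, not part of the original module): build a few
# symmetric matrices that share one orthonormal eigenbasis, M_k = B diag(d_k) B^T with
# B orthogonal, and check that _rjd leaves almost no off-diagonal mass in the returned
# quasi-diagonal matrices.
def _rjd_sanity_sketch(n_matrices=4, n_channels=6, seed=0):
    rng = np.random.RandomState(seed)
    B, _ = np.linalg.qr(rng.randn(n_channels, n_channels))  # shared orthonormal basis
    X = np.stack([B @ np.diag(rng.rand(n_channels) + 0.1) @ B.T
                  for _ in range(n_matrices)])
    V, D = _rjd(X)
    off_diag_mass = np.sum(np.abs(D * (1.0 - np.eye(n_channels))))
    return V, D, off_diag_mass  # off_diag_mass should be close to zero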
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on pham's algorithm.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iterations to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the PHAM's AJD algorithm [1]_.
References
----------
.. [1] Pham, <NAME>. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(n_iter_max):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = | np.mean(c2 / c1) | numpy.mean |
from __future__ import division, print_function, absolute_import
import numpy as np
from highway_env import utils
from highway_env.envs.abstract import AbstractEnv
from highway_env.road.lane import LineType, StraightLane, SineLane, LanesConcatenation
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.control import ControlledVehicle, MDPVehicle, CarSim, FreeControl
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.vehicle.dynamics import RedLight
import time
import random
class MergeEnvOut(AbstractEnv):
"""
A highway merge negotiation environment.
    The ego-vehicle is driving on a highway and approaches a merge, with some vehicles incoming on the access ramp.
It is rewarded for maintaining a high velocity and avoiding collisions, but also making room for merging
vehicles.
"""
COLLISION_REWARD = -1
RIGHT_LANE_REWARD = 0.1
HIGH_VELOCITY_REWARD = 0.2
MERGING_VELOCITY_REWARD = -0.5
LANE_CHANGE_REWARD = -0.05
DEFAULT_CONFIG = {"other_vehicles_type": "highway_env.vehicle.behavior.IDMVehicle",
"incoming_vehicle_destination": None,
"other_vehicles_destination": None}
def __init__(self):
super(MergeEnvOut, self).__init__()
self.config = self.DEFAULT_CONFIG.copy()
self.steps = 0
# self.make_road()
# self.reset()
# self.double_merge()
# self.make_vehicles()
def configure(self, config):
self.config.update(config)
def _observation(self):
return super(MergeEnvOut, self)._observation()
def _reward(self, action):
"""
The vehicle is rewarded for driving with high velocity on lanes to the right and avoiding collisions, but
an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low velocity.
:param action: the action performed
:return: the reward of the state-action transition
"""
action_reward = {0: self.LANE_CHANGE_REWARD,
1: 0,
2: self.LANE_CHANGE_REWARD,
3: 0,
4: 0}
reward = self.COLLISION_REWARD * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index / (len(self.road.lanes) - 2) \
+ self.HIGH_VELOCITY_REWARD * self.vehicle.velocity_index / (self.vehicle.SPEED_COUNT - 1)
# Altruistic penalty
for vehicle in self.road.vehicles:
if vehicle.lane_index == len(self.road.lanes) - 1 and isinstance(vehicle, ControlledVehicle):
reward += self.MERGING_VELOCITY_REWARD * \
(vehicle.target_velocity - vehicle.velocity) / vehicle.target_velocity
return reward + action_reward[action]
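    # Worked example (illustrative numbers; the lane count and SPEED_COUNT depend on the
    # road and vehicle configuration): with 4 lanes, lane_index=2, velocity_index=2,
    # SPEED_COUNT=3, no crash and an action whose action_reward is 0, the dense term is
    #   -1*0 + 0.1*(2/2) + 0.2*(2/2) = 0.3,
    # and each slow ControlledVehicle on the merging lane then subtracts
    #   0.5 * (target_velocity - velocity) / target_velocity.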
def _is_terminal(self):
"""
The episode is over when a collision occurs or when the access ramp has been passed.
"""
return self.vehicle.crashed or self.vehicle.position[0] > 300
def reset(self):
# self.make_road()
print("enter reset")
self.make_roads()
self.make_vehicles()
return self._observation()
def make_roads(self):
net = RoadNetwork()
n, c, s = LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED
net.add_lane("s1", "inter1", StraightLane(np.array([0, 0]), np.array([100, 0]), line_types=[c, s]))
net.add_lane("inter1", "inter2", StraightLane(np.array([100, 0]), np.array([150, 0]), line_types=[c, s]))
net.add_lane("inter2", "inter3", StraightLane(np.array([150, 0]), np.array([200, 0]), line_types=[c, s]))
net.add_lane("inter3", "x1", StraightLane(np.array([200, 0]), np.array([300, 0]), line_types=[c, s]))
net.add_lane("s1", "inter1", StraightLane(np.array([0, 4]), np.array([100, 4]), line_types=[s, s]))
net.add_lane("inter1", "inter2", StraightLane(np.array([100, 4]), np.array([150, 4]), line_types=[s, s]))
net.add_lane("inter2", "inter3", StraightLane(np.array([150, 4]), np.array([200, 4]), line_types=[s, s]))
net.add_lane("inter3", "x1", StraightLane(np.array([200, 4]), np.array([300, 4]), line_types=[s, s]))
net.add_lane("s1", "inter1", StraightLane(np.array([0, 8]), np.array([100, 8]), line_types=[s, s]))
net.add_lane("inter1", "inter2", StraightLane(np.array([100, 8]), np.array([150, 8]), line_types=[s, s]))
net.add_lane("inter2", "inter3", StraightLane(np.array([150, 8]), np.array([200, 8]), line_types=[s, c]))
net.add_lane("inter3", "x1", StraightLane( | np.array([200, 8]) | numpy.array |
import os
import csv
import numpy as np
import pandas as pd
import pickle
import networkx as nx
from scipy.linalg import eigh
from itertools import product, permutations, combinations
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score
class DeshpandeMontanari:
def __init__(self, v, p, subgraph, sg, d):
self._params = {
'vertices': v,
'probability': p,
'subgraph': subgraph, # 'clique', 'dag-clique', 'k-plex', 'biclique' or 'G(k, q)' for G(k, q) with probability q (e.g. 'G(k, 0.9)').
'subgraph_size': sg,
'directed': d
}
self._key_name = (subgraph, f"n_{v}_p_{p}_size_{sg}_{'d' if d else 'ud'}")
self._head_path = os.path.join(os.path.dirname(__file__), "../../..", 'graph_calculations', 'pkl',
subgraph, self._key_name[1] + '_runs')
self._load_data()
def _load_data(self):
graph_ids = os.listdir(self._head_path)
if len(graph_ids) == 0:
raise ValueError(f"No runs of G({self._params['vertices']}, {self._params['probability']}) "
f"with a {self._params['subgraph']} subgraph of size {self._params['subgraph_size']} "
f"were saved, and no new runs were requested.")
self._graphs, self._labels = [], []
for run in range(len(graph_ids)):
dir_path = os.path.join(self._head_path, self._key_name[1] + "_run_" + str(run))
gnx = pickle.load(open(os.path.join(dir_path, 'gnx.pkl'), 'rb'))
labels = pickle.load(open(os.path.join(dir_path, 'labels.pkl'), 'rb'))
if type(labels) == dict:
labels = [y for x, y in labels.items()]
self._graphs.append(gnx)
self._labels.append(labels)
def algorithm(self, t_star):
ranks = []
all_labels = []
for g in range(len(self._graphs)):
graph = self._graphs[g]
labels = self._labels[g]
res = self._algorithm(graph, labels, t_star)
ranks += res
all_labels += labels
return ranks, all_labels
def _algorithm(self, graph, labels, t_star):
# INITIALIZATION #
w = nx.to_numpy_array(graph)
for i, j in permutations(range(w.shape[0]), 2):
if i != j and w[i, j] == 0:
w[i, j] = -1
elif w[i, j] == 1:
w[i, j] = (1 - self._params['probability']) / self._params['probability']
kappa = self._params['subgraph_size'] / np.sqrt(self._params['vertices'])
gamma_vectors = [np.ones((self._params['vertices'],))]
gamma_matrices = [np.subtract(np.ones((self._params['vertices'], self._params['vertices'])),
np.eye(self._params['vertices']))]
# Belief Propagation iterations #
for t in range(t_star):
helping_matrix = np.exp(gamma_matrices[t]) / np.sqrt(self._params['vertices'])
log_numerator = np.log(1 + np.multiply(1 + w, helping_matrix))
log_denominator = np.log(1 + helping_matrix)
helping_for_vec = log_numerator - log_denominator
gamma_vec = np.log(kappa) + np.sum(helping_for_vec, axis=1) - np.diag(helping_for_vec)
gamma_mat = np.tile(gamma_vec, (self._params['vertices'], 1)) - helping_for_vec.transpose()
gamma_vectors.append(gamma_vec)
gamma_matrices.append(gamma_mat)
sorted_vertices = np.argsort(gamma_vectors[t_star])
c_n_hat = sorted_vertices[-self._params['subgraph_size']:]
print(f"After the final stage, {len([v for v in c_n_hat if labels[v]])} {self._params['subgraph']} vertices "
f"out of {len(c_n_hat)} vertices are left")
return list(gamma_vectors[t_star])
def roc_curves_for_comparison(size, prob, subgraph, sub_size, directed):
plt.figure()
dm = DeshpandeMontanari(size, prob, subgraph, sub_size, directed)
ranks, labels = dm.algorithm(t_star=100)
auc = []
for r in range(len(labels) // size):
ranks_by_run = ranks[r*size:(r+1)*size]
labels_by_run = labels[r*size:(r+1)*size]
fpr, tpr, _ = roc_curve(labels_by_run, ranks_by_run)
auc_by_run = roc_auc_score(labels_by_run, ranks_by_run)
auc.append(auc_by_run)
plt.plot(fpr, tpr, label=f"AUC = {auc_by_run:.4f}")
plt.xlabel('fpr')
plt.ylabel('tpr')
plt.title(f"DM on G({size}, {prob}, {sub_size}), subgraph: {subgraph}, mean AUC = {np.mean(auc):.4f}")
plt.legend()
plt.savefig(os.path.join("../../../Downloads/figures", subgraph, f"DM_{size}_{sub_size}.png"))
def performance_test_dm(sizes, sg_sizes, subgraph, filename):
with open(os.path.join("results", subgraph, filename), 'w') as f:
wr = csv.writer(f)
wr.writerow(['Graph Size (all undirected)', 'Subgraph Size', 'Mean remaining subgraph vertices %', 'AUC on all runs'])
for sz, sg_sz in list(product(sizes, sg_sizes)):
print(str(sz) + ",", sg_sz)
dm = DeshpandeMontanari(sz, 0.5, subgraph, sg_sz, True if subgraph == "dag-clique" else False)
scores, lbs = dm.algorithm(t_star=100)
auc = roc_auc_score(lbs, scores)
remaining_subgraph_vertices = []
for r in range(len(lbs) // sz):
ranks_by_run = scores[r*sz:(r+1)*sz]
labels_by_run = lbs[r*sz:(r+1)*sz]
sorted_vertices_by_run = np.argsort(ranks_by_run)
c_n_hat_by_run = sorted_vertices_by_run[-2 * sg_sz:]
remaining_subgraph_vertices.append(len([v for v in c_n_hat_by_run if labels_by_run[v]]))
wr.writerow([str(val) for val in [sz, sg_sz,
np.round(np.mean(remaining_subgraph_vertices) * (100. / sg_sz), 2),
np.round(auc, 4)]])
def test_subgraph(graph, subgraph, final_set, subgraph_vertices=None):
if subgraph == "clique":
return all([graph.has_edge(v1, v2) for v1, v2 in combinations(final_set, 2)])
elif subgraph == "dag-clique":
return all([any([graph.has_edge(v1, v2), graph.has_edge(v2, v1)]) for v1, v2 in combinations(final_set, 2)] +
[nx.is_directed_acyclic_graph(nx.induced_subgraph(graph, final_set))])
elif subgraph == "k-plex":
return all([d[1] >= len(final_set) - 2 for d in nx.degree(nx.induced_subgraph(graph, final_set))])
elif subgraph == "biclique":
if not nx.is_connected(nx.induced_subgraph(graph, final_set)):
return False
try:
first, second = nx.algorithms.bipartite.basic.sets(nx.induced_subgraph(graph, final_set))
return all([graph.has_edge(v1, v2) for v1, v2 in product(first, second)])
except nx.exception.NetworkXError:
return False
else: # G(k, q). The only case we have the exact vertices we want and not a subgraph shape.
return len(subgraph_vertices) == len(set(subgraph_vertices).intersection(set(final_set)))
def condition(s, updates, graph, subgraph):
if subgraph in ["clique", "biclique", "dag-clique", "k-plex"]:
return not test_subgraph(graph, subgraph, s) and updates < 50
else:
return updates < 50
def cleaning_algorithm(graph, subgraph, first_candidates, cl_sz):
dm_candidates = first_candidates
dm_adjacency = nx.adjacency_matrix(graph, nodelist=dm_candidates).toarray()
normed_dm_adj = (dm_adjacency + dm_adjacency.T) - 1 + np.eye(dm_adjacency.shape[0]) # Zeros on the diagonal
_, eigenvec = eigh(normed_dm_adj, eigvals=(normed_dm_adj.shape[0] - 1, normed_dm_adj.shape[0] - 1))
dm_next_set = [dm_candidates[v] for v in np.argsort(np.abs(eigenvec.ravel()))[-cl_sz:].tolist()]
updates = 0
while condition(dm_next_set, updates, graph, subgraph):
connection_to_set = [len(set(graph.neighbors(v)).intersection(set(dm_next_set))) for v in graph]
dm_next_set = np.argsort(connection_to_set)[-cl_sz:].tolist()
updates += 1
return dm_next_set, updates
def get_subgraphs(sizes, subgraph, filename, p=0.5):
# Assuming we have already applied remaining vertices analysis on the relevant graphs.
success_rate_dict = {'Graph Size': [], 'Subgraph Size': [], 'Num. Graphs': [], 'Num. Successes': []}
for sz, sg_sz in sizes:
print(str(sz) + ",", sg_sz)
dm = DeshpandeMontanari(sz, p, subgraph, sg_sz, True if subgraph == "dag-clique" else False)
scores, _ = dm.algorithm(t_star=100)
num_success = 0
num_trials = len(scores) // sz
key_name = (subgraph, f"n_{sz}_p_{p}_size_{sg_sz}_{'d' if subgraph == 'dag-clique' else 'ud'}")
head_path = os.path.join(os.path.dirname(__file__), '../../..', 'graph_calculations', 'pkl', key_name[0], key_name[1] + '_runs')
for r in range(num_trials):
ranks_by_run = scores[r*sz:(r+1)*sz]
sorted_vertices_by_run = np.argsort(ranks_by_run)
c_n_hat_by_run = sorted_vertices_by_run[-2 * sg_sz:]
dir_path = os.path.join(head_path, key_name[1] + "_run_" + str(r))
graph = pickle.load(open(os.path.join(dir_path, 'gnx.pkl'), 'rb'))
final_set, _ = cleaning_algorithm(graph, subgraph, c_n_hat_by_run, sg_sz)
num_success += int(test_subgraph(graph, subgraph, final_set))
print("Success rates: " + str(num_success / float(num_trials)))
for key, value in zip(['Graph Size', 'Subgraph Size', 'Num. Graphs', 'Num. Successes'],
[sz, sg_sz, num_trials, num_success]):
success_rate_dict[key].append(value)
success_rate_df = pd.DataFrame(success_rate_dict)
success_rate_df.to_excel(os.path.join("results", subgraph, filename), index=False)
def inspect_second_phase(sizes, subgraph, filename, p=0.5):
measurements_dict = {'Graph Size': [], 'Subgraph Size': [], 'Subgraph Remaining Num.': [],
'Num. Iterations': [], 'Success': []}
for sz, sg_sz in sizes:
print(str(sz) + ",", sg_sz)
dm = DeshpandeMontanari(sz, p, subgraph, sg_sz, True if subgraph == "dag-clique" else False)
scores, lbs = dm.algorithm(t_star=100)
key_name = (subgraph, f"n_{sz}_p_{p}_size_{sg_sz}_{'d' if subgraph == 'dag-clique' else 'ud'}")
head_path = os.path.join(os.path.dirname(__file__), '../../..', 'graph_calculations', 'pkl', key_name[0], key_name[1] + '_runs')
for r in range(len(scores) // sz):
ranks_by_run = scores[r*sz:(r+1)*sz]
labels_by_run = lbs[r*sz:(r+1)*sz]
sorted_vertices_by_run = np.argsort(ranks_by_run)
c_n_hat_by_run = sorted_vertices_by_run[-2 * sg_sz:]
sg_remaining = len([v for v in c_n_hat_by_run if labels_by_run[v]])
dir_path = os.path.join(head_path, key_name[1] + "_run_" + str(r))
graph = pickle.load(open(os.path.join(dir_path, 'gnx.pkl'), 'rb'))
final_set, num_iterations = cleaning_algorithm(graph, subgraph, c_n_hat_by_run, sg_sz)
success = int(test_subgraph(graph, subgraph, final_set))
for key, value in zip(['Graph Size', 'Subgraph Size', 'Subgraph Remaining Num.', 'Num. Iterations', 'Success'],
[sz, sg_sz, sg_remaining, num_iterations, success]):
measurements_dict[key].append(value)
measurements_df = pd.DataFrame(measurements_dict)
measurements_df.to_excel(os.path.join("results", subgraph, filename), index=False)
def trio(sizes, subgraph, filename_algorithm_test, filename_success_rate, filename_run_analysis, p=0.5):
# Write both results of the BP phase, results of the complete algorithm (success) and (success) results by run.
if not os.path.exists(os.path.join("results", subgraph)):
os.mkdir(os.path.join("results", subgraph))
with open(os.path.join("results", subgraph, filename_algorithm_test), 'w') as f:
wr = csv.writer(f)
wr.writerow(['Graph Size', 'Subgraph Size', 'Mean remaining subgraph vertices %', 'AUC on all runs'])
success_rate_dict = {'Graph Size': [], 'Subgraph Size': [], 'Num. Graphs': [], 'Num. Successes': []}
measurements_dict = {'Graph Size': [], 'Subgraph Size': [], 'Subgraph Remaining Num.': [],
'Num. Iterations': [], 'Success': []}
for sz, sg_sz in sizes:
print(str(sz) + ",", sg_sz)
dm = DeshpandeMontanari(sz, p, subgraph, sg_sz, True if subgraph == "dag-clique" else False)
scores, lbs = dm.algorithm(t_star=100)
num_success = 0
num_trials = len(scores) // sz
key_name = (subgraph, f"n_{sz}_p_{p}_size_{sg_sz}_{'d' if subgraph == 'dag-clique' else 'ud'}")
head_path = os.path.join(os.path.dirname(__file__), '../../..', 'graph_calculations', 'pkl', key_name[0], key_name[1] + '_runs')
auc = []
remaining_subgraph_vertices = []
for r in range(len(lbs) // sz):
ranks_by_run = scores[r*sz:(r+1)*sz]
labels_by_run = lbs[r*sz:(r+1)*sz]
auc.append(roc_auc_score(labels_by_run, ranks_by_run))
sorted_vertices_by_run = np.argsort(ranks_by_run)
c_n_hat_by_run = sorted_vertices_by_run[-2 * sg_sz:]
remaining_subgraph_vertices.append(len([v for v in c_n_hat_by_run if labels_by_run[v]]))
dir_path = os.path.join(head_path, key_name[1] + "_run_" + str(r))
graph = pickle.load(open(os.path.join(dir_path, 'gnx.pkl'), 'rb'))
final_set, num_iterations = cleaning_algorithm(graph, subgraph, c_n_hat_by_run, sg_sz)
success = int(test_subgraph(graph, subgraph, final_set))
num_success += success
for key, value in zip(
['Graph Size', 'Subgraph Size', 'Subgraph Remaining Num.', 'Num. Iterations', 'Success'],
[sz, sg_sz, remaining_subgraph_vertices[-1], num_iterations, success]):
measurements_dict[key].append(value)
print("Success rates: " + str(num_success / float(num_trials)))
for key, value in zip(['Graph Size', 'Subgraph Size', 'Num. Graphs', 'Num. Successes'],
[sz, sg_sz, num_trials, num_success]):
success_rate_dict[key].append(value)
wr.writerow([str(val)
for val in [sz, sg_sz,
np.round(np.mean(remaining_subgraph_vertices) * (100. / sg_sz), 2),
np.round( | np.mean(auc) | numpy.mean |
from __future__ import print_function, division, absolute_import
import warnings
import sys
import itertools
# unittest only added self.subTest() in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import keypoints_equal, reseed
class Test_blur_gaussian_(unittest.TestCase):
def setUp(self):
reseed()
def test_integration(self):
backends = ["auto", "scipy", "cv2"]
nb_channels_lst = [None, 1, 3, 4, 5, 10]
gen = itertools.product(backends, nb_channels_lst)
for backend, nb_channels in gen:
with self.subTest(backend=backend, nb_channels=nb_channels):
image = np.zeros((5, 5), dtype=np.uint8)
if nb_channels is not None:
image = np.tile(image[..., np.newaxis], (1, 1, nb_channels))
image[2, 2] = 255
mask = image < 255
observed = iaa.blur_gaussian_(
np.copy(image), sigma=5.0, backend=backend)
assert observed.shape == image.shape
assert observed.dtype.name == "uint8"
assert np.all(observed[2, 2] < 255)
assert np.sum(observed[mask]) > (5*5-1)
if nb_channels is not None and nb_channels > 1:
for c in sm.xrange(1, observed.shape[2]):
assert np.array_equal(observed[..., c],
observed[..., 0])
def test_sigma_zero(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4, 1))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
def test_eps(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed_no_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=0)
observed_with_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=1e10)
assert not np.array_equal(observed_no_eps, observed_with_eps)
assert np.array_equal(observed_with_eps, image)
def test_ksize(self):
def side_effect(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
sigmas = [5.0, 5.0]
ksizes = [None, 3]
ksizes_expected = [2.6*5.0, 3]
gen = zip(sigmas, ksizes, ksizes_expected)
for (sigma, ksize, ksize_expected) in gen:
with self.subTest(sigma=sigma, ksize=ksize):
mock_GaussianBlur = mock.Mock(side_effect=side_effect)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
observed = iaa.blur_gaussian_(
np.copy(image),
sigma=sigma,
ksize=ksize,
backend="cv2")
assert np.array_equal(observed, image+1)
cargs = mock_GaussianBlur.call_args
assert mock_GaussianBlur.call_count == 1
assert np.array_equal(cargs[0][0], image)
assert isinstance(cargs[0][1], tuple)
assert np.allclose(
np.float32(cargs[0][1]),
np.float32([ksize_expected, ksize_expected]))
assert np.isclose(cargs[1]["sigmaX"], sigma)
assert np.isclose(cargs[1]["sigmaY"], sigma)
assert cargs[1]["borderType"] == cv2.BORDER_REFLECT_101
def test_more_than_four_channels(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_backends_called(self):
def side_effect_cv2(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
def side_effect_scipy(image, sigma, mode):
return image + 1
mock_GaussianBlur = mock.Mock(side_effect=side_effect_cv2)
mock_gaussian_filter = mock.Mock(side_effect=side_effect_scipy)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="cv2")
assert mock_GaussianBlur.call_count == 1
with mock.patch('scipy.ndimage.gaussian_filter', mock_gaussian_filter):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="scipy")
assert mock_gaussian_filter.call_count == 1
def test_backends_similar(self):
with self.subTest(nb_channels=None):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
with self.subTest(nb_channels=3):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image = np.tile(image[..., np.newaxis], (1, 1, 3))
image[1] += 1
image[2] += 2
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
for c in sm.xrange(3):
diff = np.abs(image_cv2[..., c].astype(np.int32)
- image_scipy[..., c].astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
def test_warnings(self):
# note that self.assertWarningRegex does not exist in python 2.7
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = iaa.blur_gaussian_(
np.zeros((1, 1), dtype=np.uint32),
sigma=3.0,
ksize=11,
backend="scipy")
assert len(caught_warnings) == 1
assert (
"but also provided 'ksize' argument"
in str(caught_warnings[-1].message))
def test_other_dtypes_sigma_0(self):
dtypes_to_test_list = [
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"],
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == image)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.all(image_aug == image)
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
for dtype in float_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.allclose(image_aug, image)
def test_other_dtypes_sigma_075(self):
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
dtypes_to_test_list = [
# scipy
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
# cv2
["bool",
"uint8", "uint16",
"int8", "int16", "int32",
"float16", "float32", "float64"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((5, 5), dtype=bool)
image[2, 2] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.75, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == (mask > 0.5))
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if dtype.itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
values = [5000, 1000**1, 1000**2, 1000**3]
for dtype, value in zip(float_dts, values):
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1,
# 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
max_diff = (
np.dtype(dtype).itemsize
* 0.01
* np.float128(value))
assert np.max(diff) < max_diff
def test_other_dtypes_bool_at_sigma_06(self):
# --
# blur of bool input at sigma=0.6
# --
# here we use a special mask and sigma, as otherwise the only values
# ending up at >0.5 would be the ones that were already >0.5
# before the blur
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
for backend in ["scipy", "cv2"]:
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.6, backend=backend)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
class Test_blur_mean_shift_(unittest.TestCase):
@property
def image(self):
image = [
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203]
]
image = np.array(image, dtype=np.uint8).reshape((4, 2*4, 1))
image = np.tile(image, (1, 1, 3))
return image
def test_simple_image(self):
image = self.image
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
assert 0 <= np.average(image[:, 0:4, :]) <= 5
assert 199 <= np.average(image[:, 4:, :]) <= 203
def test_hw_image(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_hw1_image(self):
image = self.image[:, :, 0:1]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.ndim == 3
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_non_contiguous_image(self):
image = self.image
image_cp = np.copy(np.fliplr(image))
image = np.fliplr(image)
assert image.flags["C_CONTIGUOUS"] is False
image_blurred = iaa.blur_mean_shift_(image, 0.5, 0.5)
assert image_blurred.shape == image_cp.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image_cp)
def test_both_parameters_are_zero(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0, 0)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_mean_shift_(np.copy(image), 1.0, 1.0)
assert image_aug.shape == image.shape
class TestGaussianBlur(unittest.TestCase):
def setUp(self):
reseed()
def test_sigma_is_zero(self):
# no blur, shouldn't change anything
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_low_sigma(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
def test_keypoints_dont_change(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 1))]
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
def test_sigma_is_tuple(self):
# varying blur sigmas
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
def test_other_dtypes_bool_at_sigma_0(self):
# bool
aug = iaa.GaussianBlur(sigma=0)
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
def test_other_dtypes_uint_int_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
def test_other_dtypes_float_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.float16, np.float32, np.float64]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
def test_other_dtypes_bool_at_sigma_060(self):
# --
# blur of bool input at sigma=0.6
# --
# here we use a special mask and sigma, as otherwise the only values
# ending up at >0.5 would be the ones that were already >0.5
# before the blur
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=0.6)
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
image_aug = aug.augment_image(image)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
def test_other_dtypes_at_sigma_1(self):
# --
# blur of various dtypes at sigma=1.0
# and using an example value of 100 for int/uint/float and True for
# bool
# --
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[2, 2] = 100
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=1.0)
mask = np.float64([
[1, 2, 3, 2, 1],
[2, 5, 9, 5, 2],
[4, 9, 15, 9, 4],
[2, 5, 9, 5, 2],
[1, 2, 3, 2, 1]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
assert np.average(diff) <= 2
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4
assert np.average(diff) < 2.0
def test_other_dtypes_at_sigma_040(self):
# --
# blur of various dtypes at sigma=0.4
# and using an example value of 100 for int/uint/float and True for
# bool
# --
aug = iaa.GaussianBlur(sigma=0.4)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.uint8)
# mask[2, 2] = 100
# kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror")
mask = np.float64([
[0, 0, 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 3, 83, 3, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4.0
def test_other_dtypes_at_sigma_075(self):
# --
# blur of various dtypes at sigma=0.75
# and values being half-way between center and maximum for each dtype
# The goal of this test is to verify that no major loss of resolution
# happens for large dtypes.
# Such inaccuracies would appear if float64 were used internally.
# --
aug = iaa.GaussianBlur(sigma=0.75)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if np.dtype(dtype).itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64]
values = [5000, 1000*1000, 1000*1000*1000]
for dtype, value in zip(float_dts, values):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
# i.e. 8, 16, 32, 64 bit)
max_diff = np.dtype(dtype).itemsize * 0.01 * np.float128(value)
assert np.max(diff) < max_diff
def test_failure_on_invalid_dtypes(self):
# assert failure on invalid dtypes
aug = iaa.GaussianBlur(sigma=1.0)
for dt in [np.float128]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
class TestAverageBlur(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestAverageBlur, self).__init__(*args, **kwargs)
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
self.base_img = base_img
self.blur3x3 = blur3x3
self.blur4x4 = blur4x4
self.blur5x5 = blur5x5
def setUp(self):
reseed()
def test_kernel_size_0(self):
# no blur, shouldn't change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.base_img)
def test_kernel_size_3(self):
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur3x3)
def test_kernel_size_5(self):
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur5x5)
def test_kernel_size_is_tuple(self):
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_with_wider_range(self):
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 200
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
def test_kernel_size_is_stochastic_parameter(self):
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_of_tuples(self):
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
possible[key] = np.copy(self.base_img)
else:
possible[key] = cv2.blur(
self.base_img, (kh, kw))[..., np.newaxis]
nb_iterations = 250
nb_seen = dict([(key, 0) for key, val in possible.items()])
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
for key, img_aug in possible.items():
if | np.array_equal(observed, img_aug) | numpy.array_equal |
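# Hedged sketch (the remainder of this test is truncated in this row): the
# inner loop presumably counts which (kh, kw) kernel produced the observed
# image, and the test then checks that every combination was sampled, e.g.:
#                 if np.array_equal(observed, img_aug):
#                     nb_seen[key] += 1
#                     break
#         assert all(count > 0 for count in nb_seen.values())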
"""AoC 2018 Day 11: Chronal Charge"""
import numpy as np
from tqdm import tqdm
# Part 1
def cell_power(x, y, grid_serial):
rack_id = x + 10
power = rack_id * y
power += grid_serial
power *= rack_id
# keep only the hundreds digit (third digit from the right; 0 if there is none)
power //= 100
power %= 10
power -= 5
return power
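# Sanity-check helper (added for illustration, not part of the original
# script); the reference values are the worked examples from the AoC 2018
# Day 11 puzzle statement.
def _cell_power_examples():
    assert cell_power(3, 5, 8) == 4
    assert cell_power(122, 79, 57) == -5
    assert cell_power(217, 196, 39) == 0
    assert cell_power(101, 153, 71) == 4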
def grid_power(grid_serial, size=300):
grid = | np.zeros(shape=(size + 1, size + 1), dtype=np.int64) | numpy.zeros |
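# Hedged sketch (not the original code, which is truncated in this row):
# grid_power presumably fills the grid using cell_power, and Part 1 then asks
# for the 3x3 square with the largest total power, which a brute-force scan
# can find. The *_sketch names below are illustrative only.
def grid_power_sketch(grid_serial, size=300):
    grid = np.zeros(shape=(size + 1, size + 1), dtype=np.int64)
    for x in range(1, size + 1):
        for y in range(1, size + 1):
            grid[x, y] = cell_power(x, y, grid_serial)
    return grid

def best_3x3_sketch(grid):
    # return the top-left (x, y) of the 3x3 square with the largest total power
    best_total, best_xy = None, None
    size = grid.shape[0] - 1
    for x in range(1, size - 1):
        for y in range(1, size - 1):
            total = int(grid[x:x + 3, y:y + 3].sum())
            if best_total is None or total > best_total:
                best_total, best_xy = total, (x, y)
    return best_xy, best_total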
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, TextBox
import time
from astropy.io import fits
from astropy.wcs import WCS
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
#import scipy as sp
from netCDF4 import Dataset
import pickle
from iris2model_sav_to_python_format import convert_sav_to_dict
import iris2_lists
#import dask.array as da
import warnings # ignore tedious warnings
warnings.filterwarnings("ignore")
# plot the different variables
def plot_parameters(show=True):
ltau_valstep = LTAU[1] - LTAU[0]
phys_var_idx = 0
ltau_idx = int(len(LTAU)/2)
fig, (ax1,ax2,ax3,ax4) = plt.subplots(1,4, figsize=(16,6))
# making a slider with respect to log(tau)
slider = Slider(plt.axes([0.025, 0.1, 0.025, 0.8]),
r"log($\tau$)",
np.min(LTAU),
np.max(LTAU),
valstep=ltau_valstep,
valinit=LTAU[ltau_idx],
orientation="vertical",
valfmt="%.1f")
dat1 = MODEL[:,:,ltau_idx,0]
dat2 = MODEL[:,:,ltau_idx,1]
dat3 = MODEL[:,:,ltau_idx,2]
dat4 = MODEL[:,:,ltau_idx,3]
ax1.set_title("%s" % PHYS_VAR[0][:PHYS_VAR[0].find("[")])
im1 = ax1.imshow(dat1.T, cmap="gray", aspect="auto", origin="lower", vmin=3e3, vmax=1.5*np.mean(dat1))
cbar1 = fig.colorbar(im1, ax=ax1, label=PHYS_VAR[0][PHYS_VAR[0].find("["):], format="%.0e")
ax2.set_title("%s" % PHYS_VAR[1][:PHYS_VAR[1].find("[")])
im2 = ax2.imshow(dat2.T, cmap="gray", aspect="auto", origin="lower")
cbar2 = fig.colorbar(im2, ax=ax2, label=PHYS_VAR[1][PHYS_VAR[1].find("["):], format="%.0e")
ax3.set_title("%s" % PHYS_VAR[2][:PHYS_VAR[2].find("[")])
im3 = ax3.imshow(dat3.T, cmap="gray", aspect="auto", origin="lower", vmin=np.percentile(dat3,1), vmax=np.percentile(dat3,98))
cbar3 = fig.colorbar(im3, ax=ax3, label=PHYS_VAR[2][PHYS_VAR[2].find("["):], format="%.0e")
ax4.set_title("%s" % PHYS_VAR[3][:PHYS_VAR[3].find("[")])
im4 = ax4.imshow(dat4.T, cmap="gray", aspect="auto", origin="lower", vmin=np.percentile(dat4,1), vmax=np.percentile(dat4,98))
cbar4 = fig.colorbar(im4, ax=ax4, label=PHYS_VAR[3][PHYS_VAR[3].find("["):], format="%.0e")
def update(X):
idx = int(np.argmin(abs(LTAU-X)))
dat1 = MODEL[:,:,idx,0]
dat2 = MODEL[:,:,idx,1]
dat3 = MODEL[:,:,idx,2]
dat4 = MODEL[:,:,idx,3]
im1.set_data(dat1.T)
im2.set_data(dat2.T)
im3.set_data(dat3.T)
im4.set_data(dat4.T)
im1.set_clim(vmin=3e3, vmax=1.5*np.mean(dat1))
im2.set_clim(vmin=-0.5*np.max(dat2), vmax=0.5*np.max(dat2))
im3.set_clim(vmin=np.percentile(dat3,1), vmax=np.percentile(dat3,98))#vmin=0.1*np.mean(dat3), vmax=3*np.mean(dat3))
im4.set_clim(vmin=np.percentile(dat4,1), vmax=np.percentile(dat4,98))#0.1*np.mean(dat4), vmax=3*np.mean(dat4))
cbar1.update_normal(im1)
cbar2.update_normal(im2)
cbar3.update_normal(im3)
cbar4.update_normal(im3)
#fig_par.canvas.draw_idle()
fig.canvas.draw_idle()
plt.tight_layout(rect=(0.05,0,1,1))
slider.on_changed(update)
if show: plt.show()
# plot RP line and observation line along
# with the model atmosphere of choice
def plot_RP_line(tau_idx=-1, initX=100, initY=100, show=True, show_weights=False):
temp_model = MODEL[:,:,tau_idx,0]
global valX, valY, init_p1, init_p2, cont_p1, cont_p2
valX = initX
valY = initY
init_p1 = 100 # initial percentile
init_p2 = 0 # initial percentile
val_p1 = np.percentile(temp_model, init_p1)
val_p2 = np.percentile(temp_model, init_p2)
cont_p1 = | np.zeros(temp_model.shape) | numpy.zeros |
#!/usr/bin/python3
from PIL import Image
import numpy as np
import os, sys, argparse
def main():
parser = argparse.ArgumentParser(description='Convert 16B tiff to 8B tiff.')
parser.add_argument(dest="filenames", metavar="F", type=str, nargs='+', help="16B tiff filename to convert")
args = parser.parse_args()
for filename in args.filenames:
if os.path.exists("./{}".format(filename)):
convert(filename)
else:
print("Couldn't find {}".format(filename))
def convert(filename):
im = Image.open(filename)
print("reading {}, Format:{}, Size:{}, Mode:{}, Frame Number:{}".format(filename, im.format, im.size, im.mode, im.n_frames))
for i in range(im.n_frames):
im.seek(i)
ar = np.array(im)
ar = ar / 16
max_pix = ar.max()
min_pix = ar.min()
pix_range = max_pix - min_pix
out_range = 256
ar = (ar - min_pix)/pix_range*out_range
pilImg = Image.fromarray( | np.uint8(ar) | numpy.uint8 |
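# Hedged sketch (the rest of convert() is truncated in this row): each
# rescaled frame is presumably written back to disk, e.g. with an assumed
# naming scheme such as:
# pilImg.save("{}_8bit_{:03d}.tif".format(os.path.splitext(filename)[0], i))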
"""
Download images for the games stored in the osgameclones database, then assemble them into a collage.
"""
import ruamel.yaml as yaml
import os
import requests
from PIL import Image
from io import BytesIO
import numpy as np
from progress.bar import IncrementalBar
def download_images():
# import the osgameclones data
path = os.path.realpath(os.path.join(root_path, os.path.pardir, 'osgameclones.git', 'games'))
files = os.listdir(path)
# iterate over all yaml files in osgameclones/data folder and load contents
entries = []
for file in files:
# read yaml
with open(os.path.join(path, file), 'r', encoding='utf-8') as stream:
try:
_ = yaml.safe_load(stream)
except Exception as exc:
print(file)
raise exc
# add to entries
entries.extend(_)
print('imported {} entries'.format(len(entries)))
# collect all image informations
images = []
for entry in entries:
if 'images' in entry:
images.extend(entry['images'])
print('contain {} image links'.format(len(images)))
# download them all
for url in images:
name = "".join(x for x in url[5:] if (x.isalnum() or x in '._-'))
outfile = os.path.join(download_path, name)
if not os.path.isfile(outfile):
try:
r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'},
timeout=20, allow_redirects=True)
if r.status_code == requests.codes.ok:
im = Image.open(BytesIO(r.content))
im.save(outfile)
print('saved {}'.format(url))
except:
pass
def downsize_images():
scale_factor = 10
for file in os.listdir(download_path):
file_path = os.path.join(download_path, file)
if not os.path.isfile(file_path):
continue
outfile = os.path.join(downsized_path, file[:-4]+'.png') # losless storage of downsize image
if os.path.isfile(outfile):
continue
im = Image.open(file_path)
if im.mode != 'RGB':
print('{} - {}'.format(file, im.mode))
continue
width = im.width
height = im.height
if width < target_width * scale_factor or height < target_height * scale_factor:
continue
box = [(width-target_width*scale_factor)/2, (height-target_height*scale_factor)/2, target_width * scale_factor, target_height * scale_factor]
box[2] += box[0]
box[3] += box[1]
im_resized = im.resize((target_width, target_height), resample=Image.LANCZOS, box=box)
im_resized.save(outfile)
print('saved {}'.format(file))
def assemble_collage():
print('start assembling collage')
# load all from downsized path
files = os.listdir(downsized_path)
files = [file for file in files if os.path.isfile(os.path.join(downsized_path, file))]
images = []
bar = IncrementalBar('Loading', max=len(files))
for file in files:
im = Image.open(os.path.join(downsized_path, file))
im = np.asarray(im)
images.append(im)
bar.next()
bar.finish()
# compute total amount of light in each image and only keep the N brightest
images = [(np.sum(image), image) for image in images]
images.sort(key=lambda x: x[0], reverse=True)
images = images[:N]
images = [x[1] for x in images]
# compute the average color in each quadrant
Cx = int(target_height / 2)
Cy = int(target_width / 2)
U = [np.mean(image[:Cx, :, :], axis=(1, 2)) for image in images]
D = [ | np.mean(image[Cx:, :, :], axis=(1, 2)) | numpy.mean |
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- Who You Want To See ---------------------/
'''
# date:2021-04-18
# Author: Eric.Lee
# function: who you want to see ("who do you want to see")
import os
import cv2
import time
from multiprocessing import Process
from multiprocessing import Manager
import numpy as np
import random
import time
import shutil
# load model components
from face_detect.yolo_v3_face import yolo_v3_face_model
from insight_face.face_verify import insight_face_model
from face_multi_task.face_multi_task_component import FaceMuitiTask_Model
from face_euler_angle.face_euler_angle_component import FaceAngle_Model
# load utility libraries
import sys
sys.path.append("./lib/wyw2s_lib/")
from cores.wyw2s_fuction import get_faces_batch_attribute
from utils.utils import parse_data_cfg
from utils.show_videos_thread import run_show
from moviepy.editor import *
def main_wyw2s(video_path,cfg_file):
config = parse_data_cfg(cfg_file)
print("\n/---------------------- main_wyw2s config ------------------------/\n")
for k_ in config.keys():
print("{} : {}".format(k_,config[k_]))
print("\n/------------------------------------------------------------------------/\n")
print("\n loading who you want 2 see local demo ...\n")
face_detect_model = yolo_v3_face_model(conf_thres=float(config["detect_conf_thres"]),nms_thres=float(config["detect_nms_thres"]),
model_arch = config["detect_model_arch"],model_path = config["detect_model_path"],yolo_anchor_scale = float(config["yolo_anchor_scale"]),
img_size = float(config["detect_input_size"]),
)
face_verify_model = insight_face_model(backbone_model_path =config["face_verify_backbone_path"] ,
facebank_path = config["facebank_path"],
threshold = float(config["face_verify_threshold"]))
face_multitask_model = FaceMuitiTask_Model(model_path = config["face_multitask_model_path"], model_arch = config["face_multitask_model_arch"])
face_euler_model = FaceAngle_Model(model_path = config["face_euler_model_path"])
print("\n/------------------------------------------------------------------------/\n")
YouWantToSee = config["YouWantToSee"]
YouWantToSee_=[name_ for name_ in YouWantToSee.split(",")]
print(" YouWantToSee : {}".format(YouWantToSee_))
print("\n/------------------------------------------------------------------------/\n")
p_colors = []
for i in range(len(face_verify_model.face_names)):
if i == 0 :
p_colors.append((100,155,100))
if i == 1 :
p_colors.append((0,255,0))
elif i == 2:
p_colors.append((255,0,0))
elif i == 3:
p_colors.append((0,255,255))
elif i == 4:
p_colors.append((0,185,255))
elif i == 5:
p_colors.append((255,185,55))
else:
p_colors.append((random.randint(60,255),random.randint(70,255),random.randint(130,255)))
cap = cv2.VideoCapture(video_path)
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# timeline
time_map = | np.zeros([200,frame_count,3]) | numpy.zeros |
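# Hedged sketch (the per-frame loop is truncated in this row): the remainder
# of main_wyw2s presumably reads frames from `cap`, detects faces with
# face_detect_model, verifies identities against YouWantToSee_ via
# face_verify_model, marks the matching columns of time_map, and finally cuts
# the matched segments with moviepy / displays them via run_show.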
"""Compare the input versus output rate.
Note this comparison doesn't quite hold - the intrinsic rate plotted here is
the Poisson rate value.
TODO: Fix this
"""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, log10normal, Survey, SurveyPopulation
from frbpoppy import hist
from tests.convenience import plot_aa_style, rel_path
N_DAYS = 50
N_SRCS = int(1e3)
RATE = 0.1 # per day
SINGLE_INPUT_RATE = True
init_surv_time_frac = 0.1
# Set up a population
pop = CosmicPopulation.simple(N_SRCS, n_days=N_DAYS, repeaters=True)
pop.set_dist(z_max=0.01)
pop.set_lum(model='powerlaw', low=1e30, high=1e40, power=-1)
pop.set_w(model='constant', value=1)
pop.set_dm(mw=False, igm=True, host=False)
# Add a distribution of Poisson burst rates
if SINGLE_INPUT_RATE:
rate_dist = RATE
else:
rate_dist = log10normal(RATE, 2, N_SRCS)
pop.set_time(model='poisson', rate=rate_dist)
pop.generate()
# Survey the high fluences
survey = Survey('perfect', n_days=N_DAYS)
survey.set_beam(model='perfect')
survey.snr_limit = 1e-6
survey.t_obs = 60*60 # seconds
# Check the burst rate
surv_pop = SurveyPopulation(pop, survey)
time = surv_pop.frbs.time
# Set up plot
plot_aa_style()
if isinstance(rate_dist, np.ndarray):
min_rate = np.log10(np.min(rate_dist[rate_dist != 0]))
max_rate = np.log10(max(rate_dist))
else:
min_rate = np.log10(RATE) - 1
max_rate = np.log10(RATE) + 1
rate_dist = np.array(rate_dist)
bins = | np.logspace(min_rate, max_rate, 20) | numpy.logspace |
import sys
import numpy as np
import pytest
from opytimizer.core import agent, function
from opytimizer.optimizers import aiwpso
from opytimizer.spaces import search
def test_aiwpso_hyperparams():
hyperparams = {
'w': 2,
'w_min': 1,
'w_max': 3,
'c1': 1.7,
'c2': 1.7
}
new_aiwpso = aiwpso.AIWPSO(hyperparams=hyperparams)
assert new_aiwpso.w == 2
assert new_aiwpso.w_min == 1
assert new_aiwpso.w_max == 3
assert new_aiwpso.c1 == 1.7
assert new_aiwpso.c2 == 1.7
def test_aiwpso_hyperparams_setter():
new_aiwpso = aiwpso.AIWPSO()
new_aiwpso.w_min = 0.5
assert new_aiwpso.w_min == 0.5
new_aiwpso.w_max = 2
assert new_aiwpso.w_max == 2
new_aiwpso.c1 = 1.5
assert new_aiwpso.c1 == 1.5
new_aiwpso.c2 = 1.5
assert new_aiwpso.c2 == 1.5
def test_aiwpso_rebuild():
new_aiwpso = aiwpso.AIWPSO()
assert new_aiwpso.built == True
def test_aiwpso_compute_success():
n_agents = 2
search_space = search.SearchSpace(n_agents=n_agents, n_iterations=10,
n_variables=2, lower_bound=[0, 0],
upper_bound=[10, 10])
new_aiwpso = aiwpso.AIWPSO()
new_fitness = np.zeros(n_agents)
new_aiwpso._compute_success(search_space.agents, new_fitness)
assert new_aiwpso.w != 0
def test_aiwpso_evaluate():
def square(x):
return | np.sum(x**2) | numpy.sum |
import sys
import warnings
import numpy as np
import pandas as pd
from itertools import combinations
from typing import Hashable, List
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
stats = importr('stats')
def match_arg(arg, choices, arg_name):
if arg in choices:
return arg
else:
sys.exit("arguement \"{}\" should be one of ".format(arg_name) + ", ".join([f"\"{choice}\"" for choice in choices]))
def match(a: List[Hashable], b: List[Hashable]) -> List[int]:
b_dict = {x: i for i, x in enumerate(b)}
return [b_dict.get(x, np.nan) for x in a]
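# Example (for illustration): match(["b", "z"], ["a", "b", "c"]) returns
# [1, nan], i.e. the position of each element of `a` within `b`, with NaN
# for elements of `a` that do not occur in `b`.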
def clipper1sided(score_exp,
score_back,
FDR = 0.05,
ifuseknockoff = None,
nknockoff = None,
contrastScore_method = None,
importanceScore_method = "diff",
FDR_control_method = None,
ifpowerful = True,
seed = 12345):
score_exp = np.atleast_2d(score_exp)
score_back = np.atleast_2d(score_back)
if np.any(score_exp < 0) or | np.any(score_back < 0) | numpy.any |
import os, os.path, random
import json
import torch
from torch.nn import functional as F
from torchvision import transforms
import numpy as np
from PIL import Image
import cv2
import albumentations as A
def img_transform(img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
def imresize(im, size, interp='bilinear'):
if interp == 'nearest':
resample = Image.NEAREST
elif interp == 'bilinear':
resample = Image.BILINEAR
elif interp == 'bicubic':
resample = Image.BICUBIC
else:
raise Exception('resample method undefined!')
return im.resize(size, resample)
def b_imresize(im, size, interp='bilinear'):
return F.interpolate(im, size, mode=interp)
# from HRnet
def multi_scale_aug(image, label=None):
# print('image_shape: ', image.shape)
# print('label_shape: ', label.shape)
rand_scale = 0.5 + random.randint(0, 16) / 10.0
long_size = np.int(2048 * rand_scale + 0.5)
w, h = image.shape[-2:]
if h > w:
new_h = long_size
new_w = np.int(w * long_size / h + 0.5)
else:
new_w = long_size
new_h = np.int(h * long_size / w + 0.5)
image = F.interpolate(image, (new_w, new_h), mode='bilinear')
if label is not None:
label = F.interpolate(label.unsqueeze(1).float(), (new_w, new_h), mode='nearest').squeeze(1).long()
else:
return image
return image, label
def patch_loader(X, Y, xi, yi, cfg, train_mode=False, select_scale=None):
X_batch = X.clone()
Y_batch = Y.clone()
if select_scale is not None:
select_scale_batch = select_scale.clone()
len_batch = len(select_scale_batch)
else:
len_batch = X.size(0)
# print('---------select_scale_batch-----------\n', select_scale_batch)
for b in range(len_batch):
X = X_batch[b].unsqueeze(0)
Y = Y_batch[b].unsqueeze(0)
if select_scale is not None:
select_scale = int(select_scale_batch[b])
# print('---------select_scale-----------\n', select_scale)
fov_map_scale = cfg.MODEL.fov_map_scale
ori_w, ori_h = X.shape[-2:]
if cfg.DATASET.multi_scale_aug and train_mode:
X, Y = multi_scale_aug(X, Y)
ori_cx_lr = xi*fov_map_scale
ori_cy_lr = yi*fov_map_scale*cfg.MODEL.patch_ap
scaled_cx_lr = ori_cx_lr * (X.shape[-2]/ori_w)
scaled_cy_lr = ori_cy_lr * (X.shape[-1]/ori_h)
xi, yi = scaled_cx_lr//fov_map_scale, scaled_cy_lr//(fov_map_scale*cfg.MODEL.patch_ap)
# X: b,c,w,h
xi_ori, yi_ori = xi, yi
if train_mode == False:
patch_bank = list((float(cfg.VAL.expand_prediection_rate_patch)*np.array(cfg.MODEL.patch_bank)).astype(int))
else:
patch_bank = cfg.MODEL.patch_bank
segm_downsampling_rate = cfg.DATASET.segm_downsampling_rate
fov_padding = cfg.MODEL.fov_padding
# single_gpu_size = str(torch.cuda.get_device_properties('cuda:0')).split('total_memory=')[1].split('MB')[0]
single_gpu_size = 10000
if int(single_gpu_size) < 1:
fov_padding_cpu = True
else:
fov_padding_cpu = False
if 'CITYSCAPES' in cfg.DATASET.root_dataset or 'CITYSCAPE' in cfg.DATASET.list_train:
ignore_label=20-1
elif 'Digest' in cfg.DATASET.root_dataset:
ignore_label=-2
else:
if cfg.DATASET.ignore_index != -2:
ignore_label=cfg.DATASET.ignore_index
else:
ignore_label=-2
X_patches = []
X_patches_cords = []
for s in range(len(patch_bank)):
if cfg.VAL.F_Xlr_only and s>0:
X_patches.append(X_patches[0])
continue
if select_scale != None and s != select_scale and s != 0:
continue
patch_size = patch_bank[s]
patch_size_x = patch_size
patch_size_y = patch_size*cfg.MODEL.patch_ap
# TODO: debug; the current approach of adjusting xi, yi only works when X.shape is divisible by fov_map_scale
if cfg.DATASET.adjust_crop_range:
xi = int(xi_ori*(X.shape[-2]-patch_size_x)/X.shape[-2])
yi = int(yi_ori*(X.shape[-1]-patch_size_y)/X.shape[-1])
# correction on residual
if xi_ori >= round(ori_w/fov_map_scale)-1:
xi += 1
if yi_ori >= round(ori_h/(fov_map_scale*cfg.MODEL.patch_ap))-1:
yi += 1
# if X.shape[2] < patch_size_x or X.shape[3] < patch_size_y:
# raise Exception('Patch size {}x{} exceed image size {}'.format(patch_size_x, patch_size_y, X.shape))
if fov_padding:
if cfg.DATASET.adjust_crop_range:
p_h = max(patch_size_x-X.shape[2], 0)
p_w = max(patch_size_y-X.shape[3], 0)
# p = max(X_p_x, X_p_y)
p_y_h = max(patch_bank[0]-X.shape[2], 0)
p_y_w = max(patch_bank[0]*cfg.MODEL.patch_ap-X.shape[3], 0)
# p_y = max(Y_p_x, Y_p_y)
else:
p_w = patch_size_y-1
p_h = patch_size_x-1
p_y_w = patch_bank[0]*cfg.MODEL.patch_ap-1
p_y_h = patch_bank[0]-1
if cfg.DATASET.mirror_padding:
if fov_padding_cpu:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h), mode='reflect').cpu()
else:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h), mode='reflect')
if train_mode and segm_downsampling_rate != 1:
if fov_padding_cpu:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), mode='reflect').cpu()
else:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), mode='reflect')
else:
if fov_padding_cpu:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w,p_y_w,p_y_h,p_y_h), mode='reflect').cpu()
else:
Y_pad = F.pad(Y.unsqueeze(1).float(), (p_y_w,p_y_w,p_y_h,p_y_h), mode='reflect')
Y_pad = Y_pad.squeeze(1).long()
else:
if fov_padding_cpu:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h)).cpu()
else:
X_pad = F.pad(X, (p_w,p_w,p_h,p_h))
if train_mode and segm_downsampling_rate != 1:
if fov_padding_cpu:
Y_pad = F.pad(Y, (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), value=ignore_label).cpu()
else:
Y_pad = F.pad(Y, (p_y_w//segm_downsampling_rate,p_y_w//segm_downsampling_rate,p_y_h//segm_downsampling_rate,p_y_h//segm_downsampling_rate), value=ignore_label)
else:
if fov_padding_cpu:
Y_pad = F.pad(Y, (p_y_w,p_y_w,p_y_h,p_y_h), value=ignore_label).cpu()
else:
Y_pad = F.pad(Y, (p_y_w,p_y_w,p_y_h,p_y_h), value=ignore_label)
cx_lr = xi*fov_map_scale # upper left corner of current X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cx_lr > (X.shape[2]-patch_size_x):
cx_lr = X.shape[2]-patch_size_x
cx = cx_lr + patch_bank[0]//2 - patch_size_x//2 # upper left corner of current patch size with same center of X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cx < 0:
cx = 0
if fov_padding:
# cx_lr = cx_lr+p_y
cx_p = cx+p_h
if cfg.DATASET.multi_scale_aug:
cx_p_y = cx+p_y_h
else:
cx_p_y = cx_lr+p_y_h
if cfg.DATASET.multi_scale_aug and train_mode:
if cx_p < 0:
cx_p = 0
elif cx_p > (X.shape[2]-patch_size_x):
cx_p = X.shape[2]-patch_size_x
if cx_p_y < 0:
cx_p_y = 0
elif cx_p_y > (X.shape[2]-patch_size_x):
cx_p_y = X.shape[2]-patch_size_x
if cfg.DATASET.adjust_crop_range:
if cx_p_y < 0:
cx_p_y = 0
elif cx_p_y > (X.shape[2]-patch_size_x):
cx_p_y = X.shape[2]-patch_size_x
else:
if cx < 0:
cx = 0
elif cx > (X.shape[2]-patch_size_x):
cx = X.shape[2]-patch_size_x
cy_lr = yi*(fov_map_scale*cfg.MODEL.patch_ap) # upper left corner of current X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cy_lr > (X.shape[3]-patch_size_y):
cy_lr = X.shape[3]-patch_size_y
cy = cy_lr + (patch_bank[0]*cfg.MODEL.patch_ap)//2 - patch_size_y//2 # upper left corner of current patch size with same center of X_lr pixel
if cfg.DATASET.adjust_crop_range:
if cy < 0:
cy = 0
if fov_padding:
# cy_lr = cy_lr+p_y
cy_p = cy+p_w
if cfg.DATASET.multi_scale_aug:
cy_p_y = cy+p_y_w
else:
cy_p_y = cy_lr+p_y_w
if cfg.DATASET.multi_scale_aug and train_mode:
if cy_p < 0:
cy_p = 0
elif cy_p > (X.shape[3]-patch_size_y):
cy_p = X.shape[3]-patch_size_y
if cy_p_y < 0:
cy_p_y = 0
elif cy_p_y > (X.shape[3]-patch_size_y):
cy_p_y = X.shape[3]-patch_size_y
if cfg.DATASET.adjust_crop_range:
if cy_p_y < 0:
cy_p_y = 0
elif cy_p_y > (X.shape[3]-patch_size_y):
cy_p_y = X.shape[3]-patch_size_y
else:
if cy < 0:
cy = 0
elif cy > (X.shape[3]-patch_size_y):
cy = X.shape[3]-patch_size_y
if fov_padding:
crop_patch = X_pad[:, :, cx_p:cx_p+patch_size_x, cy_p:cy_p+patch_size_y].to(X.device)
else:
crop_patch = X[:, :, cx:cx+patch_size_x, cy:cy+patch_size_y]
assert min(crop_patch.shape)!=0, "crop_patch size wrong ({}) cropped from X_pad ({}), X({}) at (cx_p={},cy_p={},xi={},yi={}, patch_size_x={}, patch_size_y={})".format(crop_patch.shape, X_pad.shape, X.shape, cx_p, cy_p, xi, yi, patch_size_x, patch_size_y)
if not (select_scale != None and select_scale != 0 and s == 0):
X_patches.append(b_imresize(crop_patch, (patch_bank[0],patch_bank[0]*cfg.MODEL.patch_ap), interp='bilinear'))
if not train_mode:
if not (select_scale != None and select_scale != 0 and s == 0):
X_patches_cords.append((cx, cy, patch_size, p_w, p_h)) # TODO: patch_size to be corrected in visualisation in eval/eval_multipro
if s == 0:
if segm_downsampling_rate != 1 and train_mode:
patch_size = patch_bank[0] // segm_downsampling_rate
cx_p_y = cx_p_y // segm_downsampling_rate
cy_p_y = cy_p_y // segm_downsampling_rate
else:
patch_size = patch_bank[0]
patch_size_x = patch_size
patch_size_y = patch_size*cfg.MODEL.patch_ap
if not train_mode:
Y_patch_cord = (cx_p_y, cy_p_y, patch_size, p_y_w, p_y_h)
# Y.shape b,w,h, NOTE Y has different size in train and val mode
if fov_padding:
Y_patch = Y_pad[:, cx_p_y:cx_p_y+patch_size_x, cy_p_y:cy_p_y+patch_size_y].to(Y.device)
else:
Y_patch = Y[:, cx_lr:cx_lr+patch_size_x, cy_lr:cy_lr+patch_size_y]
if b == 0:
X_patches_batch = X_patches
Y_patch_batch = Y_patch
else:
for p in range(len(X_patches_batch)):
X_patches_batch[p] = torch.cat([X_patches_batch[p], X_patches[p]])
Y_patch_batch = torch.cat([Y_patch_batch, Y_patch])
if train_mode:
return X_patches_batch, Y_patch_batch
else:
# print('Y_patch_cord: ', Y_patch_cord)
return X_patches_batch, Y_patch_cord, X_patches_cords, Y_patch_batch
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, odgt, opt, **kwargs):
# remapping labels reflecting servie degree of GS
if opt.root_dataset == '/scratch0/chenjin/GLEASON2019_DATA/Data/' or \
opt.root_dataset == '/home/chenjin/Chen_UCL/Histo-MRI-mapping/GLEASON2019_DATA/Data/' or \
opt.root_dataset == '/SAN/medic/Histo_MRI_GPU/chenjin/Data/GLEASON2019_DATA/Data/' or \
'GLEASON2019_DATA' in opt.root_dataset or 'Gleason' in opt.root_dataset:
# four class mapping
if opt.class_mapping == 0:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 3,
5: 4, 6: 1,
}
# three class mapping exclude class5
elif opt.class_mapping == 30:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 3,
5: 1, 6: 1,
}
# gs3 vs all
elif opt.class_mapping == 3:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 1,
5: 1, 6: 1,
}
# gs4 vs all
elif opt.class_mapping == 4:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 1, 4: 2,
5: 1, 6: 1,
}
# gs5 vs all
elif opt.class_mapping == 5:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 1, 4: 1,
5: 2, 6: 1,
}
# benine vs all
elif opt.class_mapping == 6:
self.label_mapping = {0: 1,
1: 1, 2: 1,
3: 2, 4: 2,
5: 2, 6: 1,
}
# mean and std
self.normalize = transforms.Normalize(
# gleason2019 322 train mean and std applied
mean=[0.748, 0.611, 0.823],
std=[0.146, 0.245, 0.119])
elif opt.root_dataset == '/home/chenjin/Chen_UCL/Histo-MRI-mapping/DigestPath2019/' or 'Digest' in opt.list_train:
self.label_mapping = {0: 1,
255: 2,
}
# mean and std
self.normalize = transforms.Normalize(
# DigestPath2019 dataset-specific mean and std applied
mean=[0.816, 0.697, 0.792],
std=[0.160, 0.277, 0.198])
elif 'ADE20K' in opt.root_dataset or 'ADE' in opt.list_train:
self.label_mapping = {}
# mean and std
self.normalize = transforms.Normalize(
# ImageNet mean and std applied
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
elif 'CITYSCAPES' in opt.root_dataset or 'CITYSCAPE' in opt.list_train:
# following HRNet-Semantic-Segmentation setting
# but starting from 1 instead of 0, since 0 seems to trigger a bug in the criterion.OhemCrossEntropy implementation
# debug note 24/12/19: labels must start from 1 and must be contiguous, otherwise predictions via view(-1) become inconsistent with seg_label
ignore_label=20
self.label_mapping = {-1: ignore_label, 0: ignore_label,
1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label,
5: ignore_label, 6: ignore_label,
7: 1, 8: 2, 9: ignore_label,
10: ignore_label, 11: 3, 12: 4,
13: 5, 14: ignore_label, 15: ignore_label,
16: ignore_label, 17: 6, 18: ignore_label,
19: 7, 20: 8, 21: 9, 22: 10, 23: 11, 24: 12,
25: 13, 26: 14, 27: 15, 28: 16,
29: ignore_label, 30: ignore_label,
31: 17, 32: 18, 33: 19}
# mean and std
self.normalize = transforms.Normalize(
# ImageNet mean and std applied
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
elif 'Histo' in opt.root_dataset or 'histomri' in opt.list_train:
self.label_mapping = {}
# mean and std
self.normalize = transforms.Normalize(
# histomri dataset-specific mean and std applied
mean=[0.8223, 0.7783, 0.7847],
std=[0.210, 0.216, 0.241])
elif 'DeepGlob' in opt.root_dataset or 'DeepGlob' in opt.list_train:
# ignore_label=7
if opt.ignore_index == 0:
self.label_mapping = {0: 2,
1: 3, 2: 4,
3: 5, 4: 6,
5: 7, 6: 1,
}
elif opt.ignore_index == 6:
self.label_mapping = {0: 1,
1: 2, 2: 3,
3: 4, 4: 5,
5: 6, 6: 7,
}
# mean and std
self.normalize = transforms.Normalize(
# DeepGlob dataset-specific mean and std applied
mean=[0.282, 0.379, 0.408],
std=[0.089, 0.101, 0.127])
else:
raise Exception('Unknown root for mapping and normalisation!')
# parse options
self.imgSizes = opt.imgSizes
self.imgMaxSize = opt.imgMaxSize
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
# parse the input list
self.parse_input_list(odgt, **kwargs)
def convert_label(self, label, inverse=False):
label = np.array(label)
temp = label.copy()
if inverse:
for v, k in self.label_mapping.items():
label[temp == k] = v
else:
for k, v in self.label_mapping.items():
label[temp == k] = v
return label
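# Example (for illustration, using the four-class Gleason mapping above,
# i.e. class_mapping == 0): an input label array [0, 3, 4, 5] is converted
# to [1, 2, 3, 4].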
def parse_input_list(self, odgt, max_sample=-1, start_idx=-1, end_idx=-1):
if isinstance(odgt, list):
self.list_sample = odgt
elif isinstance(odgt, str):
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
if start_idx >= 0 and end_idx >= 0: # divide file list
self.list_sample = self.list_sample[start_idx:end_idx]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def img_transform(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = self.normalize(torch.from_numpy(img.copy()))
return img
def img_transform_unnorm(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
def img_transform_rev(self, img):
# 0-255 to 0-1
img = np.float32(np.array(img)) / 255.
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy())
return img
def segm_transform(self, segm):
# to tensor, -1 to 149
# !!!!! JC: This is why all data need to be mapped to 1..numClass
# and because of this, ignore_index (in CrossEntropy/OhemCrossEntropy/IoU) = ignore_label (in dataset class_mapping)-1
segm = torch.from_numpy(np.array(segm)).long() - 1
return segm
# Round x to the nearest multiple of p and x' >= x
def round2nearest_multiple(self, x, p):
return ((x - 1) // p + 1) * p
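# Examples (for illustration): round2nearest_multiple(33, 8) == 40 and
# round2nearest_multiple(32, 8) == 32, i.e. x is rounded up to the next
# multiple of p unless it already is one.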
class TrainDataset(BaseDataset):
def __init__(self, root_dataset, odgt, opt, batch_per_gpu=1, cal_REV=False, **kwargs):
super(TrainDataset, self).__init__(odgt, opt, **kwargs)
self.root_dataset = root_dataset
# downsampling rate of the segmentation label
self.segm_downsampling_rate = opt.segm_downsampling_rate
self.batch_per_gpu = batch_per_gpu
# classify images into two classes: 1. h > w and 2. h <= w
self.batch_record_list = [[], []]
# override dataset length when training with batch_per_gpu > 1
self.cur_idx = 0
self.if_shuffled = False
# augmentation
self.augmentation = opt.random_flip
self.balance_sam_idx = 0
self.num_class = opt.num_class
self.cal_REV = cal_REV
def _get_sub_batch(self):
while True:
# get a sample record
this_sample = self.list_sample[self.cur_idx]
if self.augmentation == 'balance_sample' and self.balance_sam_idx > 2:
# search for gs-5 and reset idx every 3 steps, to account for the
# severely rare gs-4 in contrast to the other 3 balanced classes
search_rare_class = True
s_idx = self.cur_idx
while search_rare_class:
search_sample = self.list_sample[s_idx]
s_idx += 1
if s_idx >= self.num_sample:
s_idx = 0
segm_path = os.path.join(self.root_dataset, search_sample['fpath_segm'])
segm = self.convert_label(Image.open(segm_path))
hist, _ = np.histogram(segm, bins=self.num_class, range=(0, self.num_class-1))
if (hist[-1] / np.sum(hist)) > 0.25:
this_sample = search_sample
search_rare_class = False
self.balance_sam_idx = 0
self.balance_sam_idx += 1
if this_sample['height'] > this_sample['width']:
self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
else:
self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
# update current sample pointer
self.cur_idx += 1
if self.cur_idx >= self.num_sample:
self.cur_idx = 0
np.random.shuffle(self.list_sample)
if len(self.batch_record_list[0]) == self.batch_per_gpu:
batch_records = self.batch_record_list[0]
self.batch_record_list[0] = []
break
elif len(self.batch_record_list[1]) == self.batch_per_gpu:
batch_records = self.batch_record_list[1]
self.batch_record_list[1] = []
break
return batch_records
def __getitem__(self, index):
# NOTE: random shuffle for the first time. shuffle in __init__ is useless
if not self.if_shuffled:
| np.random.seed(index) | numpy.random.seed |
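# Hedged sketch (the rest of __getitem__ is truncated in this row): after
# seeding and shuffling list_sample once, the method presumably draws a
# sub-batch via self._get_sub_batch(), loads and transforms the images and
# segmentations, and crops multi-scale patches with patch_loader before
# returning the batch.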
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import hypothesis.strategies as st
import numpy as np
import random
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
class TestTopK(hu.HypothesisTestCase):
def top_k_ref(self, X, k, flatten_indices):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=X_flat.shape, dtype=np.int32)
flatten_indices_ref = np.ndarray(shape=X_flat.shape, dtype=np.int32)
values_ref = np.ndarray(shape=X_flat.shape, dtype=np.float32)
global_idx = 0
for i in range(X_flat.shape[0]):
od = OrderedDict()
for j in range(X_flat.shape[1]):
val = X_flat[i, j]
if val not in od:
od[val] = []
od[val].append((j, global_idx))
global_idx += 1
j = 0
for val, idxs in sorted(od.items(), reverse=True):
for (idx, flatten_idx) in idxs:
indices_ref[i, j] = idx
flatten_indices_ref[i, j] = flatten_idx
values_ref[i, j] = val
j = j + 1
indices_ref = np.reshape(indices_ref, X.shape)
flatten_indices_ref = np.reshape(flatten_indices_ref, X.shape)
values_ref = np.reshape(values_ref, X.shape)
indices_ref = indices_ref.take(list(range(k)), axis=-1)
flatten_indices_ref = flatten_indices_ref.take(list(range(k)), axis=-1)\
.flatten()
values_ref = values_ref.take(list(range(k)), axis=-1)
if flatten_indices:
return (values_ref, indices_ref, flatten_indices_ref)
else:
return (values_ref, indices_ref)
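# Example (for illustration): for X = np.float32([[1, 3, 2]]) and k = 2,
# top_k_ref returns values [[3, 2]] and indices [[1, 2]]; with
# flatten_indices=True it additionally returns the flattened indices [1, 2].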
@given(X=hu.tensor(), flatten_indices=st.booleans(), **hu.gcs)
def test_top_k(self, X, flatten_indices, gc, dc):
X = X.astype(dtype=np.float32)
k = random.randint(1, X.shape[-1])
output_list = ["Values", "Indices"]
if flatten_indices:
output_list.append("FlattenIndices")
op = core.CreateOperator("TopK", ["X"], output_list,
k=k, device_option=gc)
def bind_ref(X_loc):
return self.top_k_ref(X_loc, k, flatten_indices)
self.assertReferenceChecks(gc, op, [X], bind_ref)
@given(bs=st.integers(1, 10), n=st.integers(1, 1), k=st.integers(1, 1),
flatten_indices=st.booleans(), **hu.gcs)
def test_top_k_1(self, bs, n, k, flatten_indices, gc, dc):
X = np.random.rand(bs, n).astype(dtype=np.float32)
output_list = ["Values", "Indices"]
if flatten_indices:
output_list.append("FlattenIndices")
op = core.CreateOperator("TopK", ["X"], output_list,
k=k, device_option=gc)
def bind_ref(X_loc):
return self.top_k_ref(X_loc, k, flatten_indices)
self.assertReferenceChecks(gc, op, [X], bind_ref)
@given(bs=st.integers(1, 10), n=st.integers(1, 100000), k=st.integers(1, 1),
flatten_indices=st.booleans(), **hu.gcs)
def test_top_k_2(self, bs, n, k, flatten_indices, gc, dc):
X = np.random.rand(bs, n).astype(dtype=np.float32)
output_list = ["Values", "Indices"]
if flatten_indices:
output_list.append("FlattenIndices")
op = core.CreateOperator("TopK", ["X"], output_list,
k=k, device_option=gc)
def bind_ref(X_loc):
return self.top_k_ref(X_loc, k, flatten_indices)
self.assertReferenceChecks(gc, op, [X], bind_ref)
@given(bs=st.integers(1, 10), n=st.integers(1, 100000),
k=st.integers(1, 1024), flatten_indices=st.booleans(), **hu.gcs)
def test_top_k_3(self, bs, n, k, flatten_indices, gc, dc):
X = | np.random.rand(bs, n) | numpy.random.rand |
import numpy as np
from bresenham import bresenham
import scipy.ndimage
def mydrawPNG(vector_image, Side = 256):
raster_image = np.zeros((int(Side), int(Side)), dtype=np.float32)
initX, initY = int(vector_image[0, 0]), int(vector_image[0, 1])
stroke_bbox = []
stroke_cord_buffer = []
pixel_length = 0
for i in range(0, len(vector_image)):
if i > 0:
if vector_image[i - 1, 2] == 1:
initX, initY = int(vector_image[i, 0]), int(vector_image[i, 1])
cordList = list(bresenham(initX, initY, int(vector_image[i, 0]), int(vector_image[i, 1])))
pixel_length += len(cordList)
stroke_cord_buffer.extend([list(i) for i in cordList])
for cord in cordList:
if (cord[0] > 0 and cord[1] > 0) and (cord[0] < Side and cord[1] < Side):
raster_image[cord[1], cord[0]] = 255.0
initX, initY = int(vector_image[i, 0]), int(vector_image[i, 1])
if vector_image[i, 2] == 1:
min_x = np.array(stroke_cord_buffer)[:, 0].min()
min_y = np.array(stroke_cord_buffer)[:, 1].min()
max_x = np.array(stroke_cord_buffer)[:, 0].max()
max_y = np.array(stroke_cord_buffer)[:, 1].max()
stroke_bbox.append([min_x, min_y, max_x, max_y])
stroke_cord_buffer = []
raster_image = scipy.ndimage.binary_dilation(raster_image) * 255.0
#utils.image_boxes(Image.fromarray(raster_image).convert('RGB'), stroke_bbox).show()
return raster_image, stroke_bbox
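# Illustrative usage sketch (hypothetical data; `_demo_mydrawPNG` is not part of
# the original module). Each row of the vector image is (x, y, pen_state), where
# pen_state == 1 marks the end of a stroke.
def _demo_mydrawPNG():
    strokes = np.array([[0, 0, 0],
                        [10, 0, 0],
                        [10, 10, 1]])
    raster, boxes = mydrawPNG(strokes, Side=256)
    # raster.shape == (256, 256); boxes == [[0, 0, 10, 10]] for this stroke
    return raster, boxes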
def preprocess(sketch_points, side = 256.0):
sketch_points = sketch_points.astype(float)
sketch_points[:, :2] = sketch_points[:, :2] / | np.array([800, 800]) | numpy.array |
from __future__ import division,print_function
#matplotlib inline
#load_ext autoreload
#autoreload 2
import sys
from tqdm import tqdm_notebook as tqdm
import random
import matplotlib.pyplot as plt
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as init
from torch.autograd import Variable, grad
from torchvision import datasets, transforms
from torch.nn.parameter import Parameter
import calculate_log as callog
import warnings
warnings.filterwarnings('ignore')
torch.cuda.set_device(1) #Select the GPU
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# from .route import *
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
return torch.cat([x, out], 1)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(BottleneckBlock, self).__init__()
inter_planes = out_planes * 4
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(inter_planes)
self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
def forward(self, x):
torch_model.record(x)
out = self.conv1(self.relu(self.bn1(x)))
torch_model.record(out)
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
torch_model.record(out)
out = self.conv2(self.relu(self.bn2(out)))
torch_model.record(out)
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
torch_model.record(out)
return torch.cat([x, out], 1)
class TransitionBlock(nn.Module):
def __init__(self, in_planes, out_planes, dropRate=0.0):
super(TransitionBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=False)
self.droprate = dropRate
def forward(self, x):
out = self.conv1(self.relu(self.bn1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, inplace=False, training=self.training)
return F.avg_pool2d(out, 2)
class DenseBlock(nn.Module):
def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0):
super(DenseBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate)
def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(in_planes+i*growth_rate, growth_rate, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class DenseNet3(nn.Module):
def __init__(self, depth, num_classes, growth_rate=12,
reduction=0.5, bottleneck=True, dropRate=0.0, normalizer = None,
out_classes = 100, k=None, info=None):
super(DenseNet3, self).__init__()
in_planes = 2 * growth_rate
n = (depth - 4) / 3
if bottleneck == True:
n = int(n/2)
block = BottleneckBlock
else:
block = BasicBlock
# 1st conv before any dense block
self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes*reduction))
# 2nd block
self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate)
in_planes = int(math.floor(in_planes*reduction))
# 3rd block
self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
in_planes = int(in_planes+n*growth_rate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu = nn.ReLU(inplace=True)
self.collecting = False
if k is None:
self.fc = nn.Linear(in_planes, num_classes)
else:
pass
# self.fc = RouteFcUCPruned(in_planes, num_classes, topk=k, info=info)
# # self.fc = RouteDropout(in_planes, num_classes, p=k)
self.in_planes = in_planes
self.normalizer = normalizer
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.features(x)
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.in_planes)
out = self.fc(out)
return out
def forward_virtual(self, x):
out = self.features(x)
out = F.avg_pool2d(out, 8)
# breakpoint()
out = out.view(-1, self.in_planes)
return self.fc(out), out
def features(self, x):
if self.normalizer is not None:
x = x.clone()
x[:, 0, :, :] = (x[:, 0, :, :] - self.normalizer.mean[0]) / self.normalizer.std[0]
x[:, 1, :, :] = (x[:, 1, :, :] - self.normalizer.mean[1]) / self.normalizer.std[1]
x[:, 2, :, :] = (x[:, 2, :, :] - self.normalizer.mean[2]) / self.normalizer.std[2]
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
return out
# function to extact the multiple features
def feature_list(self, x):
if self.normalizer is not None:
x = x.clone()
x[:,0,:,:] = (x[:,0,:,:] - self.normalizer.mean[0]) / self.normalizer.std[0]
x[:,1,:,:] = (x[:,1,:,:] - self.normalizer.mean[1]) / self.normalizer.std[1]
x[:,2,:,:] = (x[:,2,:,:] - self.normalizer.mean[2]) / self.normalizer.std[2]
out_list = []
out = self.conv1(x)
out_list.append(out)
out = self.trans1(self.block1(out))
out_list.append(out)
out = self.trans2(self.block2(out))
out_list.append(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out_list.append(out)
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.in_planes)
return self.fc(out), out_list
def intermediate_forward(self, x, layer_index):
if self.normalizer is not None:
x = x.clone()
x[:,0,:,:] = (x[:,0,:,:] - self.normalizer.mean[0]) / self.normalizer.std[0]
x[:,1,:,:] = (x[:,1,:,:] - self.normalizer.mean[1]) / self.normalizer.std[1]
x[:,2,:,:] = (x[:,2,:,:] - self.normalizer.mean[2]) / self.normalizer.std[2]
out = self.conv1(x)
if layer_index == 1:
out = self.trans1(self.block1(out))
elif layer_index == 2:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
elif layer_index == 3:
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
out = self.relu(self.bn1(out))
return out
# function to extact the penultimate features
def penultimate_forward(self, x):
if self.normalizer is not None:
x = x.clone()
x[:,0,:,:] = (x[:,0,:,:] - self.normalizer.mean[0]) / self.normalizer.std[0]
x[:,1,:,:] = (x[:,1,:,:] - self.normalizer.mean[1]) / self.normalizer.std[1]
x[:,2,:,:] = (x[:,2,:,:] - self.normalizer.mean[2]) / self.normalizer.std[2]
out = self.conv1(x)
out = self.trans1(self.block1(out))
out = self.trans2(self.block2(out))
out = self.block3(out)
penultimate = self.relu(self.bn1(out))
out = F.avg_pool2d(penultimate, 8)
out = out.view(-1, self.in_planes)
return self.fc(out), penultimate
def record(self, t):
if self.collecting:
self.gram_feats.append(t)
def gram_feature_list(self, x):
self.collecting = True
self.gram_feats = []
self.forward(x)
self.collecting = False
temp = self.gram_feats
self.gram_feats = []
return temp
def load(self, path="resnet_cifar10.pth"):
tm = torch.load(path, map_location="cpu")
self.load_state_dict(tm)
def get_min_max(self, data, power):
mins = []
maxs = []
for i in range(0, len(data), 64):
batch = data[i:i + 64].cuda()
feat_list = self.gram_feature_list(batch)
for L, feat_L in enumerate(feat_list):#96, x, x, x
if L == len(mins):
mins.append([None] * len(power))
maxs.append([None] * len(power))
for p, P in enumerate(power):
g_p = G_p(feat_L, P)
current_min = g_p.min(dim=0, keepdim=True)[0]
# breakpoint()
current_max = g_p.max(dim=0, keepdim=True)[0]
if mins[L][p] is None:
mins[L][p] = current_min
maxs[L][p] = current_max
else:
mins[L][p] = torch.min(current_min, mins[L][p])
maxs[L][p] = torch.max(current_max, maxs[L][p])
# breakpoint()
return mins, maxs
def get_deviations(self, data, power, mins, maxs):
deviations = []
for i in range(0, len(data), 64):
batch = data[i:i + 64].cuda()
feat_list = self.gram_feature_list(batch)
batch_deviations = []
for L, feat_L in enumerate(feat_list):
dev = 0
for p, P in enumerate(power):
g_p = G_p(feat_L, P)
dev += (F.relu(mins[L][p] - g_p) / torch.abs(mins[L][p] + 10 ** -6)).sum(dim=1, keepdim=True)
dev += (F.relu(g_p - maxs[L][p]) / torch.abs(maxs[L][p] + 10 ** -6)).sum(dim=1, keepdim=True)
batch_deviations.append(dev.cpu().detach().numpy())
batch_deviations = np.concatenate(batch_deviations, axis=1)
deviations.append(batch_deviations)
deviations = np.concatenate(deviations, axis=0)
return deviations
# class BasicBlock(nn.Module):
# def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
# super(BasicBlock, self).__init__()
# self.bn1 = nn.BatchNorm2d(in_planes)
# self.relu1 = nn.ReLU(inplace=True)
# self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
# padding=1, bias=False)
# self.bn2 = nn.BatchNorm2d(out_planes)
# self.relu2 = nn.ReLU(inplace=True)
# self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
# padding=1, bias=False)
# self.droprate = dropRate
# self.equalInOut = (in_planes == out_planes)
# self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
# padding=0, bias=False) or None
#
# def forward(self, x):
# torch_model.record(x)
# if not self.equalInOut:
# x = self.relu1(self.bn1(x))
# torch_model.record(x)
# else:
# out = self.relu1(self.bn1(x))
# torch_model.record(out)
#
# if self.equalInOut:
# out = self.relu2(self.bn2(self.conv1(out)))
# torch_model.record(out)
# else:
# out = self.relu2(self.bn2(self.conv1(x)))
# torch_model.record(out)
# if self.droprate > 0:
# out = F.dropout(out, p=self.droprate, training=self.training)
# out = self.conv2(out)
# torch_model.record(out)
# if not self.equalInOut:
# torch_model.record(torch.add(self.convShortcut(x), out))
# return torch.add(self.convShortcut(x), out)
# else:
# torch_model.record(torch.add(x, out))
# return torch.add(x, out)
# class NetworkBlock(nn.Module):
# def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
# super(NetworkBlock, self).__init__()
# self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
#
# def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
# layers = []
# for i in range(nb_layers):
# layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
# return nn.Sequential(*layers)
#
# def forward(self, x):
# return self.layer(x)
# class WideResNet(nn.Module):
# def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
# super(WideResNet, self).__init__()
# nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
# assert ((depth - 4) % 6 == 0)
# n = (depth - 4) // 6
# block = BasicBlock
# # 1st conv before any network block
# self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
# padding=1, bias=False)
# # 1st block
# self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# # 2nd block
# self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# # 3rd block
# self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# # global average pooling and classifier
# self.bn1 = nn.BatchNorm2d(nChannels[3])
# self.relu = nn.ReLU(inplace=True)
# self.fc = nn.Linear(nChannels[3], num_classes)
# self.nChannels = nChannels[3]
# self.collecting = False
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# elif isinstance(m, nn.Linear):
# m.bias.data.zero_()
#
# def forward(self, x):
# out = self.conv1(x)
# out = self.block1(out)
# out = self.block2(out)
# out = self.block3(out)
# out = self.relu(self.bn1(out))
# out = F.avg_pool2d(out, 8)
# out = out.view(-1, self.nChannels)
# return self.fc(out)
#
# def forward_virtual(self, x):
# out = self.conv1(x)
# out = self.block1(out)
# out = self.block2(out)
# out = self.block3(out)
# out = self.relu(self.bn1(out))
# out = F.avg_pool2d(out, 8)
# out = out.view(-1, self.nChannels)
# return self.fc(out), out
#
# def intermediate_forward(self, x, layer_index):
# out = self.conv1(x)
# out = self.block1(out)
# out = self.block2(out)
# out = self.block3(out)
# out = self.relu(self.bn1(out))
# return out
#
# def feature_list(self, x):
# out_list = []
# out = self.conv1(x)
# out = self.block1(out)
# out = self.block2(out)
# out = self.block3(out)
# out = self.relu(self.bn1(out))
# out_list.append(out)
# out = F.avg_pool2d(out, 8)
# out = out.view(-1, self.nChannels)
# return self.fc(out), out_list
# def record(self, t):
# if self.collecting:
# self.gram_feats.append(t)
#
# def gram_feature_list(self, x):
# self.collecting = True
# self.gram_feats = []
# self.forward(x)
# self.collecting = False
# temp = self.gram_feats
# self.gram_feats = []
# return temp
#
# def load(self, path="resnet_cifar10.pth"):
# tm = torch.load(path, map_location="cpu")
# self.load_state_dict(tm)
#
# def get_min_max(self, data, power):
# mins = []
# maxs = []
#
# for i in range(0, len(data), 64):
# batch = data[i:i + 64].cuda()
# feat_list = self.gram_feature_list(batch)
#
# for L, feat_L in enumerate(feat_list):#96, x, x, x
# if L == len(mins):
# mins.append([None] * len(power))
# maxs.append([None] * len(power))
#
# for p, P in enumerate(power):
# g_p = G_p(feat_L, P)
#
# current_min = g_p.min(dim=0, keepdim=True)[0]
# # breakpoint()
# current_max = g_p.max(dim=0, keepdim=True)[0]
#
# if mins[L][p] is None:
# mins[L][p] = current_min
# maxs[L][p] = current_max
# else:
# mins[L][p] = torch.min(current_min, mins[L][p])
# maxs[L][p] = torch.max(current_max, maxs[L][p])
# # breakpoint()
# return mins, maxs
#
# def get_deviations(self, data, power, mins, maxs):
# deviations = []
#
# for i in range(0, len(data), 64):
# batch = data[i:i + 64].cuda()
# feat_list = self.gram_feature_list(batch)
#
# batch_deviations = []
# for L, feat_L in enumerate(feat_list):
# dev = 0
# for p, P in enumerate(power):
# g_p = G_p(feat_L, P)
#
# dev += (F.relu(mins[L][p] - g_p) / torch.abs(mins[L][p] + 10 ** -6)).sum(dim=1, keepdim=True)
# dev += (F.relu(g_p - maxs[L][p]) / torch.abs(maxs[L][p] + 10 ** -6)).sum(dim=1, keepdim=True)
# batch_deviations.append(dev.cpu().detach().numpy())
# batch_deviations = np.concatenate(batch_deviations, axis=1)
# deviations.append(batch_deviations)
# deviations = np.concatenate(deviations, axis=0)
#
# return deviations
# torch_model = WideResNet(40, 10, 2, dropRate=0.0)
torch_model = DenseNet3(100, 10, 12, reduction=0.5, bottleneck=True, dropRate=0.0, normalizer=None,
k=None, info=None)
# torch_model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes=10)
# torch_model.load('/afs/cs.wisc.edu/u/x/f/xfdu/workspace/energy_ood/CIFAR/snapshots/baseline/cifar10_wrn_baseline_0.1_1000_40_1_10000_epoch_99.pt')
torch_model.load('/afs/cs.wisc.edu/u/x/f/xfdu/workspace/energy_ood/CIFAR/snapshots/baseline/cifar10_dense_baseline_dense_0.1_1000_40_1_10000_epoch_99.pt')
torch_model.cuda()
torch_model.params = list(torch_model.parameters())
torch_model.eval()
print("Done")
batch_size = 64
mean = np.array([[0.4914, 0.4822, 0.4465]]).T
std = np.array([[0.2023, 0.1994, 0.2010]]).T
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
transform_test = transforms.Compose([
transforms.CenterCrop(size=(32, 32)),
transforms.ToTensor(),
normalize
])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('/nobackup-slow/dataset/cifarpy', train=True, download=True,
transform=transform_train),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('/nobackup-slow/dataset/cifarpy', train=False, transform=transform_test),
batch_size=batch_size)
data_train = list(torch.utils.data.DataLoader(
datasets.CIFAR10('/nobackup-slow/dataset/cifarpy', train=True, download=True,
transform=transform_test),
batch_size=1, shuffle=False))
data = list(torch.utils.data.DataLoader(
datasets.CIFAR10('/nobackup-slow/dataset/cifarpy', train=False, download=True,
transform=transform_test),
batch_size=1, shuffle=False))
torch_model.eval()
# correct = 0
# total = 0
# for x,y in test_loader:
# x = x.cuda()
# y = y.numpy()
# correct += (y==np.argmax(torch_model(x).detach().cpu().numpy(),axis=1)).sum()
# total += y.shape[0]
# print("Accuracy: ",correct/total)
import torchvision.transforms as trn
import torchvision.datasets as dset
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import utils.svhn_loader as svhn
cifar100 = list(torch.utils.data.DataLoader(
datasets.CIFAR100('/nobackup-slow/dataset/cifarpy', train=False, download=True,
transform=transform_test),
batch_size=1, shuffle=True))
mean_my = [x / 255 for x in [125.3, 123.0, 113.9]]
std_my = [x / 255 for x in [63.0, 62.1, 66.7]]
texture = list(dset.ImageFolder(root="/nobackup-slow/dataset/dtd/images",
transform=trn.Compose([trn.Resize(32), trn.CenterCrop(32),
trn.ToTensor(), trn.Normalize(mean_my, std_my)])))
svhn = list(svhn.SVHN(root='/nobackup-slow/dataset/svhn/', split="test",
transform=trn.Compose(
[#trn.Resize(32),
trn.ToTensor(), trn.Normalize(mean_my, std_my)]), download=False))
places365 = list(dset.ImageFolder(root="/nobackup-slow/dataset/places365/",
transform=trn.Compose([trn.Resize(32), trn.CenterCrop(32),
trn.ToTensor(), trn.Normalize(mean_my, std_my)])))
lsunc = list(dset.ImageFolder(root="/nobackup-slow/dataset/LSUN_C",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean_my, std_my)])))
lsunr = list(dset.ImageFolder(root="/nobackup-slow/dataset/LSUN_resize",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean_my, std_my)])))
isun = list(dset.ImageFolder(root="/nobackup-slow/dataset/iSUN",
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean_my, std_my)])))
train_preds = []
train_confs = []
train_logits = []
for idx in range(0, len(data_train), 64):
batch = torch.squeeze(torch.stack([x[0] for x in data_train[idx:idx + 64]]), dim=1).cuda()
logits = torch_model(batch)
confs = F.softmax(logits, dim=1).cpu().detach().numpy()
preds = np.argmax(confs, axis=1)
logits = (logits.cpu().detach().numpy())
train_confs.extend(np.max(confs, axis=1))
train_preds.extend(preds)
train_logits.extend(logits)
print("Done")
test_preds = []
test_confs = []
test_logits = []
for idx in range(0, len(data), 64):
batch = torch.squeeze(torch.stack([x[0] for x in data[idx:idx + 64]]), dim=1).cuda()
logits = torch_model(batch)
confs = F.softmax(logits, dim=1).cpu().detach().numpy()
preds = np.argmax(confs, axis=1)
logits = (logits.cpu().detach().numpy())
test_confs.extend(np.max(confs, axis=1))
test_preds.extend(preds)
test_logits.extend(logits)
print("Done")
import calculate_log as callog
def detect(all_test_deviations, all_ood_deviations, verbose=True, normalize=True):
average_results = {}
for i in range(1, 11):
random.seed(i)
validation_indices = random.sample(range(len(all_test_deviations)), int(0.1 * len(all_test_deviations)))
test_indices = sorted(list(set(range(len(all_test_deviations))) - set(validation_indices)))
validation = all_test_deviations[validation_indices]
test_deviations = all_test_deviations[test_indices]
t95 = validation.mean(axis=0) + 10 ** -7
if not normalize:
t95 = np.ones_like(t95)
test_deviations = (test_deviations / t95[np.newaxis, :]).sum(axis=1)
ood_deviations = (all_ood_deviations / t95[np.newaxis, :]).sum(axis=1)
results = callog.compute_metric(-test_deviations, -ood_deviations)
for m in results:
average_results[m] = average_results.get(m, 0) + results[m]
for m in average_results:
average_results[m] /= i
if verbose:
callog.print_results(average_results)
return average_results
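# Summary of detect(): over 10 seeded splits, 10% of the in-distribution
# deviations form a validation set whose per-layer mean (t95) normalises both
# test and OOD deviations; the normalised deviations are summed over layers,
# scored with callog.compute_metric, and the metrics are averaged over the splits.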
def cpu(ob):
for i in range(len(ob)):
for j in range(len(ob[i])):
ob[i][j] = ob[i][j].cpu()
return ob
def cuda(ob):
for i in range(len(ob)):
for j in range(len(ob[i])):
ob[i][j] = ob[i][j].cuda()
return ob
class Detector:
def __init__(self):
self.all_test_deviations = None
self.mins = {}
self.maxs = {}
self.classes = range(10)
def compute_minmaxs(self, data_train, POWERS=[10]):
for PRED in tqdm(self.classes):
train_indices = np.where(np.array(train_preds) == PRED)[0]
train_PRED = torch.squeeze(torch.stack([data_train[i][0] for i in train_indices]), dim=1)
mins, maxs = torch_model.get_min_max(train_PRED, power=POWERS)
self.mins[PRED] = cpu(mins)
self.maxs[PRED] = cpu(maxs)
torch.cuda.empty_cache()
def compute_test_deviations(self, POWERS=[10]):
all_test_deviations = None
test_classes = []
for PRED in tqdm(self.classes):
test_indices = np.where(np.array(test_preds) == PRED)[0]
test_PRED = torch.squeeze(torch.stack([data[i][0] for i in test_indices]), dim=1)
test_confs_PRED = np.array([test_confs[i] for i in test_indices])
test_classes.extend([PRED] * len(test_indices))
mins = cuda(self.mins[PRED])
maxs = cuda(self.maxs[PRED])
test_deviations = torch_model.get_deviations(test_PRED, power=POWERS, mins=mins, maxs=maxs) / test_confs_PRED[:, np.newaxis]
cpu(mins)
cpu(maxs)
if all_test_deviations is None:
all_test_deviations = test_deviations
else:
all_test_deviations = | np.concatenate([all_test_deviations, test_deviations], axis=0) | numpy.concatenate |
#!/usr/bin/env python
import rospy
from rds_network_ros.msg import ToGui
import signal
import sys
from matplotlib import pyplot as plt
import numpy as np
import scipy.io as sio
time_begin = []
time = []
corrected_command_linear = []
corrected_command_angular = []
nominal_command_linear = []
nominal_command_angular = []
collision_points_on_obstacles = []
def signal_handler(sig, frame):
#plt.plot(nominal_command_linear, color='blue', label='nominal')
#plt.plot(corrected_command_linear, color='green', label='corrected')
#plt.title("Linear command (nominal and corrected)")
#plt.show()
#plt.plot(nominal_command_angular, color='blue', label='nominal')
#plt.plot(corrected_command_angular, color='green', label='corrected')
#plt.title("Angular command (nominal and corrected)")
#plt.show()
# write the result to a npy-file and a mat-file
result = np.array([time, nominal_command_linear, nominal_command_angular,
corrected_command_linear, corrected_command_angular])
#np.save('command_log.npy', result)
sio.savemat('commands_log.mat', {'commands': result})
#collision_points_array=np.asarray(collision_points_on_obstacles, dtype='object')
x_vectorizer = np.vectorize(lambda obj: obj.x)
y_vectorizer = | np.vectorize(lambda obj: obj.y) | numpy.vectorize |
import argparse
import time
import cv2.cv2 as cv
import numpy as np
from scipy.signal import find_peaks
h_bins = 8
s_bins = 4
v_bins = 4
num_of_bins = h_bins + s_bins + v_bins
def check_time(args, cap):
frame_count = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
start_frame = int(args.start_sec * cap.get(cv.CAP_PROP_FPS))
if args.end_sec < 0:
end_frame = frame_count
else:
end_frame = int(args.end_sec * cap.get(cv.CAP_PROP_FPS))
if end_frame > frame_count:
end_frame = frame_count
print('[w] End time greater than the length of the video.')
if end_frame < start_frame:
print('[f] End time must be larger than start time.')
raise ValueError
cap.set(cv.CAP_PROP_POS_FRAMES, start_frame)
return start_frame, end_frame
def get_intersection(cap, numFrame, shape):
eof_flag = False
hists = np.zeros((numFrame, num_of_bins))
for k in range(numFrame):
_, img = cap.read()
if img is None:
eof_flag = True
break
img = cv.resize(img, (shape[1], shape[0]))
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
h, _ = np.histogram(hsv_img[:, :, 0], h_bins, (0, 255))
s, _ = np.histogram(hsv_img[:, :, 1], s_bins, (0, 255))
v, _ = np.histogram(hsv_img[:, :, 2], v_bins, (0, 255))
hists[k] = np.concatenate((h, s, v))
if img is None:
hists = hists[0:k]
numFrame = k
# compute intersection
hists = hists / (3 * shape[0] * shape[1])
hists_shift = hists[[0] + [x for x in range(numFrame - 1)]]
hists_pair = | np.stack((hists, hists_shift), 2) | numpy.stack |
import matplotlib.pyplot as plt
import copy
import graphClass as gc
import numpy as np
from fractions import Fraction
# INPUT HERE
# what level affine carpet would you like:
precarpet_level = 2
# how large would you like the center hole to be:
sideOfCenterHole = 1/2
# the above two are the only parameters, since sideOfCenterHole + 2*sideOfSmallSquares = 1 must be true
sideOfSmallSquares = (1 - sideOfCenterHole) / 2
# CODE FOR THE CROSSWIRE
# building the level 0 cross carpet
aC0 = gc.Graph()
aC0.add_vertex("a", np.array([0, 0.5]))
aC0.add_vertex("b", np.array([0.5, 1]))
aC0.add_vertex("c", np.array([1, 0.5]))
aC0.add_vertex("d", np.array([0.5, 0]))
aC0.add_vertex("e", np.array([0.5, 0.5]))
aC0.add_edge("a", "e")
aC0.add_edge('b', 'e')
aC0.add_edge('c', 'e')
aC0.add_edge('d', 'e')
# variables needed for the for loop that builds the precarpet
aCn = gc.Graph()
aCn_plus_one = aC0
copyOfACn = gc.Graph()
# listOfContractionParameters[i][0] is the scaleX
# listOfContractionParameters[i][1] is scaleY
# listOfContractionParameters[i][2] is fixedPoint
listOfContractionParameters = [[sideOfSmallSquares, sideOfSmallSquares, np.array([0, 0])], # q0
[sideOfCenterHole, sideOfSmallSquares, np.array([0.5, 0])], # q1
[sideOfSmallSquares, sideOfSmallSquares, | np.array([1, 0]) | numpy.array |
"""Sympy numerical code generation utilities."""
import collections
import functools
import inspect
import itertools
import keyword
import re
import numpy as np
import sympy
try:
from cached_property import cached_property
except ModuleNotFoundError:
def cached_property(f):
"""On-demand property which is calculated only once and memorized."""
return property(functools.lru_cache()(f))
try:
import methodtools
def cached_method(f):
return methodtools.lru_cache()(f)
except ModuleNotFoundError:
def cached_method(f):
"""Null decorator."""
return f
class cached_class_property:
"""Decorator to cache class properties."""
def __init__(self, getter):
functools.update_wrapper(getter, self)
self.getter = getter
def __get__(self, obj, cls=None):
if hasattr(self, 'value'):
return self.value
if cls is None:
cls = type(obj)
self.value = self.getter(cls)
return self.value
class classproperty:
"""Same as property(), but passes type(obj) instead of obj to methods."""
def __init__(self, fget, doc=None):
assert callable(fget)
self.fget = fget
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, obj, cls=None):
if cls is None:
cls = type(obj)
return self.fget(cls)
def init_static_variable(f):
return f()
def ew_diff(ndexpr, *wrt, **kwargs):
"""Element-wise symbolic derivative of n-dimensional array-like expression.
>>> import sympy
>>> x = sympy.symbols('x')
>>> ew_diff([[x**2, sympy.cos(x)], [5/x + 3, x**3 +2*x]], x)
array([[2*x, -sin(x)],
[-5/x**2, 3*x**2 + 2]], dtype=object)
"""
out = np.empty_like(ndexpr, object)
for ind, expr in np.ndenumerate(ndexpr):
out[ind] = sympy.diff(expr, *wrt, **kwargs)
return out
def ndexpr_diff(ndexpr, wrt):
"""Calculates the derivatives of an array expression w.r.t. to an ndarray.
>>> from sympy import var, sin; from numpy import array
>>> tup = var('x,y,z')
>>> ndexpr_diff(tup, [x,y])
array([[1, 0, 0],
[0, 1, 0]], dtype=object)
>>> ndexpr_diff([x**2+2*y/z, sin(x)], (x,y))
array([[2*x, cos(x)],
[2/z, 0]], dtype=object)
"""
ndexpr = np.asarray(ndexpr)
wrt = np.asarray(wrt)
jac = np.empty(wrt.shape + ndexpr.shape, dtype=object)
for i, elem in np.ndenumerate(wrt):
diff = ew_diff(ndexpr, elem)
jac[i] = diff if diff.shape else diff[()]
return jac
def flat_cat(*args, **kwargs):
"""Concatenate flattened arrays."""
chain = list(itertools.chain(args, kwargs.values()))
if not chain:
return np.array([])
else:
return np.concatenate([np.asanyarray(a).flatten() for a in chain])
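# Illustrative example: positional and keyword arguments are flattened and
# joined in order, e.g. flat_cat(np.eye(2), tail=[5.0]) -> array([1., 0., 0., 1., 5.])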
def make_signature(arg_names, member=False):
"""Make Signature object from argument name iterable or str."""
kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
if isinstance(arg_names, str):
arg_names = [name.strip() for name in arg_names.split(',')]
if member and arg_names and arg_names[0] != 'self':
arg_names = ['self'] + arg_names
return inspect.Signature([inspect.Parameter(n, kind) for n in arg_names])
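# Illustrative example (argument names are arbitrary):
# str(make_signature(['x', 'y'])) == '(x, y)'
# str(make_signature(['x'], member=True)) == '(self, x)'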
def wrap_with_signature(arg_name_list, member=False):
def decorator(f):
@functools.wraps(f)
def wrapper(*args):
return f(*args)
wrapper.__signature__ = make_signature(arg_name_list, member)
return wrapper
return decorator
def sparsify(array, selector=None):
"""Get nonzero values and indices from an array."""
values = []
indices = []
for index in np.ndindex(*array.shape):
if selector is not None and not selector(*index):
continue
elem = array[index]
if elem:
values.append(elem)
indices.append(index)
if indices:
return | np.array(values) | numpy.array |
from agents.agent_minimax.minimax import heuristic, check_horizontal, check_vertical, check_diagonal_pos, check_diagonal_neg, calculate_streak
import numpy as np
from agents.common import NO_PLAYER, BoardPiece, PLAYER2, PLAYER1, string_to_board
def test_check_horizontal_empty():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert check_horizontal(initialBoard) == 0
def test_check_horizontal_2():
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER1
initialBoard[5, 1] = PLAYER1
assert check_horizontal(initialBoard) == 2
def test_eval_window_none():
from agents.agent_minimax.minimax import evaluate_window_dict
test_finds = {
"Player1": 2,
"Player2": 1,
"NoPlayer": 1
}
assert evaluate_window_dict(test_finds) == 0
def test_eval_window_pos():
from agents.agent_minimax.minimax import evaluate_window_dict
test_finds = {
"Player1": 2,
"Player2": 0,
"NoPlayer": 2
}
assert evaluate_window_dict(test_finds) == 2
def test_eval_window_neg():
from agents.agent_minimax.minimax import evaluate_window_dict
test_finds = {
"Player1": 0,
"Player2": 3,
"NoPlayer": 1
}
assert evaluate_window_dict(test_finds) == -3
def test_iterate_window_none():
from agents.agent_minimax.minimax import iterate_window
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
assert iterate_window(initialBoard, 5, 0, 0, +1) == 0
def test_iterate_window_pos():
from agents.agent_minimax.minimax import iterate_window
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER1
initialBoard[5, 1] = PLAYER1
assert iterate_window(initialBoard, 5, 0, 0, +1) == 2
def test_iterate_window_neg():
from agents.agent_minimax.minimax import iterate_window
initialBoard = np.ndarray(shape=(6, 7), dtype=BoardPiece)
initialBoard.fill(NO_PLAYER)
initialBoard[5, 0] = PLAYER2
initialBoard[5, 1] = PLAYER2
initialBoard[5, 2] = PLAYER2
assert iterate_window(initialBoard, 5, 0, 0, +1) == -3
def test_new_horizontal():
from agents.agent_minimax.minimax import new_check_horizontal
initialBoard = | np.ndarray(shape=(6, 7), dtype=BoardPiece) | numpy.ndarray |
"""Colmap dataset for fundametal matrix estimation. Derived from FundamentalMatrixDataset.
"""
import sqlite3
import numpy as np
from dfe.datasets import FundamentalMatrixDataset
from dfe.utils import colmap_read, colmap_utils
class ColmapDataset(FundamentalMatrixDataset):
"""Colmap dataset for fundametal matrix estimation. Derived from FundamentalMatrixDataset.
"""
def __init__(
self,
path,
num_points=-1,
threshold=1,
max_F=None,
random=False,
min_matches=20,
compute_virtual_points=True,
):
"""Init.
Args:
path (str): path to dataset folder
num_points (int, optional): number of points per sample. Defaults to -1.
threshold (int, optional): epipolar threshold. Defaults to 1.
max_F (int, optional): maximal number of samples (if None: use all). Defaults to None.
random (bool, optional): random database access. Defaults to False.
min_matches (int, optional): minimal number of good matches per sample. Defaults to 20.
"""
super(ColmapDataset, self).__init__(num_points)
cameras = colmap_read.read_cameras_binary("%s/sparse/0/cameras.bin" % path)
images = colmap_read.read_images_binary("%s/sparse/0/images.bin" % path)
connection = sqlite3.connect("%s/reconstruction.db" % path)
cursor = connection.cursor()
self.img_paths = []
if random:
cursor.execute(
"SELECT pair_id, data FROM matches WHERE rows>=? ORDER BY RANDOM();",
(min_matches,),
)
else:
cursor.execute(
"SELECT pair_id, data FROM matches WHERE rows>=?;", (min_matches,)
)
for row in cursor:
# max number of image pairs
if max_F and len(self.F) == max_F:
break
img1_id, img2_id = colmap_utils.pair_id_to_image_ids(row[0])
try:
img1 = images[img1_id]
img2 = images[img2_id]
except KeyError:
print("Image doesn't match id")
continue
# check if both images share enough 3D points
pts1 = img1.point3D_ids[img1.point3D_ids != -1]
pts2 = img2.point3D_ids[img2.point3D_ids != -1]
common = len(np.intersect1d(pts1, pts2))
if common < min_matches:
continue
# get cameras
K1, T1, sz1 = colmap_utils.get_cam(img1, cameras)
K2, T2, sz2 = colmap_utils.get_cam(img2, cameras)
F = colmap_utils.compose_fundamental_matrix(K1, T1, K2, T2)
# pull the matches
matches = np.fromstring(row[1], dtype=np.uint32).reshape(-1, 2)
cursor_2 = connection.cursor()
cursor_2.execute(
"SELECT data, cols FROM keypoints WHERE image_id=?;", (img1_id,)
)
row_2 = next(cursor_2)
keypoints1 = np.fromstring(row_2[0], dtype=np.float32).reshape(-1, row_2[1])
cursor_2.execute(
"SELECT data, cols FROM keypoints WHERE image_id=?;", (img2_id,)
)
row_2 = next(cursor_2)
keypoints2 = np.fromstring(row_2[0], dtype=np.float32).reshape(-1, row_2[1])
cursor_2.execute(
"SELECT data FROM descriptors WHERE image_id=?;", (img1_id,)
)
row_2 = next(cursor_2)
descriptor_1 = np.float32(
np.fromstring(row_2[0], dtype=np.uint8).reshape(-1, 128)
)
cursor_2.execute(
"SELECT data FROM descriptors WHERE image_id=?;", (img2_id,)
)
row_2 = next(cursor_2)
descriptor_2 = np.float32(
np.fromstring(row_2[0], dtype=np.uint8).reshape(-1, 128)
)
dist = np.sqrt(
np.mean(
(descriptor_1[matches[:, 0]] - descriptor_2[matches[:, 1]]) ** 2, 1
)
)[..., None]
rel_scale = np.abs(
keypoints1[matches[:, 0], 2] - keypoints2[matches[:, 1], 2]
)[..., None]
angle1 = keypoints1[matches[:, 0], 3]
angle2 = keypoints2[matches[:, 1], 3]
rel_orient = np.minimum( | np.abs(angle1 - angle2) | numpy.abs |
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
import pandas as pd
import warnings
from multiprocessing import Pool
from numba import njit, prange
def euclidean_distance_per_feature(a, b):
"""Compute the euclidean distance per shared feature between two numpy arrays.
Parameters
----------
a: numpy array
b: numpy array
Returns
-------
numpy array
"""
diff=a-b
n_feature = len(diff)-np.isnan(diff).sum()
if n_feature == 0:
print("warning was about to divide by zero")
return 10000*len(diff)
return np.sqrt(np.nansum(diff*diff))/n_feature
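# Illustrative example (`_example_edpf` is not part of the original module):
# only the first feature of a - b is non-NaN, so the distance is sqrt(1.0) / 1 == 1.0.
def _example_edpf():
    a = np.array([1.0, np.nan, 3.0])
    b = np.array([2.0, 5.0, np.nan])
    return euclidean_distance_per_feature(a, b)  # -> 1.0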
@njit(parallel=True)
def dist_edpf(XA,XB):
'''
dist(u=XA[i], v=XB[j]) is computed and stored in the ij'th entry,
where dist is the euclidean_distance_per_feature defined above.
Parameters
----------
XA : numpy array
XB : numpy array
Returns
-------
arr : numpy array
'''
n_a = len(XA)
n_b = len(XB)
arr = np.empty((n_a,n_b))
for i in prange(n_a):
for j in prange(n_b):
diff=XA[i]-XB[j]
arr[i][j]=np.sqrt(np.nansum(diff*diff))/(len(diff)-np.isnan(diff).sum())
return arr
class KMeans(object):
'''
K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init :
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
n_init : int, default: 1
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
tolerance : float, default : 1e-4
Attributes
----------
centroids_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
'''
def __init__(self, n_clusters=8, init='k-means++', n_init=1,
max_iter=300, tolerance = 1e-4, verbose = False):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tolerance = tolerance
self.n_init = n_init
self.verbose = verbose
self.centroids_ = None
self.labels_ = None
def _initialize_centroids(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
k-means++ initialization for centroids
'''
# use Kmeans plus plus
self.centroids_ = self._kmeans_plus_plus(X)
def _kmeans_plus_plus(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
helper function to initialize centroids in a smart way
'''
k=self.n_clusters
centroids = np.empty((k, X.shape[1]))
for j in range(k):
if j == 0:
centroids[j] = X[np.random.choice(X.shape[0])]
else:
# compute square of euclidean distance per feature to nearest centroid
dists = dist_edpf(X, centroids[:j].reshape(-1, X.shape[1]))
dists2 = dists.min(axis = 1)
# pick random choice with probabilty propertional to distances
ind = np.random.choice(X.shape[0], p = dists2/dists2.sum())
centroids[j] = X[ind]
return centroids
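# Note: each subsequent centroid is sampled with probability proportional to its
# per-feature distance to the nearest centroid chosen so far (classic k-means++
# weights by the squared distance instead).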
def _assign_clusters(self, X):
'''
computes the euclidean distance per feature from each point to each centroid,
assigns each point to its closest centroid, and stores the result in self.labels_
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Data points to assign to clusters based on distance metric
'''
labels = self.predict(X)
self.labels_ = labels
def _compute_centroids(self, X):
'''
compute the centroids for the datapoints in X from the current values
of self.labels_
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Data points to assign to clusters based on distance metric
returns new centroids
'''
centroids=[]
for j in range(self.n_clusters):
arr = X[self.labels_==j]
if len(arr)-np.isnan(arr).sum()==0:
arr = X
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
centroids.append(np.nanmean(arr, axis=0))
return np.array(centroids)
def fit(self, X):
'''
Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
'''
self._initialize_centroids(X)
for i in range(self.max_iter):
self._assign_clusters(X)
new_centroids = self._compute_centroids(X)
if (np.array([euclidean_distance_per_feature(*a) for a in zip(self.centroids_,new_centroids)]) < self.tolerance).all():
if self.verbose:
print('Converged on interation {}'.format(i))
return i
break
# re-assign centroids
self.centroids_ = new_centroids
return i
def predict(self, X):
'''
Optional method: predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
'''
distances = dist_edpf(X, self.centroids_)
return distances.argmin(axis = 1)
def score(self, X):
'''
return the total distance of all samples to their closest centroids (an SSE-style score)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
The SSE
'''
labels = self.predict(X)
lst = []
for i in range(len(labels)):
lst.append(euclidean_distance_per_feature(X[i],self.centroids_[labels[i]]))
arr = np.array(lst)
SSE = | np.sum(arr) | numpy.sum |
import numpy as np
import tensorflow as tf
def unpickle(file):
import pickle
fo = open(file, 'rb')
dict = pickle.load(fo, encoding='latin1')
fo.close()
if 'data' in dict:
dict['data'] = dict['data'].reshape((-1, 3, 32, 32)).swapaxes(1, 3).swapaxes(1, 2).reshape(-1, 32*32*3) / 256.
return dict
def load_data_one(f):
batch = unpickle(f)
data = batch['data']
labels = batch['labels']
print("Loading %s: %d" % (f, len(data)))
return data, labels
def load_data(files, data_dir, label_count):
data, labels = load_data_one(data_dir + '/' + files[0])
for f in files[1:]:
data_n, labels_n = load_data_one(data_dir + '/' + f)
data = np.append(data, data_n, axis=0)
labels = np.append(labels, labels_n, axis=0)
labels = np.array([[float(i == label) for i in range(label_count)] for label in labels])
return data, labels
def run_in_batch_avg(session, tensors, batch_placeholders, feed_dict={}, batch_size=200):
res = [0] * len(tensors)
batch_tensors = [(placeholder, feed_dict[placeholder]) for placeholder in batch_placeholders]
total_size = len(batch_tensors[0][1])
batch_count = int((total_size + batch_size - 1) / batch_size)
for batch_idx in range(batch_count):
current_batch_size = None
for (placeholder, tensor) in batch_tensors:
batch_tensor = tensor[batch_idx*batch_size: (batch_idx+1)*batch_size]
current_batch_size = len(batch_tensor)
feed_dict[placeholder] = batch_tensor
tmp = session.run(tensors, feed_dict=feed_dict)
res = [r + t * current_batch_size for (r, t) in zip(res, tmp)]
return [r / float(total_size) for r in res]
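# run_in_batch_avg evaluates the requested tensors in minibatches and weights
# each batch result by its actual size, so the returned values equal the
# averages over the whole feed even when the last batch is smaller.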
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.01, shape=shape)
return tf.Variable(initial)
def conv2d(input, in_features, out_features, kernel_size, with_bias=False):
W = weight_variable([kernel_size, kernel_size, in_features, out_features])
conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')
if with_bias:
return conv + bias_variable([out_features])
return conv
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob):
current = tf.contrib.layers.batch_norm(current, scale=True, is_training=is_training, updates_collections=None)
current = tf.nn.relu(current)
current = conv2d(current, in_features, out_features, kernel_size)
current = tf.nn.dropout(current, keep_prob)
return current
def block(input, layers, in_features, growth, is_training, keep_prob):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth, 3, is_training, keep_prob)
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
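# Each layer of a dense block concatenates `growth` new feature maps, so the
# returned channel count is in_features + layers * growth (e.g. 16 input
# channels, growth 12 and 12 layers give 16 + 12 * 12 = 160).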
def avg_pool(input, s):
return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'VALID')
def run_model(data, image_dim, label_count, depth):
weight_decay = 1e-4
layers = int((depth - 4) / 3)
graph = tf.Graph()
with graph.as_default():
xs = tf.placeholder("float", shape=[None, image_dim])
ys = tf.placeholder("float", shape=[None, label_count])
lr = tf.placeholder("float", shape=[])
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder("bool", shape=[])
current = tf.reshape(xs, [-1, 32, 32, 3])
current = conv2d(current, 3, 16, 3)
current, features = block(current, layers, 16, 12, is_training, keep_prob)
current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
current = avg_pool(current, 2)
current, features = block(current, layers, features, 12, is_training, keep_prob)
current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
current = avg_pool(current, 2)
current, features = block(current, layers, features, 12, is_training, keep_prob)
current = tf.contrib.layers.batch_norm(current, scale=True, is_training=is_training, updates_collections=None)
current = tf.nn.relu(current)
current = avg_pool(current, 8)
final_dim = features
current = tf.reshape(current, [-1, final_dim])
Wfc = weight_variable([final_dim, label_count])
bfc = bias_variable([label_count])
ys_ = tf.nn.softmax(tf.matmul(current, Wfc) + bfc)
cross_entropy = -tf.reduce_mean(ys * tf.log(ys_ + 1e-12))
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
train_step = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True).minimize(cross_entropy + l2 * weight_decay)
correct_prediction = tf.equal(tf.argmax(ys_, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
with tf.Session(graph=graph) as session:
batch_size = 64
learning_rate = 0.1
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
train_data, train_labels = data['train_data'], data['train_labels']
batch_count = int(len(train_data) / batch_size)
batches_data = | np.split(train_data[:batch_count * batch_size], batch_count) | numpy.split |
import numpy as np
import pandas as pd
import sys
import pathlib
from matplotlib import pyplot as plt
# Import the functions used throughout this project from the function dictionary library file
fileDir = pathlib.Path(__file__).parents[2]
code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
sys.path.append(str(code_library_folder))
from partitioning_modeling_function import treatment_train_violation_checks, mc_treatment_train_violation_checks
from statistical_functions import ecdf
output_filepath = fileDir / 'Results' / 'Treatment Trains Violations Summary' / 'MC Bit csESP wetFGD cp bt zvi.xlsx'
figure_filepath = fileDir / 'Results' / 'Treatment Trains Violations Summary' / 'MC Bit csESP wetFGD cp bt zvi.tif'
# Order of inputs are:
coal_input = 'Bituminous' # rank or bed
csesp_input = 1 # 0/1 if a csESP is installed
hsesp_input = 0 # 0/1 if a hsESP is installed
ff_input = 0 # 0/1 if a fabric filter is installed
scr_input = 0 # 0/1 if a selective catalytic reduction is installed
aci_input = 0 # 0/1 if an activated carbon injection process is installed
wetfgd_input = 1 # 0/1 if a wetFGD process is installed
wetfgdtype_input = 'LS Forced DBA' # 'LS Forced DBA', 'LS Forced None', 'LS Inhibited DBA', 'LS Inhibited None', 'LS Inhibited NaFo',
# 'Mg-enhanced Lime Natural', 'Mg-enhanced Lime Inhibited', 'Mg-enhanced Lime Ext. Forced'
dryfgd_input = 0 # 0/1 if a dryFGD process is installed
elg_input = 1 # 0/1 if water treatment process is designed to achieve Selenium removal
zld_input = 0 # 0/1 if a zero liquid discharge process is installed
cp_input = 1 # 0/1 if a chemical precipitation process is installed
mbr_input = 0 # 0/1 if a membrane bioreactor is installed
bt_input = 1 # 0/1 if a biological treatment process is installed
mvc_input = 0 # 0/1 if a mechanical vapor compression process is installed
iex_input = 0 # 0/1 if an ion exchange process is installed
alox_input = 0 # 0/1 if an Aluminum Oxide process is installed
feox_input = 0 # 0/1 if an Iron Oxide process is installed
zvi_input = 1 # 0/1 if a zero-valent iron process is installed
crys_input = 0 # 0/1 if a crystallization process is installed
electricity_generated_input = 550 # Hourly generation in MWh
i = 0
violations_total = []
violations_as = []
violations_hg = []
violations_se = []
concentration_as = []
concentration_hg = []
concentration_se = []
concentration_cl = []
concentration_pb = []
while i < 200:
print(i)
total_violations, as_concentration, as_violations, hg_concentration, hg_violations, se_concentration, se_violations, \
cl_concentration, pb_concentration = mc_treatment_train_violation_checks(coal_input, csesp_input, hsesp_input, ff_input,
scr_input, aci_input, wetfgd_input,
wetfgdtype_input, dryfgd_input, elg_input,
zld_input, cp_input, mbr_input, bt_input,
mvc_input, iex_input, alox_input, feox_input,
zvi_input, crys_input,
electricity_generated_input)
violations_total.append(total_violations)
violations_as.append(as_violations)
violations_hg.append(hg_violations)
violations_se.append(se_violations)
concentration_as.append(as_concentration)
concentration_hg.append(hg_concentration)
concentration_se.append(se_concentration)
concentration_cl.append(cl_concentration)
concentration_pb.append(pb_concentration)
i += 1
total_violations_summary = [np.percentile(violations_total, 5), np.percentile(violations_total, 25),
np.percentile(violations_total, 50), np.percentile(violations_total, 75),
np.percentile(violations_total, 95)]
as_violations_summary = [np.percentile(violations_as, 5), np.percentile(violations_as, 25),
np.percentile(violations_as, 50), np.percentile(violations_as, 75),
np.percentile(violations_as, 95)]
hg_violations_summary = [np.percentile(violations_hg, 5), np.percentile(violations_hg, 25),
np.percentile(violations_hg, 50), np.percentile(violations_hg, 75),
np.percentile(violations_hg, 95)]
se_violations_summary = [np.percentile(violations_se, 5), np.percentile(violations_se, 25),
np.percentile(violations_se, 50), | np.percentile(violations_se, 75) | numpy.percentile |
from scipy.stats import norm
import numpy as np
print(f'{norm.cdf(2)-norm.cdf(-2):.4f}')
print(f'{norm.cdf(3)-norm.cdf(-3):.4f}')
# Parameters for the bootstrap
sample1 = np.random.normal(14,1,size=50)
def ci_param_bootstrap(data, alpha=0.05, number_of_bootstrap_samples=10, size_of_bootstrap_samples=20 ):
"""параметрический бутстрэп
Args:
data (array like): данные для оценки среднего
alpha (float, optional): увроень доверия. Defaults to 0.05.
number_of_bootstrap_samples (int, optional): сколько сэмплов для бутстрэпа делать. Defaults to 10.
size_of_bootstrap_samples (int, optional): сколько наблюдений делать в каждый сэмпл. Defaults to 20.
"""
# Estimate the unknown parameter theta
sample_mean = np.mean(data)
sample_std = np.std(data, ddof=1)
# print(sample_mean, sample_std)
# Draw samples from the fitted distribution N(sample_mean, sample_std)
bootstrap_samples = np.random.normal(sample_mean,sample_std,size=[number_of_bootstrap_samples,size_of_bootstrap_samples])
# Compute the mean of each bootstrap sample
bootstrap_estimates = np.apply_along_axis(np.mean, 1, bootstrap_samples)
# Compute the parametric bootstrap confidence interval
CI_Bootstrap_Parametric = (np.quantile(bootstrap_estimates,alpha/2), np.quantile(bootstrap_estimates,1-alpha/2))
return(CI_Bootstrap_Parametric)
print(ci_param_bootstrap(sample1))
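# The parametric version above resamples from the fitted normal
# N(sample_mean, sample_std); the non-parametric version below resamples the
# observed data directly with replacement.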
def ci_non_param_bootstrap(data, alpha=0.05, number_of_bootstrap_samples=10, size_of_bootstrap_samples=20 ):
"""непараметрический бутстрэп
Args:
data (array like): данные для оценки среднего
alpha (float, optional): увроень доверия. Defaults to 0.05.
number_of_bootstrap_samples (int, optional): сколько сэмплов для бутстрэпа делать. Defaults to 10.
size_of_bootstrap_samples (int, optional): сколько наблюдений делать в каждый сэмпл. Defaults to 20.
"""
# Resample directly from the observed data (with replacement)
bootstrap_samples = np.random.choice(data,size=[number_of_bootstrap_samples,size_of_bootstrap_samples])
# Compute the mean of each bootstrap sample
bootstrap_estimates = | np.apply_along_axis(np.mean, 1, bootstrap_samples) | numpy.apply_along_axis |
# -*- coding: utf-8 -*-
from scipy.integrate import solve_ivp
import matplotlib
"""in case it's not working uncomment this: matplotlib.use('TkAgg') """
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
from matplotlib import colors as mcolors
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import testround_difftest_set as r #for sparse matrix stuff
#import testround_difftest_backup as r
import stencil_import as tubemodel
import os
import plot_saved_v
d=10.0
dx=20
dt=0.1#10
maxtime = 10000 #TIME IN SECONDS!!!
"""
CHECKLIST
maxtime
dx according to model (10 ori, 20 0.5, 40 0.25 etc)
stencil_import paths
stencils in folder
plotsaved path
Wnt0, Shh0
delta_Wnt, delta_Shh
plotting colourmax here and in plotsaved
how often save?
spheresize according to model
"""
xlen =tubemodel.xmax
ylen =tubemodel.ymax
zlen = tubemodel.zmax
print(xlen,ylen,zlen)
spheresize = r.spheresize
D_Wnt = 150.7
D_Shh = 133.4
delta_Wnt = 0.04
delta_Shh = 0.1
Wnt0 = tubemodel.Wnt0
Shh0 = tubemodel.Shh0
#import the stencils for tubemodel, WNTsecretion and SHHsecretion points
stenc = tubemodel.stenc
WNTstenc= tubemodel.Wstenc
SHHstenc= tubemodel.Sstenc
#plotting colourmax
rosmax = tubemodel.Wnt0#5#0.0
dorsmax = tubemodel.Shh0#5#0.0
unknownbase=5.0
class Grid:
def __init__(self,xdim,ydim,zdim, Name, seeds,Alpha,Baselevel):
self.grid = np.zeros((xdim,ydim,zdim))
self.totalsites = np.sum(stenc.grid)
self.name = Name
self.xlen=xdim
self.ylen=ydim
self.zlen=zdim
self.baselevel=Baselevel
self.plantrandomseed(seeds)
self.alpha=Alpha
if Name =="Wnt":
self.Amatr = A_Wnt
self.b = b_Wnt
self.delta = delta_Wnt
print("deltawnt:",self.delta)
if Name =="Shh":
self.Amatr = A_Shh
self.b = b_Shh
self.delta = delta_Shh
def show(self,ax):
plotgrid(self,ax)
def plantseed(self,coordinates):
for xyz in coordinates:
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[y][x][z] = self.baselevel
def artificialseed(self,coordinates,level):
for i in range(len(coordinates)):
xyz = coordinates[i]
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[x][y][z] = level[i]*self.baselevel
def plantrandomseed(self, seeds):
n = seeds
M = self.totalsites
coords = np.transpose(np.where(stenc.grid))
for c in coords:
randomnr = np.random.uniform()
if randomnr < n/M:
self.grid[c[0]][c[1]][c[2]] = self.baselevel#*np.random.uniform()
n-=1
M-=1
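# Selection sampling over the stencil: each remaining site is seeded with
# probability n/M (seeds still to place over sites still to visit), so roughly
# `seeds` sites start at baselevel.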
def diffusion(self,n):
for i in range(n):
deltaU,b = laplacian(self,self.Amatr,self.b)
old = self.grid
self.grid =old + dt*self.alpha*(deltaU +b)
def degradation(self,n):
for i in range(n):
old = self.grid
#print("degrmax",np.max(self.delta * self.grid *dt))
self.grid = old - self.delta * old *dt
def rostrocaudal_reaction(rate,FB,MB,HB,Wnt):
for i in range(rate):
fb= (FB.grid).copy()
mb= (MB.grid).copy()
hb= (HB.grid).copy()
gsk3= (GSK3.grid).copy() # Wnt modulates gsk3
wnt= (Wnt.grid).copy()
u = (U.grid).copy()
FB.grid = fb + dt*( ros.c1*(gsk3**ros.n1)/(1+ ros.c1*(gsk3**ros.n1)+ ros.c2*(mb**ros.n2)+ ros.c3*(hb**ros.n3)) -ros.d1*fb )
MB.grid = mb + dt*(ros.c4*(mb**ros.n4)/(1+ ros.c4*(mb**ros.n4)+ ros.c5*(fb**ros.n5)+ ros.c6*(hb**ros.n6)+ ros.c7*(gsk3**ros.n7)) -ros.d2*mb)
HB.grid = hb + dt*( ros.c8*(hb**ros.n8)/(1 + ros.c8*(hb**ros.n8) + ros.c9*(fb**ros.n9) + ros.c10*(mb**ros.n10)+ ros.c11*(gsk3**ros.n11)) -ros.d3*hb )
GSK3.grid = gsk3 + dt*(ros.c12*(gsk3**ros.n12)/(1 + ros.c12*(gsk3**ros.n12)+ ros.c13*(u**ros.n13) ) -ros.d4*gsk3 )
U.grid = u + dt*((ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15))/( 1+ ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15) + ros.c16*(u**ros.n16)) - ros.d5*u)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
FB.grid[c[0]][c[1]][c[2]] = 0
MB.grid[c[0]][c[1]][c[2]] = 0
HB.grid[c[0]][c[1]][c[2]] = 0
GSK3.grid[c[0]][c[1]][c[2]] = 0
def dorsoventral_reaction(rate,P,O,N,G,S,W):
for i in range(rate):
p= (P.grid).copy()
o= (O.grid).copy()
n= (N.grid).copy()
g= (G.grid).copy()
s= (S.grid).copy()
w= (W.grid).copy()
P.grid = p + dt*( dors.alpha / (1.0 + (n/dors.NcritP)**dors.h1 + (o/dors.OcritP)**dors.h2 ) - dors.k1*p )
O.grid = o + dt*(( (dors.beta*g) / (1.0+g) ) * ( 1.0/(1.0+(n/dors.NcritO)**dors.h3) ) - dors.k2*o)
N.grid = n + dt*( (dors.gamma*g/(1.0+g)) * (1.0/(1.0+ (o/dors.OcritN)**dors.h4 + (p/dors.PcritN)**dors.h5 )) - dors.k3*n)
G.grid = g + dt*(((dors.delta*s)/(1.0+s)) * (1.0/(1.0+ (w/dors.WcritG)**dors.h6 )) - dors.k4*g)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
P.grid[c[0]][c[1]][c[2]] = 0
O.grid[c[0]][c[1]][c[2]] = 0
N.grid[c[0]][c[1]][c[2]] = 0
G.grid[c[0]][c[1]][c[2]] = 0
def alldiffuse(rate,Wnt,Shh):
for i in range(rate):
Wnt.diffusion(1)
Shh.diffusion(1)
def alldegrade(rate,Wnt,Shh):
for i in range(rate):
Wnt.degradation(1)
Shh.degradation(1)
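# --- Illustrative sketch (not part of the original script) ---
# One way the pieces above are presumably combined into a single explicit time
# step: diffuse and degrade the morphogens, then advance both reaction
# networks once per dt. The grids (Wnt, Shh, FB, MB, HB, P, O, N, G) and the
# globals GSK3/U used inside rostrocaudal_reaction are assumed to be created
# elsewhere in this file.
def simulation_step(Wnt, Shh, FB, MB, HB, P, O, N, G):
    alldiffuse(1, Wnt, Shh)                          # explicit diffusion step
    alldegrade(1, Wnt, Shh)                          # first-order decay
    rostrocaudal_reaction(1, FB, MB, HB, Wnt)        # FB/MB/HB/GSK3/U network
    dorsoventral_reaction(1, P, O, N, G, Shh, Wnt)   # P/O/N/G network driven by Shh and Wnt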
def plotgrid(grid,ax,r=0.47,g=0.0,b=1.0):
if np.all(grid.grid ==0):
return
print("minmax",np.min(grid.grid),np.max(grid.grid))
if grid.baselevel!=0:
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/unknownbase], keep_alpha=True) for z in x] for x in y] for y in grid.grid])
else:
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/unknownbase], keep_alpha=True) for z in x] for x in y] for y in grid.grid])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(grid.grid))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=grid.baselevel,depthshade=False,s=spheresize )
def plotarray(array,ax,maximum,r=0.47,g=0.0,b=1.0):
if np.all(array ==0):
return
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/maximum ], keep_alpha=True) for z in x] for x in y] for y in array])
fc = (colorgrid).flatten()
    gridindices = np.where(np.ones_like(array))
#!/usr/bin/python2.5
#
# Copyright 2014 <NAME>.
#
# Author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Waveform definitions.
import numpy
waveforms = []
"""----------------------------------------------------------------------------
Waveshaper for audio rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
tan = numpy.arctan(8 * numpy.cos(numpy.pi * x))
scale = tan.max()
tan = (1.0 - tan / scale) / 2.0
inverse_sin = numpy.arccos(1 - 2 * x) / numpy.pi
inverse_tan = numpy.arccos(numpy.tan(scale * (1.0 - 2.0 * x)) / 8.0) / numpy.pi
def audio_rate_flip(x):
x = numpy.array(list(-x[WAVESHAPER_SIZE:0:-1]) + list(x))
return numpy.round((x * 32767.0)).astype(int)
audio_rate_tables = []
audio_rate_tables.append(('inverse_tan_audio', audio_rate_flip(inverse_tan)))
audio_rate_tables.append(('inverse_sin_audio', audio_rate_flip(inverse_sin)))
audio_rate_tables.append(('linear_audio', audio_rate_flip(linear)))
audio_rate_tables.append(('sin_audio', audio_rate_flip(sin)))
audio_rate_tables.append(('tan_audio', audio_rate_flip(tan)))
waveforms.extend(audio_rate_tables)
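# --- Illustrative sketch (not part of the original file) ---
# The tables above map an input in [-1, 1] onto 2 * WAVESHAPER_SIZE + 1
# 16-bit samples; a lookup would typically interpolate between adjacent
# entries. A rough numpy equivalent (purely illustrative):
def apply_waveshaper(samples, table):
  position = (numpy.clip(samples, -1.0, 1.0) + 1.0) * WAVESHAPER_SIZE
  index = numpy.clip(position.astype(int), 0, 2 * WAVESHAPER_SIZE - 1)
  frac = position - index
  shaped = table[index] * (1.0 - frac) + table[index + 1] * frac
  return shaped / 32767.0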
"""----------------------------------------------------------------------------
Waveshaper for control rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
# -*- coding: utf-8 -*-
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import operator
import warnings
import numpy as np
from numpy.fft import irfft, fft, ifft
from scipy.special import sinc
from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
lstsq)
from scipy._lib.six import string_types
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
fs = 2*nyq
return fs
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These are
# relative frequencies, expressed as a fraction of the Nyquist frequency.
# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
<NAME>, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
Suppose we want to design a lowpass filter, with 65 dB attenuation
in the stop band. The Kaiser window parameter to be used in the
window method is computed by `kaiser_beta(65)`:
>>> from scipy.signal import kaiser_beta
>>> kaiser_beta(65)
6.20426
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter,
expressed as a fraction of the Nyquist frequency.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
Examples
--------
Suppose we want to design a FIR filter using the Kaiser window method
that will have 211 taps and a transition width of 9 Hz for a signal that
is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
is computed as follows:
>>> from scipy.signal import kaiser_atten
>>> kaiser_atten(211, 0.0375)
64.48099630593983
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Determine the filter window parameters for the Kaiser window method.
The parameters returned by this function are generally used to create
a finite impulse response filter using the window method, with either
`firwin` or `firwin2`.
Parameters
----------
ripple : float
Upper bound for the deviation (in dB) of the magnitude of the
filter's frequency response from that of the desired filter (not
including frequencies in any transition intervals). That is, if w
is the frequency expressed as a fraction of the Nyquist frequency,
A(w) is the actual frequency response of the filter and D(w) is the
desired frequency response, the design requirement is that::
        abs(A(w) - D(w)) < 10**(-ripple/20)
for 0 <= w <= 1 and w not in a transition interval.
width : float
Width of transition region, normalized so that 1 corresponds to pi
radians / sample. That is, the frequency is expressed as a fraction
of the Nyquist frequency.
Returns
-------
numtaps : int
The length of the Kaiser window.
beta : float
The beta parameter for the Kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=True)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476.
Examples
--------
We will use the Kaiser window method to design a lowpass FIR filter
for a signal that is sampled at 1000 Hz.
We want at least 65 dB rejection in the stop band, and in the pass
band the gain should vary no more than 0.5%.
We want a cutoff frequency of 175 Hz, with a transition between the
pass band and the stop band of 24 Hz. That is, in the band [0, 163],
the gain varies no more than 0.5%, and in the band [187, 500], the
signal is attenuated by at least 65 dB.
>>> from scipy.signal import kaiserord, firwin, freqz
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0
>>> cutoff = 175
>>> width = 24
The Kaiser method accepts just a single parameter to control the pass
band ripple and the stop band rejection, so we use the more restrictive
of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
so we will use 65 dB as the design parameter.
Use `kaiserord` to determine the length of the filter and the
parameter for the Kaiser window.
>>> numtaps, beta = kaiserord(65, width/(0.5*fs))
>>> numtaps
167
>>> beta
6.20426
Use `firwin` to create the FIR filter.
>>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
... scale=False, nyq=0.5*fs)
Compute the frequency response of the filter. ``w`` is the array of
frequencies, and ``h`` is the corresponding complex array of frequency
responses.
>>> w, h = freqz(taps, worN=8000)
>>> w *= 0.5*fs/np.pi # Convert w to Hz.
Compute the deviation of the magnitude of the filter's response from
that of the ideal lowpass filter. Values in the transition region are
set to ``nan``, so they won't appear in the plot.
>>> ideal = w < cutoff # The "ideal" frequency response.
>>> deviation = np.abs(np.abs(h) - ideal)
>>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
Plot the deviation. A close look at the left end of the stop band shows
that the requirement for 65 dB attenuation is violated in the first lobe
by about 0.125 dB. This is not unusual for the Kaiser window method.
>>> plt.plot(w, 20*np.log10(np.abs(deviation)))
>>> plt.xlim(0, 0.5*fs)
>>> plt.ylim(-90, -60)
>>> plt.grid(alpha=0.25)
>>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Deviation from ideal (dB)')
>>> plt.title('Lowpass Filter Frequency Response')
>>> plt.show()
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=None, fs=None):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist frequency, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist frequency.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1-D array_like
Cutoff frequency of filter (expressed in the same units as `fs`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `fs/2`. The values 0 and
`fs/2` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `fs`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
If True, the gain at the frequency 0 (i.e., the "DC gain") is 1.
If False, the DC gain is 0. Can also be a string argument for the
desired filter type (equivalent to ``btype`` in IIR design functions).
.. versionadded:: 1.3.0
Support for string arguments.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `fs/2` (the Nyquist frequency) if the first passband ends at
          `fs/2` (i.e. the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `cutoff` must be between 0 and `nyq`. Default
is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `cutoff`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to ``fs/2``, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See Also
--------
firwin2
firls
minimum_phase
remez
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
""" # noqa: E501
# The major enhancements to this function added in November 2010 were
# developed by <NAME> (see ticket #902).
nyq = 0.5 * _get_fs(fs, nyq)
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than fs/2.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
if isinstance(pass_zero, str):
if pass_zero in ('bandstop', 'lowpass'):
if pass_zero == 'lowpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="lowpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandstop", got %s'
% (cutoff.shape,))
pass_zero = True
elif pass_zero in ('bandpass', 'highpass'):
if pass_zero == 'highpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="highpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandpass", got %s'
% (cutoff.shape,))
pass_zero = False
else:
raise ValueError('pass_zero must be True, False, "bandpass", '
'"lowpass", "highpass", or "bandstop", got '
'%s' % (pass_zero,))
pass_zero = bool(operator.index(pass_zero)) # ensure bool-like
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist frequency.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2-D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by <NAME>, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
antisymmetric=False, fs=None):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1-D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency is half `fs`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must
not be repeated.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `freq` must be between 0 and `nyq`. Default is 1.
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
fs : float, optional
The sampling frequency of the signal. Each frequency in `cutoff`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
firls
firwin
minimum_phase
remez
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the value of 'numtaps` and `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
Magnitude response of all but type I filters are subjects to following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] <NAME>. and <NAME>., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, <NAME>., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
nyq = 0.5 * _get_fs(fs, nyq)
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with fs/2.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if freq[1] == 0:
raise ValueError('Value 0 must not be repeated in freq')
if freq[-2] == nyq:
raise ValueError('Value fs/2 must not be repeated in freq')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist frequency.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist frequencies.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero "
"frequency.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
if (d == 0).any():
# Tweak any repeated values in freq so that interp works.
freq = np.array(freq, copy=True)
eps = np.finfo(float).eps * nyq
for k in range(len(freq) - 1):
if freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Check if freq is strictly increasing after tweak
d = np.diff(freq)
if (d <= 0).any():
raise ValueError("freq cannot contain numbers that are too close "
"(within eps * (fs/2): "
"{}) to a repeated value".format(eps))
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass',
maxiter=25, grid_density=16, fs=None):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges.
All elements must be non-negative and less than half the sampling
frequency as given by `fs`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
*Deprecated. Use `fs` instead.*
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
fs : float, optional
The sampling frequency of the signal. Default is 1.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
firls
firwin
firwin2
minimum_phase
References
----------
.. [1] <NAME> and <NAME>, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] <NAME>, <NAME> and <NAME>, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
In these examples `remez` gets used creating a bandpass, bandstop, lowpass
and highpass filter. The used parameters are the filter order, an array
with according frequency boundaries, the desired attenuation values and the
sampling frequency. Using `freqz` the corresponding frequency response
gets calculated and plotted.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> def plot_response(fs, w, h, title):
... "Utility function to plot response functions"
... fig = plt.figure()
... ax = fig.add_subplot(111)
... ax.plot(0.5*fs*w/np.pi, 20*np.log10(np.abs(h)))
... ax.set_ylim(-40, 5)
... ax.set_xlim(0, 0.5*fs)
... ax.grid(True)
... ax.set_xlabel('Frequency (Hz)')
... ax.set_ylabel('Gain (dB)')
... ax.set_title(title)
This example shows a steep low pass transition according to the small
transition width and high filter order:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 8000.0 # Desired cutoff frequency, Hz
>>> trans_width = 100 # Width of transition from pass band to stop band, Hz
>>> numtaps = 400 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Low-pass Filter")
This example shows a high pass filter:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 2000.0 # Desired cutoff frequency, Hz
>>> trans_width = 250 # Width of transition from pass band to stop band, Hz
>>> numtaps = 125 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs],
... [0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "High-pass Filter")
For a signal sampled with 22 kHz a bandpass filter with a pass band of 2-5
kHz gets calculated using the Remez algorithm. The transition width is 260
Hz and the filter order 10:
>>> fs = 22000.0 # Sample rate, Hz
>>> band = [2000, 5000] # Desired pass band, Hz
>>> trans_width = 260 # Width of transition from pass band to stop band, Hz
>>> numtaps = 10 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1],
... band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [0, 1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-pass Filter")
It can be seen that for this bandpass filter, the low order leads to higher
ripple and less steep transitions. There is very low attenuation in the
stop band and little overshoot in the pass band. Of course the desired
gain can be better approximated with a higher filter order.
The next example shows a bandstop filter. Because of the high filter order
the transition is quite steep:
>>> fs = 20000.0 # Sample rate, Hz
>>> band = [6000, 8000] # Desired stop band, Hz
>>> trans_width = 200 # Width of transition from pass band to stop band, Hz
>>> numtaps = 175 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1], band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [1, 0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-stop Filter")
>>> plt.show()
"""
if Hz is None and fs is None:
fs = 1.0
elif Hz is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
fs = Hz
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
maxiter, grid_density)
def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
"""
FIR filter design using least-squares error minimization.
Calculate the filter coefficients for the linear-phase finite
impulse response (FIR) filter which has the best approximation
to the desired frequency response described by `bands` and
`desired` in the least squares sense (i.e., the integral of the
weighted mean-squared error within the specified bands is
minimized).
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be odd.
bands : array_like
A monotonic nondecreasing sequence containing the band edges in
Hz. All elements must be non-negative and less than or equal to
the Nyquist frequency given by `nyq`.
desired : array_like
A sequence the same size as `bands` containing the desired gain
at the start and end point of each band.
weight : array_like, optional
A relative weighting to give to each band region when solving
the least squares problem. `weight` has to be half the size of
`bands`.
nyq : float, optional
*Deprecated. Use `fs` instead.*
Nyquist frequency. Each frequency in `bands` must be between 0
and `nyq` (inclusive). Default is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `bands`
must be between 0 and ``fs/2`` (inclusive). Default is 2.
Returns
-------
coeffs : ndarray
Coefficients of the optimal (in a least squares sense) FIR filter.
See also
--------
firwin
firwin2
minimum_phase
remez
Notes
-----
This implementation follows the algorithm given in [1]_.
As noted there, least squares design has multiple advantages:
1. Optimal in a least-squares sense.
2. Simple, non-iterative method.
    3. The general solution can be obtained by solving a linear
       system of equations.
4. Allows the use of a frequency dependent weighting function.
This function constructs a Type I linear phase FIR filter, which
contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
.. math:: coeffs(n) = coeffs(numtaps - 1 - n)
The odd number of coefficients and filter symmetry avoid boundary
conditions that could otherwise occur at the Nyquist and 0 frequencies
(e.g., for Type II, III, or IV variants).
.. versionadded:: 0.18
References
----------
.. [1] <NAME>, Linear-Phase Fir Filter Design By Least Squares.
OpenStax CNX. Aug 9, 2005.
http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
Examples
--------
We want to construct a band-pass filter. Note that the behavior in the
frequency ranges between our stop bands and pass bands is unspecified,
and thus may overshoot depending on the parameters of our filter:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(2)
>>> fs = 10.0 # Hz
>>> desired = (0, 0, 1, 1, 0, 0)
>>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
... fir_firls = signal.firls(73, bands, desired, fs=fs)
... fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
... hs = list()
... ax = axs[bi]
... for fir in (fir_firls, fir_remez, fir_firwin2):
... freq, response = signal.freqz(fir)
... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
... for band, gains in zip(zip(bands[::2], bands[1::2]),
... zip(desired[::2], desired[1::2])):
... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
... if bi == 0:
... ax.legend(hs, ('firls', 'remez', 'firwin2'),
... loc='lower center', frameon=False)
... else:
... ax.set_xlabel('Frequency (Hz)')
... ax.grid(True)
... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
...
>>> fig.tight_layout()
>>> plt.show()
""" # noqa
nyq = 0.5 * _get_fs(fs, nyq)
numtaps = int(numtaps)
if numtaps % 2 == 0 or numtaps < 1:
raise ValueError("numtaps must be odd and >= 1")
M = (numtaps-1) // 2
# normalize bands 0->1 and make it 2 columns
nyq = float(nyq)
if nyq <= 0:
raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
bands = np.asarray(bands).flatten() / nyq
if len(bands) % 2 != 0:
raise ValueError("bands must contain frequency pairs.")
if (bands < 0).any() or (bands > 1).any():
raise ValueError("bands must be between 0 and 1 relative to Nyquist")
bands.shape = (-1, 2)
# check remaining params
desired = np.asarray(desired).flatten()
if bands.size != desired.size:
raise ValueError("desired must have one entry per frequency, got %s "
"gains for %s frequencies."
% (desired.size, bands.size))
desired.shape = (-1, 2)
if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
raise ValueError("bands must be monotonically nondecreasing and have "
"width > 0.")
if (bands[:-1, 1] > bands[1:, 0]).any():
raise ValueError("bands must not overlap.")
if (desired < 0).any():
raise ValueError("desired must be non-negative.")
if weight is None:
weight = np.ones(len(desired))
weight = np.asarray(weight).flatten()
if len(weight) != len(desired):
raise ValueError("weight must be the same size as the number of "
"band pairs (%s)." % (len(bands),))
if (weight < 0).any():
raise ValueError("weight must be non-negative.")
# Set up the linear matrix equation to be solved, Qa = b
# We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
# where Q1(k,n)=q(k−n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
# We omit the factor of 0.5 above, instead adding it during coefficient
# calculation.
# We also omit the 1/π from both Q and b equations, as they cancel
# during solving.
# We have that:
# q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
# interval f1->f2 we get:
# q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
    q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym import utils
from meta_mb.meta_envs.base import MetaEnv
import os
import tensorflow as tf
class HumanoidEnv(MetaEnv, mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
dir_path = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, '%s/assets/humanoid.xml' % dir_path, frame_skip=5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.sim.data
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
def step(self, a):
data = self.sim.data
action = a
if getattr(self, 'action_space', None):
action = np.clip(a, self.action_space.low,
self.action_space.high)
# reward
alive_bonus = 5.0
lin_vel_cost = 0.25 / 0.015 * data.qvel.flat[0]
        quad_ctrl_cost = 0.1 * np.square(action).sum()
import logging
import os
from dataclasses import dataclass
from string import Template
import numpy as np
import tables
import pyinotify
from phd.thunderstorm.convert_to_hdf5 import CylinderProtoSet
from phd.utils.hdf5_tools import ProtoSetReader
from phd.utils.run_tools import G4CinServer, CinServerParameters
from tables import Filters, Table, Group
import matplotlib.pyplot as plt
ROOT_PATH = os.path.dirname(__file__)
INPUT_TEMPLATE = """/npm/geometry/type gdml
/npm/geometry/gdml critical_energy.gdml
/npm/thunderstorm/physics ${physics}
/npm/thunderstorm/minimal_energy ${energy} MeV
/npm/thunderstorm/stepping/type critical_energy
/npm/thunderstorm/stacking/electron false
/npm/thunderstorm/stacking/positron false
/npm/thunderstorm/stacking/gamma false
/npm/thunderstorm/stacking/save_gamma false
/npm/thunderstorm/stacking/save_electron true
/npm/thunderstorm/stacking/save_electron_cut ${energy} MeV
separator
"""
MESSEGE = """/gps/particle e-
/gps/number 1
/gps/direction 0 0 -1
/gps/ene/mono ${energy} MeV
/gps/position 0.0 0.0 0.0 m
/run/beamOn ${number}
separator
"""
class Processor:
def init_messege(self) -> str:
return ""
def process(self, event):
return None
def accept(self, event):
pass
def create_gdml(template_file, values: dict):
with open(template_file) as fin:
gdml_template = fin.read()
gdml_template = Template(gdml_template)
with open("critical_energy.gdml", 'w') as fout:
fout.write(gdml_template.substitute(values))
return 0
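# --- Illustrative usage (not from the source) ---
# create_gdml() only substitutes placeholders in the GDML template; the keys
# below ("height", "field") are guessed from the attribute names used later in
# this file (values_gdml_height / values_gdml_field), and both the template
# filename and the numbers are placeholders.
# create_gdml("critical_energy_template.gdml", {"height": 0.0, "field": 5e-4})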
class CriticalEnergyProcessor(Processor):
def __init__(self, meta):
self.reader = ProtoSetReader("stacking_simple.bin", CylinderProtoSet)
filters = Filters(complevel=3, fletcher32=True)
self.reader.set_filters(filters)
self.path_hdf5 = "result.hdf5"
self.counter = 0
self.mess_templte = Template(MESSEGE)
self.meta = meta
self.step = 0.001
def init_messege(self) -> str:
return self.mess_templte.substitute(self.meta)
def process(self, event):
group_path = self.convert(event.pathname)
os.remove(event.pathname)
with tables.open_file(self.path_hdf5) as h5file:
table: Table = h5file.get_node(group_path, "stacking_simple")
n_electron = table.nrows
n_primary = table.attrs["number"]
gamma = n_electron / n_primary
if gamma > 1:
return None
else:
self.meta["energy"] = self.meta["energy"] + self.step
return self.init_messege()
def accept(self, event):
return event.name == "stacking_simple.bin"
def convert(self, path):
with tables.open_file(self.path_hdf5, mode="a") as h5file:
group = h5file.create_group(h5file.root, "sim{}".format(str(self.counter).rjust(4, '0')))
self.reader(path, h5file, group)
for table in h5file.iter_nodes(group):
if (isinstance(table, Group)):
continue
for key, value in self.meta.items():
table.attrs[key] = value
self.counter += 1
return group._v_pathname
class G4CinServerHandler(pyinotify.ProcessEvent):
def my_init(self, server: G4CinServer, processor: Processor):
self.server = server
self.processor = processor
self.server.send(processor.init_messege())
def process_IN_CREATE(self, event):
logging.root.info(str(event))
def process_IN_CLOSE_WRITE(self, event):
if self.processor.accept(event):
result = self.processor.process(event)
if result is not None:
self.server.send(result)
else:
raise KeyboardInterrupt
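# --- Illustrative wiring sketch (not part of the original module) ---
# How the handler, processor and server above are presumably tied together.
# The G4CinServer construction itself is not shown here (its real interface
# lives in phd.utils.run_tools); the pyinotify calls follow the standard API.
def _run_critical_energy_scan(server, meta, watch_dir="."):
    processor = CriticalEnergyProcessor(meta)
    handler = G4CinServerHandler(server=server, processor=processor)
    wm = pyinotify.WatchManager()
    wm.add_watch(watch_dir, pyinotify.IN_CREATE | pyinotify.IN_CLOSE_WRITE)
    notifier = pyinotify.Notifier(wm, handler)
    notifier.loop()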
import star
import numpy as np
from phd.thunderstorm import atmosphere
from scipy.optimize import root_scalar
def get_critical_energy(height = 0, field = 0):
"""
:param height: meters
:param field:kV/cm
:return:
"""
material = star.electron.PredefinedMaterials.AIR_DRY_NEAR_SEA_LEVEL
density = atmosphere.ISACalculator.density(height) # kg/m3
def critical_energy_equation(energy):
data = star.electron.calculate_stopping_power(material, np.asarray([energy]))
stopPower = data["stopping_power_total"][0]
return field - stopPower*density
try:
critical_energy_root = root_scalar(
critical_energy_equation,
bracket=(0.001, 2.0),
)
except ValueError as err:
print(err)
return None
return critical_energy_root
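# --- Illustrative usage (not part of the original module) ---
# The field value below is purely a placeholder; per the docstring it is in
# kV/cm, and the root is bracketed between 0.001 and 2.0 MeV.
# sol = get_critical_energy(height=0.0, field=2.0)
# if sol is not None:
#     print("critical energy ~ {:.4f} MeV".format(sol.root))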
from scipy.linalg import lstsq
# def calculate_secondary_production_rate(path):
# bins = np.arange(-500.0, 501, 1)
# x = bins[:-1]
# M = x[:, np.newaxis] ** [0, 1]
#
# dtype = np.dtype(
# [
# ("field", "d"),
# ("height", "d"),
# ("energy", "d"),
# ("k", "d"),
# ("b", "d")
# ]
# )
#
# with tables.open_file(path) as h5file:
# result = []
# for group in h5file.root:
# table = h5file.get_node(group, "stacking_simple")
# data = table.read()
# field = table.attrs["values_gdml_field"][0]
# height = table.attrs["values_gdml_height"][0]
# energy = table.attrs["values_macros_energy"]
# number = table.attrs["values_macros_number"]
# temp, _ = np.histogram(data["z"], bins=bins)
# temp = np.cumsum(temp)
# y = temp / number
# p, res, rnk, s = lstsq(M, y)
# result.append((field, height, energy, p[1], p[0]))
# return np.array(result, dtype=dtype)
@dataclass(eq=True, frozen=True)
class FieldHeigth:
field: float
height : float
def get_group(path):
with tables.open_file(path) as h5file:
result = {}
for group in h5file.root:
table = h5file.get_node(group, "stacking_simple")
field = table.attrs["values_gdml_field"][0]
height = table.attrs["values_gdml_height"][0]
energy = table.attrs["values_macros_energy"]
key = FieldHeigth(field, height)
if key in result.keys():
result[key].append((energy, group._v_name))
else:
result[key] = [(energy, group._v_name)]
for value in result.values():
value.sort(key=lambda x: x[0])
return result
def plot_secondary_production_rate(path, output="plot"):
if not os.path.exists(output):
os.mkdir(output)
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
result = []
dtype = np.dtype(
[
("field", "d"),
("height", "d"),
("energy", "d"),
("k", "d"),
("b", "d")
]
)
with tables.open_file(path) as h5file:
for key, value in groups.items():
energy_cut = value[0][0]
plt.clf()
for energy, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
data = data[data["energy"] > energy_cut]
number = table.attrs["values_macros_number"]
temp, _ = np.histogram(data["z"], bins=bins)
temp = np.cumsum(temp[::-1])
y = temp / number
plt.plot(x, y)
path = os.path.join(output, "{}m_{}kV_m.png".format(key.height, key.field*1e4))
plt.xlabel("Height, meters")
plt.ylabel("Cumulative number of electron")
plt.tight_layout()
plt.savefig(path, format="png", transparent=True, dpi = 600)
return 0
def calculate_secondary_production_rate(path, rate_cut = 0.001, method="simple"):
if method not in ["simple", "rate-cut"]:
logging.root.warning("Bad method for {}".format(calculate_secondary_production_rate.__name__))
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
M = x[:, np.newaxis] ** [0, 1]
result = []
dtype = np.dtype(
[
("field", "d"),
("height", "d"),
("energy", "d"),
("k", "d"),
("b", "d"),
("chi2", "d")
]
)
with tables.open_file(path) as h5file:
for key, value in groups.items():
energy_cut = value[0][0]
for energy, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
data = data[data["energy"] > energy_cut]
number = table.attrs["values_macros_number"]
temp, _ = np.histogram(data["z"], bins=bins)
temp = np.cumsum(temp[::-1])
y = temp / number
p, res, rnk, s = lstsq(M, y)
k = p[1]
if method=="rate-cut":
if k<= rate_cut:
energy_cut = energy
elif method=="simple":
energy_cut = energy
y_fit = p[1]*x + p[0]
indx = y_fit !=0
y_fit = y_fit[indx]
chi2 = np.sum(((y[indx] - y_fit)/y_fit)**2)
result.append((key.field, key.height, energy, p[1], p[0], chi2))
return np.array(result, dtype=dtype)
class CriticalEnergyProvider:
def __init__(self):
data_path = os.path.join(os.path.dirname(__file__), "data", "critical_energy.npy")
self.data = np.load(data_path)
self.pairs = np.unique(self.data[["height", "field"]])
def get_critical_energy(self, height = 0.0, field = 0.0, length = 1000.0):
indx = self.pairs["height"] == height
if np.all(np.logical_not(indx)):
raise Exception("Non table height")
pairs = self.pairs[indx]
fields = np.sort(pairs["field"])
f_indx = (field < fields).argmax()
field = fields[f_indx]
indx = np.logical_and(self.data["height"] == height, self.data["field"] == field)
rate_cut = 1/length
data = self.data[indx]
        indx = np.argsort(data["energy"])
import json
import sys
from collections import Counter
from pathlib import Path
import numpy as np
import pandas as pd
HEADER = '-' * 50
def extract_last_part_from_exception(text):
return text[text.rfind(' File "'):]
def extract_last_place_from_exception(text):
return text[text.rfind(' File "'):].splitlines()[0]
def load_df(filepath):
with Path(filepath).open() as f:
df = json.load(f)
df = pd.DataFrame.from_dict(df)
for k in df.keys():
df[k] = [tuple(v) if isinstance(v, list) else v for v in df[k]]
return df
def give_examples(df, ref_df):
return f'{len(df)}x ({len(df) / len(ref_df) * 100:.1f}%) ({sorted([(t.seed, t.steps, t.score) for t in df.itertuples()][:5], key=lambda x: x[1])})'
def print_exceptions(df, ref_df):
print(HEADER, 'EXCEPTIONS:')
counter = Counter([extract_last_place_from_exception(r) for r in df.end_reason if r.startswith('exception:')])
for k, v in counter.most_common():
d = df[[r.startswith('exception:') and k == extract_last_place_from_exception(r) for r in df.end_reason]]
print(k, '\n', extract_last_part_from_exception(d.end_reason.iloc[0]), ':', give_examples(d, df))
print()
print()
print()
def get_group_from_end_reason(text):
if text.startswith('exception:'):
return 'exception'
if 'starved' in text or 'while fainted from lack of food' in text:
return 'food'
if 'the shopkeeper' in text:
return 'peaceful_mon'
if 'was poisoned' in text:
if 'corpse' in text or 'glob' in text:
return 'poisoned_food'
else:
return 'poisoned_other'
if 'turned to stone' in text:
return 'stone'
if 'frozen by a monster' in text:
return 'frozen'
if 'while sleeping' in text:
return 'sleeping'
return 'other'
def print_end_reasons(df, ref_df):
print(HEADER, 'END REASONS:')
for end_reason_group, d in sorted(df.groupby('end_reason_group'), key=lambda x: -len(x[1])):
print(' ', 'GROUP:', end_reason_group, ':', give_examples(d, ref_df))
counter = Counter([r for r in d.end_reason if not r.startswith('exception:')])
for k, v in counter.most_common():
d2 = d[[not r.startswith('exception:') and k == r for r in d.end_reason]]
print(' ', k, ':', give_examples(d2, d))
print()
print()
print()
def print_summary(comment, df, ref_df, indent=0):
indent_chars = ' ' * indent
print(indent_chars + HEADER, f'SUMMARY ({comment}):')
print(indent_chars + ' ', '*' * 8, 'stats')
for stat_name, stat_values in [
('score ', df.score),
*[(f'score-{role} ', df[df.role == role].score) for role in sorted(df.role.unique())],
*[(f'score-mile-{milestone} ', df[df.milestone == milestone].score) for milestone in
sorted(df.milestone.unique())],
('exp_level ', df.experience_level),
('dung_level ', df.level_num),
('runtime_dur ', df.duration),
]:
mean = np.mean(stat_values)
std = np.std(stat_values)
quantiles = np.quantile(stat_values, [0, 0.05, 0.25, 0.5, 0.75, 0.95, 1])
quantiles = ' '.join((f'{q:6.0f}' for q in quantiles))
with np.printoptions(precision=3, suppress=True):
print(indent_chars + ' ', stat_name, ':',
f'{mean:6.0f} +/- {std:6.0f} [{quantiles}] ({len(stat_values)}x)')
print(indent_chars + ' ', '*' * 8, 'end_reasons', give_examples(df, ref_df))
for end_reason_group, d in sorted(df.groupby('end_reason_group'), key=lambda x: -len(x[1])):
print(indent_chars + ' ', end_reason_group, ':', give_examples(d, df))
print()
def main(filepath):
df = load_df(filepath)
df.seed = [s[0] for s in df.seed]
df['end_reason_group'] = [get_group_from_end_reason(e) for e in df.end_reason]
df['role'] = [c[:3] for c in df.character]
median = np.median(df.score)
print_exceptions(df, df)
print_end_reasons(df, df)
print(HEADER, 'SORTED BY SCORE:')
print(df.sort_values('score'))
print()
print_summary('all', df, df)
print_summary('score >= median', df[df.score >= median], df)
print_summary('score < median', df[df.score < median], df)
print(HEADER, 'BY ROLE:')
for k, d in df.groupby('role'):
print_summary(k, d, df, indent=1)
print(HEADER, 'BY MILESTONE:')
for k, d in df.groupby('milestone'):
print_summary(f'milestone-{k}', d, df, indent=1)
print(HEADER, 'TO PASTE:')
std_median = np.std([np.median(np.random.choice(df.score, size=max(1, len(df) // 2))) for _ in range(1024)])
    print(f'median : {np.median(df.score)} +/- {std_median}')
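# The script presumably ends with a CLI entry point; a minimal guess based on
# the `sys` import at the top of the file (hypothetical, not from the source):
# if __name__ == '__main__':
#     main(sys.argv[1])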
import numpy as np
from math import *
π = np.pi
import scipy.special as ss
import scipy.integrate as sint
import mpmath
def convolve_around_center (func1, func2, N1, Nout, Δx, x_center=0):
u"""
Convolve two functions func1 and func2, with func1 decreasing away from 0 (convolution kernel) :
(func1*func2)(x) = ∫ dx1 func2(x-x1) func1(x1)
≃ Δx ∑ func2(x-k⋅Δx) func1(k⋅Δx) from k=-N1 to +N1
-> Only 2⋅N1+1 points of func1 are sampled around 0, while func2 is evaluated as needed (around x=x_center).
The result is ( x, (func1*func2)(x) ) with X = [x_center-Nout⋅Δx, x_center+Nout⋅Δx].
Typically, Nout << N1. Nout can be 0, and in that case, the result is simply (func1*func2)(x_center).
"""
# samples of func1
X1 = np.linspace(-N1*Δx, +N1*Δx, 2*N1+1)
Y1 = func1( X1 )
# samples of func2
X2 = x_center + np.linspace((-N1-Nout)*Δx, (+N1+Nout)*Δx, 2*(N1+Nout)+1)
Y2 = func2( X2 )
# output
Conv_x = x_center + np.linspace(-Nout*Δx, +Nout*Δx, 2*Nout+1)
Conv = np.zeros(2*Nout+1)
for i in range(2*Nout+1):
        # not optimal, as func2 would be re-evaluated needlessly here:
        # Y2 = func2( Conv_x[i] - X1 )
        # Conv[i] = np.sum( Y1 * Y2 ) * Δx
        # better:
Y2loc = Y2[i:i+2*N1+1]
Conv[i] = np.sum( Y1 * Y2loc ) * Δx
return Conv_x, Conv
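# --- Illustrative sketch (not part of the original module) ---
# Sanity check of convolve_around_center: convolving a unit-area Gaussian
# kernel with a Heaviside step should give an erf-like profile around x_center.
def _example_convolution(σ_kernel=0.5, Δx=0.01):
	kernel = lambda x: np.exp(-(x/σ_kernel)**2/2) / (sqrt(2*π)*σ_kernel)
	step = lambda x: np.heaviside(x, 0.5)
	return convolve_around_center(kernel, step, N1=2000, Nout=50, Δx=Δx, x_center=0.0)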
def distr_x0_harmonic (x, σ):
return np.exp(-(x/σ)**2/2)/sqrt(2*π)/σ
#----------------------------------------------------------------
# No resetting
def fpt_free_survival (L, t, D, σ):
survdist_σ0 = lambda x, t: ss.erf( x/np.sqrt(4*D*t) ) * np.heaviside(x,0.5)
if σ == 0:
return survdist_σ0(L,t)
else:
assert np.isscalar(L)
distr_x0 = lambda x: distr_x0_harmonic(x, σ)
def ps (t):
surv_f = lambda x: survdist_σ0(x, t)
return convolve_around_center(distr_x0, surv_f, x_center=L, N1=1000+int(500*sqrt(4*D*np.max(t))/σ), Nout=0, Δx=0.01*σ)[1]
return np.vectorize(ps)(t)
def fpt_free_distrib (t, x_targ, D, σ):
	if σ == 0:
		return x_targ/(2*np.sqrt(π*D*t**3)) * np.exp(-x_targ**2/(4*D*t))
	else:
		# σ > 0 would require convolving the σ=0 density with the initial x0 distribution
		pass
def fpt_2d_free_survival (R, t, D, Rtol, σ, regularize=True, split_domain=True):
if σ == 0:
a = Rtol/R
c = R/np.sqrt(4*D*t)
f = lambda x, a,c: np.exp(-x**2/(4*a**2*c**2)) / x * (ss.y0(x/a)*ss.j0(x)-ss.j0(x/a)*ss.y0(x)) / (ss.y0(x)**2+ss.j0(x)**2)
if regularize:
# regularization of the divergence of f at x=0 by substracting the leading-order term,
# which is, amazingly, integrable analytically; this allows the integrator to better behave;
# splitting the domain in two does improve the result a tiny bit;
# (but this method seems to lead to a slight overestimation of the survival proba, if the langevin simulations are accurate)
f_reg = lambda x, a,c: f(x,a,c) - 1/x * 2/π * log(1/a) / (1 + 4/π**2 * (np.euler_gamma+np.log(x/2))**2)
if split_domain: ps0 = lambda a,c: 2*log(1/a) + 2/π * ( sint.quad(f_reg, 0, 1, args=(a,c), epsabs=1e-6, limit=1000)[0] + sint.quad(f_reg, 1, +np.inf, args=(a,c), epsabs=1e-5, limit=1000)[0] )
else: ps0 = lambda a,c: 2*log(1/a) + 2/π * ( sint.quad(f_reg, 0, +np.inf, args=(a,c), epsabs=1e-5, limit=1000)[0] )
else:
# splitting the domain in two (one near zero where there is a singularity, the other to infinity)
# allows to use to integration methods, one on the finite domain which treats the singularity well
# and the other which treats the rest of the infinite domain without singularity
if split_domain: ps0 = lambda a,c: 2/π * ( sint.quad(f, 0, 0.1, args=(a,c), epsabs=1e-4, limit=1000)[0] + sint.quad(f, 0.1, +np.inf, args=(a,c), epsabs=1e-6, limit=1000)[0] )
else: ps0 = lambda a,c: 2/π * sint.quad(f, 0, +np.inf, args=(a,c), epsabs=1e-5, limit=1000)[0]
return np.vectorize( lambda a,c: (ps0(a,c) if a < 0.999 else 0.) )(a,c)
else:
# just a convolution of a guassian with the σ=0 curve
pass
#----------------------------------------------------------------
# Poissonian reset
def fpt_poisson_c (α, D, L):
return sqrt(α/D)*L
def fpt_poisson_inverselapl (x, t, α, D, σ, fpt):
mpmath.mp.dps = 30
x = np.atleast_1d(x)
t = np.atleast_1d(t)
P = np.zeros((len(x),len(t)))
sqrt2 = mpmath.sqrt(2)
if fpt:
ret_psr_lp = lambda psr,s: 1 - s*psr # p(tf) = - d/dt psr
else:
ret_psr_lp = lambda psr,s: psr
for i in range(len(x)):
if σ == 0:
def ps0_lp (κ, s):
return (1 - mpmath.exp(-κ * x[i])) / s
else:
b = x[i] / σ
def ps0_lp (κ, s):
k = σ * κ
return (1 - mpmath.exp(k**2/2)/2 * ( mpmath.exp(+κ*x[i]) * mpmath.erfc((b+k)/sqrt2)
+ mpmath.exp(-κ*x[i]) * (1+mpmath.erf((b-k)/sqrt2)) ) ) / s
def psr_lp (s):
κ = mpmath.sqrt( (α+s) / D )
ps0 = ps0_lp(κ, s=α+s)
psr = ps0 / (1 - α*ps0)
return ret_psr_lp(psr, s)
for j in range(len(t)):
if x[i] < 0:
P[i,j] = 0
else:
P[i,j] = mpmath.invertlaplace(psr_lp, t[j], method='talbot', degree=20)
return np.squeeze(P)
def fpt_poisson_survival (x, t, α, D, σ):
return fpt_poisson_inverselapl(x, t, α, D, σ, False)
def fpt_poisson_distrib (x, t, α, D, σ):
return fpt_poisson_inverselapl(x, t, α, D, σ, True)
def fpt_poisson_tau (b, c):
if np.all(np.isinf(b)):
return 4/c**2 * ( np.exp(c) - 1 )
else:
return 4/c**2 * ( (2*np.exp(-c**2/2/b**2)) / ( np.exp(c)*ss.erfc((c/b+b)/sqrt(2)) + np.exp(-c)*ss.erfc((c/b-b)/sqrt(2)) ) - 1 )
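# --- Illustrative sketch (not part of the original module) ---
# The mean FPT under Poissonian resetting, fpt_poisson_tau(b, c), has a single
# minimum in c in the b -> inf limit (resetting exactly to the origin); the
# minimiser is the root of (c - 2)*exp(c) + 2 = 0, roughly c ~ 1.59.
def _optimal_reset_c(b=np.inf, c_grid=None):
	if c_grid is None:
		c_grid = np.linspace(0.05, 5.0, 1000)
	tau = fpt_poisson_tau(b, c_grid)
	return c_grid[np.argmin(tau)]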
def fpt_2d_poisson_tau (b, c, a, do_warn_err=False):
a = np.fmin(a, 1-1e-10)
def func (a,b,c):
if b > 18.5:
if not np.isinf(b):
print("warning : approximating b={:.3f} by b=inf".format(b))
return ss.k0(a*c) / ss.k0(c) - 1
else:
# regularization of fD, not needed :
# # fDreg = lambda z, b,c: z * np.exp(-z**2/2) * ( ss.k0(c/b*z) * ss.i0(b*z) + np.log(z) )
# # d = -(a*b)**2/2
# # np.exp(-b**2/2) * sint.quad(fD, a*b, np.inf, args=(b,c))[0] - np.exp(d)*np.log(a*b) + ss.expi(d)/2
fDg = lambda z, b,c: z * np.exp(-b**2/2-z**2/2) * (ss.k0(c/b*z)/ss.k0(a*c)-1) * ss.i0(b*z)
Dg, Dgerr = sint.quad(fDg, a*b, max(10,2*b), args=(b,c), epsrel=1e-8)
# todo error checks
return 1/( 1 + Dg ) - 1
return 4/c**2 * np.vectorize(func)(a,b,c)
#----------------------------------------------------------------
# Periodical reset
def fpt_periodic_c (rT, D, L):
return L/sqrt(4*D*rT)
def fpt_periodic_tau (b, c):
if np.all(np.isinf(b)):
return ( ss.erf(c) + 2*c*(np.exp(-c**2)/sqrt(π)-c*ss.erfc(c)) ) / ss.erfc(c) / c**2
else:
int_exp_erf = lambda v,b,c: sint.quad( lambda u, v,b,c: np.exp(-u**2/2) * ss.erf(c/np.sqrt(v)*np.abs(1-u/b)), -np.inf, +np.inf, args=(v,b,c), epsrel=1e-1 )[0]
int_exp_erf = np.vectorize( int_exp_erf, excluded=(1,2) )
int_v = np.vectorize( lambda b,c: sint.quad( int_exp_erf, 0, 1, args=(b,c), epsrel=1e-1 )[0] )
int_exp_erfc = lambda b,c: sint.quad( lambda u, b,c: np.exp(-u**2/2) * ss.erfc(c*np.abs(1-u/b)), -np.inf, +np.inf, args=(b,c), epsrel=1e-3 )[0]
int_exp_erfc = np.vectorize( int_exp_erfc )
return int_v(b,c) / int_exp_erfc(b,c) / c**2
int_exp_erf = lambda b,c: sint.quad( lambda u, b,c: np.exp(-u**2/2) * ss.erf(c*np.abs(1-u/b)), -np.inf, +np.inf, args=(b,c), epsrel=1e-3 )[0]
int_exp_erf = np.vectorize( int_exp_erf )
def fpt_periodic_survival (t, rT, b, c):
global int_exp_erf
k = np.floor( t / rT )
if np.all(np.isinf(b)):
return ss.erf(c)**k * ss.erf(c*np.sqrt(rT/(t-k*rT)))
else:
int_exp_erf_kt = lambda b,c,k,t: sint.quad( lambda u, b,c,k,t: np.exp(-u**2/2) * ss.erf(c*np.abs(1-u/b)*np.sqrt(rT/(t-k*rT))), -np.inf, +np.inf, args=(b,c,k,t), epsrel=1e-3 )[0]
import numpy as np
import torch
from abc import ABC, abstractmethod
#import seaborn as sns
import matplotlib.pyplot as plt
import plyfile
import skimage.measure
import torch.nn as nn
import torch
import torch.nn.functional as F
class objective_func(ABC):
@abstractmethod
def func(self, x):
pass
def dfunc(self, x):
out = self.func(x)
out.backward()
return x.grad
def get_optimal(self):
return self.optimal
def get_optimum(self):
return self.optimum
def visualise1d(self, lim, n):
'''
lim: the visualisation scope [-lim, lim] in each dimension
n: the number of points used to interpolate between [-lim, lim]
'''
xs = np.linspace(-lim, lim, n)
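# Hedged sketch of how visualise1d might continue (the original is truncated here; this is
# an illustration, not the author's code): evaluate the objective on the grid and plot it.
# ys = [float(self.func(torch.tensor([x], dtype=torch.float32))) for x in xs]
# plt.plot(xs, ys)
# plt.xlabel('x'); plt.ylabel('f(x)')
# plt.show()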
#!/usr/bin/env python
"""
Test the creation of redundant features (see sklearn source)
The structure of X is columns of [informative, redundant, nuisance] features
"""
import numpy as np
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
from sympy import symbols
from synthetic_data.synthetic_data import (generate_redundant_features,
make_tabular_data,
transform_to_distribution)
np.random.seed(111)
#!/usr/bin/env python
# July 2014
"""Module for setting up statistical models"""
import numpy as np
from scipy.integrate import odeint
import pymc as mc
import tga
def tga_w( data, beta, N_k ):
# scenario parameters
beta = float(beta)/60. # heating rate, K/s
T_exp = data[:-1,0] # experimental temperatures, K
w_exp = data[:-1,1] # experimental mass fractions
T_0 = T_exp[0] - 50. # initial simulation temperature, K
T_f = data[-2,0] # final simulation temperature, K
w_f = data[-1,1] # residual mass fraction
# numerical parameters
N_t = 200 # resolution of solution
T_sol = np.linspace(T_0, T_f, N_t) # solution temperatures, K
# specified parameters
N_c = int(N_k) + 1 # number of components
# uncertain parameters, logA, E, nu
E_1 = 150e3 # lower bound activation energy, J/mol-K
E_2 = 350e3 # upper bound activation energy, J/mol-K
E_L = 40e3*np.ones( N_c - 1 )
E = np.linspace(E_1, E_2, N_c-1 ) # E, J/mol
E_U = 800e3*np.ones( N_c - 1 )
logA_L = 1.*np.ones( N_c - 1 )
logA = 20.*np.ones( N_c - 1 ) # log of pre-exponential, log(1/s)
logA_U = 70.*np.ones( N_c - 1 )
nu_L = w_f*np.ones( N_c - 2 )
nu = w_f**(1./(N_c-1))*np.ones( N_c - 2 ) # vector of stoichiometric coefficients
nu_U = np.ones( N_c - 2 )
# parameter list: [ logA, E, nu ]
params = np.append( logA, np.append(E, nu) )
params_L = np.append( logA_L, np.append(E_L, nu_L) )
params_U = np.append( logA_U, np.append(E_U, nu_U) )
import argparse
from itertools import product
import warnings
from joblib import Parallel, delayed
import librosa
import numpy as np
import pandas as pd
from scipy import signal, stats
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
from tsfresh.feature_extraction import feature_calculators
from earthquake import config
warnings.filterwarnings("ignore")
class FeatureGenerator(object):
"""Feature engineering.
"""
def __init__(
self,
path_to_store,
is_train=True,
n_rows=1e6,
n_jobs=1,
segment_size=150000
):
"""Decomposition of initial signal into the set of features.
Args:
path_to_store:
Path to .hdf store with original signal data.
is_train:
True, if creating the training set.
n_rows:
Number of rows in the training store.
n_jobs:
Number of parallel jobs.
segment_size:
Number of observations in each segment.
"""
self.path_to_store = path_to_store
self.n_rows = n_rows
self.n_jobs = n_jobs
self.segment_size = segment_size
self.is_train = is_train
if self.is_train:
self.total = int(self.n_rows / self.segment_size)
self.store = None
self.keys = None
else:
self.store = pd.HDFStore(self.path_to_store, mode='r')
self.keys = self.store.keys()
self.total = len(self.keys)
def __del__(self):
if self.store is not None:
self.store.close()
def segments(self):
"""Returns generator object to iterate over segments.
"""
if self.is_train:
for i in range(self.total):
start = i * self.segment_size
stop = (i + 1) * self.segment_size
# read one segment of data from .hdf store
data = pd.read_hdf(self.path_to_store, start=start, stop=stop)
x = data['acoustic_data'].values
y = data['time_to_failure'].values[-1]
seg_id = 'train_' + str(i)
del data
yield seg_id, x, y
else:
for key in self.keys:
seg_id = key[1:]
x = self.store[key]['acoustic_data'].values
yield seg_id, x, -999
def get_features(self, x, y, seg_id):
x = pd.Series(x)
# fast fourier transform
zc = np.fft.fft(x)
# real part
realFFT = pd.Series(np.real(zc))
# imaginary part
imagFFT = pd.Series(np.imag(zc))
main_dict = self.features(x, y, seg_id)
r_dict = self.features(realFFT, y, seg_id)
i_dict = self.features(imagFFT, y, seg_id)
for k, v in r_dict.items():
if k not in ['target', 'seg_id']:
main_dict[f'fftr_{k}'] = v
for k, v in i_dict.items():
if k not in ['target', 'seg_id']:
main_dict[f'ffti_{k}'] = v
return main_dict
def features(self, x, y, seg_id):
feature_dict = dict()
feature_dict['target'] = y
feature_dict['seg_id'] = seg_id
# lists with parameters to iterate over them
percentiles = [
1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 99]
hann_windows = [
50, 150, 1500, 15000]
spans = [
300, 3000, 30000, 50000]
windows = [
10, 50, 100, 500, 1000, 10000]
borders = list(range(-4000, 4001, 1000))
peaks = [
10, 20, 50, 100]
coefs = [
1, 5, 10, 50, 100]
autocorr_lags = [
5, 10, 50, 100, 500, 1000, 5000, 10000]
# basic stats
feature_dict['mean'] = x.mean()
feature_dict['std'] = x.std()
feature_dict['max'] = x.max()
feature_dict['min'] = x.min()
# basic stats on absolute values
feature_dict['mean_change_abs'] = np.mean(np.diff(x))
feature_dict['abs_max'] = np.abs(x).max()
feature_dict['abs_mean'] = np.abs(x).mean()
feature_dict['abs_std'] = np.abs(x).std()
# geometric and harmonic means
feature_dict['hmean'] = stats.hmean(np.abs(x[np.nonzero(x)[0]]))
feature_dict['gmean'] = stats.gmean(np.abs(x[np.nonzero(x)[0]]))
# k-statistic and moments
for i in range(1, 5):
feature_dict[f'kstat_{i}'] = stats.kstat(x, i)
feature_dict[f'moment_{i}'] = stats.moment(x, i)
for i in [1, 2]:
feature_dict[f'kstatvar_{i}'] = stats.kstatvar(x, i)
# aggregations on various slices of data
for agg_type, slice_length, direction in product(
['std', 'min', 'max', 'mean'],
[1000, 10000, 50000],
['first', 'last']):
if direction == 'first':
feature_dict[f'{agg_type}_{direction}_{slice_length}'] = \
x[:slice_length].agg(agg_type)
elif direction == 'last':
feature_dict[f'{agg_type}_{direction}_{slice_length}'] = \
x[-slice_length:].agg(agg_type)
feature_dict['max_to_min'] = x.max() / np.abs(x.min())
feature_dict['max_to_min_diff'] = x.max() - np.abs(x.min())
feature_dict['count_big'] = len(x[np.abs(x) > 500])
feature_dict['sum'] = x.sum()
feature_dict['mean_change_rate'] = self.calc_change_rate(x)
# calc_change_rate on slices of data
for slice_length, direction in product(
[1000, 10000, 50000], ['first', 'last']):
if direction == 'first':
feature_dict[f'mean_change_rate_{direction}_{slice_length}'] = \
self.calc_change_rate(x[:slice_length])
elif direction == 'last':
feature_dict[f'mean_change_rate_{direction}_{slice_length}'] = \
self.calc_change_rate(x[-slice_length:])
# percentiles on original and absolute values
for p in percentiles:
feature_dict[f'percentile_{p}'] = np.percentile(x, p)
feature_dict[f'abs_percentile_{p}'] = np.percentile(np.abs(x), p)
feature_dict['trend'] = self.add_trend_feature(x)
feature_dict['abs_trend'] = self.add_trend_feature(x, abs_values=True)
feature_dict['mad'] = x.mad()
feature_dict['kurt'] = x.kurtosis()
feature_dict['skew'] = x.skew()
feature_dict['med'] = x.median()
feature_dict['Hilbert_mean'] = np.abs(signal.hilbert(x)).mean()
for hw in hann_windows:
feature_dict[f'Hann_window_mean_{hw}'] = \
(signal.convolve(x, signal.hann(hw), mode='same') / sum(signal.hann(hw))).mean()
feature_dict['classic_sta_lta1_mean'] = \
self.classic_sta_lta(x, 500, 10000).mean()
feature_dict['classic_sta_lta2_mean'] = \
self.classic_sta_lta(x, 5000, 100000).mean()
feature_dict['classic_sta_lta3_mean'] = \
self.classic_sta_lta(x, 3333, 6666).mean()
feature_dict['classic_sta_lta4_mean'] = \
self.classic_sta_lta(x, 10000, 25000).mean()
feature_dict['classic_sta_lta5_mean'] = \
self.classic_sta_lta(x, 50, 1000).mean()
feature_dict['classic_sta_lta6_mean'] = \
self.classic_sta_lta(x, 100, 5000).mean()
feature_dict['classic_sta_lta7_mean'] = \
self.classic_sta_lta(x, 333, 666).mean()
feature_dict['classic_sta_lta8_mean'] = \
self.classic_sta_lta(x, 4000, 10000).mean()
# exponential rolling statistics
ewma = pd.Series.ewm
for s in spans:
feature_dict[f'exp_Moving_average_{s}_mean'] = \
(ewma(x, span=s).mean(skipna=True)).mean(skipna=True)
feature_dict[f'exp_Moving_average_{s}_std'] = \
(ewma(x, span=s).mean(skipna=True)).std(skipna=True)
feature_dict[f'exp_Moving_std_{s}_mean'] = \
(ewma(x, span=s).std(skipna=True)).mean(skipna=True)
feature_dict[f'exp_Moving_std_{s}_std'] = \
(ewma(x, span=s).std(skipna=True)).std(skipna=True)
feature_dict['iqr'] = np.subtract(*np.percentile(x, [75, 25]))
feature_dict['iqr1'] = np.subtract(*np.percentile(x, [95, 5]))
feature_dict['ave10'] = stats.trim_mean(x, 0.1)
for slice_length, threshold in product(
[50000, 100000, 150000], [5, 10, 20, 50, 100]):
feature_dict[f'count_big_{slice_length}_threshold_{threshold}'] = \
(np.abs(x[-slice_length:]) > threshold).sum()
feature_dict[f'count_big_{slice_length}_less_threshold_{threshold}'] = \
(np.abs(x[-slice_length:]) < threshold).sum()
feature_dict['range_minf_m4000'] = \
feature_calculators.range_count(x, -np.inf, -4000)
feature_dict['range_p4000_pinf'] = \
feature_calculators.range_count(x, 4000, np.inf)
for i, j in zip(borders, borders[1:]):
feature_dict[f'range_{i}_{j}'] = feature_calculators.range_count(x, i, j)
for autocorr_lag in autocorr_lags:
feature_dict[f'autocorrelation_{autocorr_lag}'] = \
feature_calculators.autocorrelation(x, autocorr_lag)
feature_dict[f'c3_{autocorr_lag}'] = \
feature_calculators.c3(x, autocorr_lag)
for p in percentiles:
feature_dict[f'binned_entropy_{p}'] = \
feature_calculators.binned_entropy(x, p)
feature_dict['num_crossing_0'] = \
feature_calculators.number_crossing_m(x, 0)
for peak in peaks:
feature_dict[f'num_peaks_{peak}'] = feature_calculators.number_peaks(x, peak)
for c in coefs:
feature_dict[f'spkt_welch_density_{c}'] = \
list(feature_calculators.spkt_welch_density(x, [{'coeff': c}]))[0][1]
feature_dict[f'time_rev_asym_stat_{c}'] = \
feature_calculators.time_reversal_asymmetry_statistic(x, c)
for w in windows:
x_roll_std = x.rolling(w).std().dropna().values
x_roll_mean = x.rolling(w).mean().dropna().values
feature_dict[f'ave_roll_std_{w}'] = x_roll_std.mean()
feature_dict[f'std_roll_std_{w}'] = x_roll_std.std()
feature_dict[f'max_roll_std_{w}'] = x_roll_std.max()
feature_dict[f'min_roll_std_{w}'] = x_roll_std.min()
for p in percentiles:
feature_dict[f'percentile_roll_std_{p}_window_{w}'] = \
np.percentile(x_roll_std, p)
feature_dict[f'av_change_abs_roll_std_{w}'] = \
np.mean(np.diff(x_roll_std))
feature_dict[f'av_change_rate_roll_std_{w}'] = \
np.mean(np.nonzero((np.diff(x_roll_std) / x_roll_std[:-1]))[0])
feature_dict[f'abs_max_roll_std_{w}'] = \
np.abs(x_roll_std).max()
feature_dict[f'ave_roll_mean_{w}'] = x_roll_mean.mean()
feature_dict[f'std_roll_mean_{w}'] = x_roll_mean.std()
feature_dict[f'max_roll_mean_{w}'] = x_roll_mean.max()
feature_dict[f'min_roll_mean_{w}'] = x_roll_mean.min()
for p in percentiles:
feature_dict[f'percentile_roll_mean_{p}_window_{w}'] = \
np.percentile(x_roll_mean, p)
feature_dict[f'av_change_abs_roll_mean_{w}'] = \
np.mean(np.diff(x_roll_mean))
feature_dict[f'av_change_rate_roll_mean_{w}'] = \
np.mean(np.nonzero((np.diff(x_roll_mean) / x_roll_mean[:-1]))[0])
feature_dict[f'abs_max_roll_mean_{w}'] = \
np.abs(x_roll_mean).max()
# Mel-frequency cepstral coefficients (MFCCs)
x = x.values.astype('float32')
mfcc = librosa.feature.mfcc(y=x)
for i in range(len(mfcc)):
feature_dict[f'mfcc_{i}_avg'] = np.mean(np.abs(mfcc[i]))
# spectral features
feature_dict['spectral_centroid'] = \
np.mean(np.abs(librosa.feature.spectral_centroid(y=x)[0]))
feature_dict['zero_crossing_rate'] = \
np.mean(np.abs(librosa.feature.zero_crossing_rate(y=x)[0]))
feature_dict['spectral_flatness'] = \
np.mean(np.abs(librosa.feature.spectral_flatness(y=x)[0]))
feature_dict['spectral_contrast'] = \
np.mean(np.abs(librosa.feature.spectral_contrast(S=np.abs(librosa.stft(x)))[0]))
feature_dict['spectral_bandwidth'] = \
np.mean(np.abs(librosa.feature.spectral_bandwidth(y=x)[0]))
return feature_dict
def generate(self):
feature_list = []
res = Parallel(n_jobs=self.n_jobs, backend='threading')(
delayed(self.get_features)(x, y, s)
for s, x, y in tqdm(self.segments(),
total=self.total,
ncols=100,
desc='generating features',
ascii=True))
for r in res:
feature_list.append(r)
return pd.DataFrame(feature_list)
@staticmethod
def add_trend_feature(arr, abs_values=False):
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
@staticmethod
def classic_sta_lta(x, length_sta, length_lta):
sta = np.cumsum(x ** 2)
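# Hedged sketch (illustration only; the original method is truncated at this point): the
# classic STA/LTA ratio is usually completed from the cumulative sum along these lines.
def _classic_sta_lta_sketch(x, length_sta, length_lta):
    x = np.asarray(x, dtype=float)
    sta = np.cumsum(x ** 2)
    lta = sta.copy()
    sta[length_sta:] = sta[length_sta:] - sta[:-length_sta]   # short-term average window
    sta /= length_sta
    lta[length_lta:] = lta[length_lta:] - lta[:-length_lta]   # long-term average window
    lta /= length_lta
    sta[:length_lta - 1] = 0                                  # zero the warm-up region
    return sta / np.maximum(lta, np.finfo(float).tiny)        # avoid division by zero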
import numpy as np
def get_trajectory_txt(trajectory):
cell = trajectory.get_cells()[0]
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
alpha = np.arccos(np.dot(cell[1], cell[2])/(c*b))
gamma = np.arccos(np.dot(cell[1], cell[0])/(a*b))
beta = np.arccos(np.dot(cell[2], cell[0])/(a*c))
xhi = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
yhi = np.sqrt(pow(b,2)- pow(xy,2))
yz = (b*c*np.cos(alpha)-xy * xz)/yhi
zhi = np.sqrt(pow(c,2)-pow(xz,2)-pow(yz,2))
xhi = xhi + max(0.0, xy, xz, xy+xz)
yhi = yhi + max(0.0, yz)
xlo_bound = np.min([0.0, xy, xz, xy+xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
ylo_bound = np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = 0
zhi_bound = zhi
ind = trajectory.get_array('steps')
lammps_data_file = ''
for i, position_step in enumerate(trajectory.get_positions()):
lammps_data_file += 'ITEM: TIMESTEP\n'
lammps_data_file += '{}\n'.format(ind[i])
lammps_data_file += 'ITEM: NUMBER OF ATOMS\n'
lammps_data_file += '{}\n'.format(len(position_step))
lammps_data_file += 'ITEM: BOX BOUNDS xy xz yz pp pp pp\n'
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(xlo_bound, xhi_bound, xy)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(ylo_bound, yhi_bound, xz)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(zlo_bound, zhi_bound, yz)
lammps_data_file += ('ITEM: ATOMS x y z\n')
for position in position_step:
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(*position)
return lammps_data_file
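# Hedged sanity check (not part of the original module): for an orthogonal cell the tilt
# factors computed with the formulas above should vanish (up to floating-point error).
def _demo_orthogonal_cell_tilts(edge=10.0):
    cell = np.eye(3) * edge
    a = np.linalg.norm(cell[0]); b = np.linalg.norm(cell[1]); c = np.linalg.norm(cell[2])
    gamma = np.arccos(np.dot(cell[1], cell[0]) / (a * b))
    beta = np.arccos(np.dot(cell[2], cell[0]) / (a * c))
    xy, xz = b * np.cos(gamma), c * np.cos(beta)
    return xy, xz  # both ≈ 0 for an orthogonal cell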
def parameters_to_input_file(parameters_object):
parameters = parameters_object.get_dict()
input_file = ('STRUCTURE FILE POSCAR\nPOSCAR\n\n')
input_file += ('FORCE CONSTANTS\nFORCE_CONSTANTS\n\n')
input_file += ('PRIMITIVE MATRIX\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[0])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[1])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[2])
input_file += ('\n')
input_file += ('SUPERCELL MATRIX PHONOPY\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[0])
from IPython.core.display import display, HTML
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pickle
import cv2
import glob
import time
import os
from feature_extraction import extractFeatures, configParams, extract_hog_features, extract_color_features
def search_windows(img, windows, classifier, X_scaler):
hog_feat = configParams['use_hog_feat']
spatial_feat = configParams['use_spatial_feat']
hist_feat = configParams['use_hist_feat']
# Create an empty list to receive positive detection windows
on_windows = []
count_window = 0
count_car_window = 0
# Iterate over all windows in the list
for window in windows:
# Extract the test window from original image
window_img = cv2.resize(img[window[0][1]:window[1][1],
window[0][0]:window[1][0]],
(64, 64))
# Extract features for that window
img_features = extractFeatures(window_img, verbose=False,
hog_feat=hog_feat, spatial_feat=spatial_feat, hist_feat=hist_feat)
# Scale extracted features to be fed to classifier
test_features = X_scaler.transform(np.array(img_features).reshape(1, -1))
# Predict using your classifier
prediction = classifier.predict(test_features)
# If positive (prediction == 1) then save the window
count_window += 1
if prediction == 1:
count_car_window += 1
on_windows.append(window)
# Return windows for positive detections
return on_windows
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))
ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))
# Compute the number of windows in x/y
nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))
ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))
nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step)
ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step)
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
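# Hedged usage sketch (not part of the original file; `frame` is a placeholder image array):
# generate 96x96 windows with 50% overlap over the lower half of a 720p frame.
def _demo_slide_window(frame):
    return slide_window(frame, x_start_stop=[None, None], y_start_stop=[360, None],
                        xy_window=(96, 96), xy_overlap=(0.5, 0.5))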
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
imcopy = np.copy(img)
# %%
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
from scipy.stats import norm
from matplotlib.animation import FuncAnimation
from svgpathtools import svg2paths
from svgpath2mpl import parse_path
# matplotlib parameters to ensure correctness of Chinese characters
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif']=['Arial Unicode MS', 'SimHei'] # Chinese font
plt.rcParams['axes.unicode_minus']=False # correct minus sign
plt.rcParams["font.size"] = 14
plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20
def gen_marker(fname:str, rotation:float=180):
"""Generate maker from svg image file.
Args:
fname (str): filename of svg image.
rotation (int, optional):
degree of rotation of original images. Defaults to 180.
Returns:
Object of marker.
"""
person_path, attributes = svg2paths(fname)
person_marker = parse_path(attributes[0]['d'])
person_marker.vertices -= person_marker.vertices.mean(axis=0)
person_marker = person_marker.transformed(Affine2D().rotate_deg(rotation))
person_marker = person_marker.transformed(Affine2D().scale(-1,1))
return person_marker
person_mkr = gen_marker('icons/person.svg',)
#%%
class UpdateFigure:
def __init__(self, ax:plt.Axes,
ax_main:plt.Axes, ax_right:plt.Axes, ax_top:plt.Axes, ax_colorbar:plt.Axes, data:np.ndarray):
"""Plot the first frame for the animation.
Args:
ax (plt.Axes): axes of scatter plot
ax_main (plt.Axes): axes of transfer function
ax_right (plt.Axes): axes of histogram
ax_top (plt.Axes): axes of histogram
ax_colorbar (plt.Axes): axes of the colorbar
data (np.ndarray): random data for plotting
"""
self.color_repo = dict(
blue = '#375492',
green = '#88E685',
dark_green = '#00683B',
red = '#93391E',
pink = '#E374B7',
purple = '#A268B4',
black = '#000000',
)
self.cm = plt.cm.RdYlBu_r
self.colors = dict(
mkr_init=[0,0,0,1],
transfer=self.color_repo['blue'],
f1 =self.color_repo['blue'],
f2 =self.color_repo['green'],
gl =self.color_repo['black'],
)
# ====================
# Define transfer functions
# ====================
self.transfer = lambda x: norm.ppf(x, loc=0.5, scale=0.15)
self.transfer_grad_inv = lambda x: norm.pdf(x, loc=0.5, scale=0.15)
self.transfer_inv = lambda y: norm.cdf(y, loc=0.5, scale=0.15)
# ====================
# generate the grid of person
# ====================
self.ax = ax
xn, yn = 20, 20
xx, yy = np.meshgrid(np.arange(xn), np.arange(yn))
import numpy as np
import matplotlib.pylab as plot
from astropy.io import ascii,fits
from scipy import interpolate
import grb_catalogs
from BurstCube.LocSim.Detector import *
from BurstCube.LocSim.Spacecraft import *
from astropy.coordinates import SkyCoord
from astropy import units as u
from scipy.optimize import curve_fit
from astropy.table import Table
import healpy as hp
from gammaray_proposal_tools import *
### run code
def run(dir='/Users/jracusin/BurstCube/gitrep/Users/jracusin/',nsims=10000,minflux=0.5):
burstcube, BCpointings, aeff_bc = setup_BC(dir=dir)
fermi, GBMpointings, aeff_gbm=setup_GBM(dir=dir)
## Aeff at 100 keV
# bcaeff=loginterpol(aeff_bc['keV'],aeff_bc['aeff'],150.)
# gbmaeff=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],150.)
# print(bcaeff,gbmaeff)
#Aeff on same energy points
eng=np.logspace(np.log10(50),np.log10(300),100)
bcaeff=loginterpol(aeff_bc['keV'],aeff_bc['aeff'],eng)
gbmaeff=loginterpol(aeff_gbm['energy'],aeff_gbm['aeff'],eng)
# print(bcaeff/gbmaeff)
trig,gbm=load_GBM_catalogs(dir=dir)
s=np.where(gbm['T90']<=2.0)[0]
sgbm=gbm[s]
print(len(sgbm))
# realgbmflux=sgbm['FLUX_BATSE_1024']
# wreal=np.where(realgbmflux>0)[0]
interval=1.0 #s
bgrate=300. #cts/s in 50-300 keV
gbmexposures, bcexposures, secondhighestgbm, secondhighestbc, randgbmexposures, randbcexposures=throw_grbs(fermi,burstcube,nsims=nsims)
# simgbmcr,simbccr,simgbmpfsample,simbcpfsample,realpf,pinterval=grb_spectra(sgbm,gbmaeff,bcaeff,eng,nsims,interval=interval)
gbmflux2counts,bcflux2counts,realpf=grb_spectra(sgbm,gbmaeff,bcaeff,eng,nsims,interval=interval)
pf=logNlogS(bcaeff,gbmaeff,minflux=minflux,nsims=nsims,interval=interval)
r=np.array(np.round(np.random.rand(nsims)*(len(realpf)-1)).astype('int'))
simgbmcr=pf*gbmflux2counts[r]
simbccr=pf*bcflux2counts[r]
simgbmpfsample=pf
simbcpfsample=pf
pinterval=1.
# simgbmcr,simbccr,simgbmpfsample,simbcpfsample=logNlogS(bcaeff,gbmaeff,minflux=minflux,nsims=nsims,interval=interval)
realgbmflux=realpf
wreal=np.where(realgbmflux>0)[0]
pf=simgbmpfsample
#Solve for the number of detected counts which will equal our source photons
sourcegbm = simgbmcr*secondhighestgbm*pinterval
sourcebc = simbccr*secondhighestbc*pinterval
#randomize background rate around typical background of 300 cts/s (50-300 keV, GBM)
bckgrd=np.random.poisson(bgrate,nsims)
scaledgbmbckgrd = bckgrd*pinterval
scaledbcbckgrd = bckgrd*np.median(bcaeff/gbmaeff)*pinterval
#creating an array of zeros that I can manipulate to create an array of detected GRBs
detectgbm = np.zeros(len(sourcegbm))
detectbc = np.zeros(len(sourcebc))
#calculate the significance of the second highest exposure detector. If the significance is greater than 4.5 sigma, then the burst is detectable.
for u in range(len(sourcegbm)):
if sourcegbm[u]>0:
sig = sourcegbm[u] / (np.sqrt(sourcegbm[u] + scaledgbmbckgrd[u]))
if sig > 4.5:
detectgbm[u] = 1.0
else:
detectgbm[u] = 0.0
for j in range(len(sourcebc)):
if sourcebc[j]>0:
sig = sourcebc[j] / (np.sqrt(sourcebc[j] + scaledbcbckgrd[j]))
if sig > 4.5:
detectbc[j] = 1.0
else:
detectbc[j] = 0.0
else: sig=0
#Creating plot of peak flux versus counts for real and simulated GBM
w=np.where(pf>0)[0]
wg = np.where(simgbmcr*detectgbm>0.)[0]
wbc = np.where(simbccr*detectbc>0.)[0]
fig=plot.figure(figsize=(10,8))
plot.subplot(2,2,1)
# plot.hist(gbmcr[w],label='real GBM',bins=np.logspace(1,6,40),color='orange')
plot.hist(simgbmcr[wg],label='GBM',bins=np.logspace(1,6,40),alpha=0.7,color='blue')
plot.hist(simbccr[wbc],label='BurstCube',bins=np.logspace(1,6,40),alpha=0.7,color='green')
plot.xlabel('Count Rate (50-300 keV; cts/s)')
plot.xscale('log')
plot.yscale('log')
plot.xlim([10,5e4])
plot.ylabel('N Simulated sGRBs')
plot.legend()
plot.subplot(2,2,2)
plot.hist(simgbmpfsample,label='Simulated total',bins=np.logspace(-1,4,40),alpha=1.0,color='C3')
plot.hist(realgbmflux[wreal],label='real GBM',bins=np.logspace(-1,4,40),color='orange', alpha=0.7)
# this is the simulated GBM
plot.hist(simgbmpfsample[wg],label='GBM',bins=np.logspace(-1,4,40),alpha=0.5,color='blue')
plot.hist(simbcpfsample[wbc],label='BC',bins=np.logspace(-1,4,40))
# This code is largely adapted from Udacity's CarND lectures.
# Available: https://classroom.udacity.com/nanodegrees/nd013/
import matplotlib.pyplot as plt
import cv2
import numpy as np
from HOG import feature_extract
from train import loadModel, train
from scipy.ndimage.measurements import label
import glob
debug = False
showAll = False
orientations = 11
pixels_per_cell = 16
cells_per_block = 2
# Block split static params
window = 64
nFeaturesPerBlock = orientations * cells_per_block ** 2
nBlocksPerWindow = (window // pixels_per_cell) - cells_per_block + 1
stride = 2
lastHeatMap = None
if not debug:
fig = plt.figure()
def getCars(image, scaleDiv, model, yStart=350, yStop=660, xStart=0, xStop=1280):
"""
:param image: Cropped RGB image frame
:param scaleDiv: Divides image dimensions by this factor
:return:
"""
global debug
image = image[yStart:yStop, xStart:xStop]
# 1: Scale
if scaleDiv != 1:
imshape = image.shape
img = cv2.resize(image, (np.int(imshape[1] / scaleDiv),
np.int(imshape[0] / scaleDiv)))
else:
img = np.copy(image)
# Split into blocks
nxblocks = (img.shape[1] // pixels_per_cell) - cells_per_block + 1
nyblocks = (img.shape[0] // pixels_per_cell) - cells_per_block + 1
nxsteps = (nxblocks - nBlocksPerWindow) // stride + 1
nysteps = (nyblocks - nBlocksPerWindow) // stride + 1
# HOG Transform
HOG_image = feature_extract([img], False)
HOG_image = np.array(HOG_image[0])
rectangles = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb * stride
xpos = xb * stride
# Extract HOG for this patch
featCrop = HOG_image[:, ypos:ypos + nBlocksPerWindow, \
xpos:xpos + nBlocksPerWindow].ravel()
xleft = xpos * pixels_per_cell
ytop = ypos * pixels_per_cell
# Extract the image patch
test_prediction = model.predict(featCrop.reshape(1, -1))
global showAll
if test_prediction == 1 or showAll:
xbox_left = np.int(xleft * scaleDiv)
ytop_draw = np.int(ytop * scaleDiv)
win_draw = np.int(window * scaleDiv)
rectangles.append(
((xbox_left + xStart, ytop_draw + yStart),
(xbox_left + win_draw + xStart, ytop_draw + yStart + win_draw)))
return rectangles
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
"""
# Copied from Udacity CarND lesson material
:param img:
:param bboxes:
:param color:
:param thick:
:return:
"""
# Make a copy of the image
imcopy = np.copy(img)
random_color = False
# Iterate through the bounding boxes
for bbox in bboxes:
if color == 'random' or random_color:
color = (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255))
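# Hedged sketch of the remainder (the original is truncated here; illustration only): with
# the colour chosen, the loop typically draws each rectangle on the copy, which is returned
# after the loop, e.g.
# cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# return imcopy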
"""Training - mitosis detection"""
import argparse
from datetime import datetime
import json
import math
import os
import pickle
import shutil
import sys
import numpy as np
import tensorflow as tf
import tensorboard as tb
import resnet
import resnet50
# data
def get_image(filename, patch_size):
"""Get image from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized.
Returns:
TensorFlow tensor containing the decoded and resized image with
type float32 and values in [0, 1).
"""
image_string = tf.read_file(filename)
# shape (h,w,c), uint8 in [0, 255]:
image = tf.image.decode_png(image_string, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # float32 [0, 1)
# TODO: remove this
#image = tf.image.resize_images(image, [patch_size, patch_size]) # float32 [0, 1)
#with tf.control_dependencies(
# [tf.assert_type(image, tf.float32, image.dtype),
# tf.verify_tensor_all_finite(image, "image tensor contains NaN or INF values"]):
return image
def get_label(filename):
"""Get label from filename.
Args:
filename: String in format "**/train|val/mitosis|normal/name.{ext}",
where the label is either "mitosis" or "normal".
Returns:
TensorFlow float binary label equal to 1 for mitosis or 0 for
normal.
"""
# note file name format:
# lab is a single digit, case and region are two digits with padding if needed
splits = tf.string_split([filename], "/")
label_str = splits.values[-2]
# check that label string is valid
is_valid = tf.logical_or(tf.equal(label_str, 'normal'), tf.equal(label_str, 'mitosis'))
assert_op = tf.Assert(is_valid, [label_str])
with tf.control_dependencies([assert_op]): # test for correct label extraction
#label = tf.to_int32(tf.equal(label_str, 'mitosis'))
label = tf.to_float(tf.equal(label_str, 'mitosis')) # required because model produces float
return label
def preprocess(filename, patch_size):
"""Get image and label from filename.
Args:
filename: String filename of an image.
patch_size: Integer length to which the square image will be
resized, if necessary.
Returns:
Tuple of a float32 image Tensor with shape (h,w,c) and values in
[0, 1), a binary label, and a filename.
"""
# return image_resized, label
label = get_label(filename)
#label = tf.expand_dims(label, -1) # make each scalar label a vector of length 1 to match model
image = get_image(filename, patch_size) # float32 in [0, 1)
return image, label, filename
def normalize(image, model_name):
"""Normalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with values in [0, 1].
model_name: String indicating the model to use.
Returns:
A normalized image Tensor of shape (...,h,w,c).
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image[..., ::-1] # rbg -> bgr
image = image * 255 # float32 in [0, 255]
image = image - means # mean centering using imagenet means
else:
# normalize to [-1, 1]
#image = image / 255
image = image - 0.5
image = image * 2
return image
def unnormalize(image, model_name):
"""Unnormalize an image tensor.
Note: due to broadcasting, this works with a single image, or a batch
of images.
Args:
image: A Tensor of shape (...,h,w,c) with normalized values.
model_name: String indicating the model to use.
Returns:
An unnormalized image Tensor of shape (...,h,w,c) with values in
[0, 1].
"""
# NOTE: don't use in-place updates to avoid side-effects
if model_name in ("vgg", "vgg19", "resnet"):
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
image = image + means # mean centering using imagenet means
image = image / 255 # float32 in [0, 1]
image = image[..., ::-1] # bgr -> rgb
else:
image = image / 2
image = image + 0.5
return image
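# Hedged sanity-check sketch (not part of the original training script): normalize and
# unnormalize should round-trip an image for a given model name, up to float error.
def _demo_normalize_roundtrip(model_name="resnet"):
    image = tf.random_uniform([1, 64, 64, 3])                # values in [0, 1)
    back = unnormalize(normalize(image, model_name), model_name)
    return tf.reduce_max(tf.abs(back - image))               # ≈ 0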
def augment(image, patch_size, seed=None):
"""Apply random data augmentation to the given image.
Args:
image: A Tensor of shape (h,w,c) with values in [0, 1].
patch_size: The patch size to which to randomly crop the image.
seed: An integer used to create a random seed.
Returns:
A data-augmented image with values in [0, 1].
"""
# NOTE: these values currently come from the Google pathology paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al.
# Detecting Cancer Metastases on Gigapixel Pathology Images. arXiv.org. 2017.
# TODO: convert these hardcoded values into hyperparameters!!
# NOTE: if the seed is None, these ops will be seeded with a completely random seed, rather than
# a deterministic one based on the graph seed. This appears to only happen within the map
# functions of the Dataset API, based on the `test_num_parallel_calls` and
# `test_image_random_op_seeds` tests. For now, we will pass in a seed from the user and use it
# at the op level.
# NOTE: Additionally, if the Dataset.map() function that calls this function is using
# `num_parallel_calls` > 1, the results will be non-reproducible.
# TODO: https://github.com/tensorflow/tensorflow/issues/13932
# NOTE: ouch! It turns out that a reinitializable iterator for a Dataset will cause any ops with
# random seeds, such as these, to be reset, and thus each epoch will be evaluated exactly the
# same. The desired behavior would be to seed these ops once at the very beginning, so that an
# entire training run can be deterministic, but not with the exact same random augmentation during
# each epoch. Oh TensorFlow...
shape = tf.shape(image) # (h, w, c)
# random zoom
# TODO: possibly re-enable random zooms enabled via flag
#lb = shape[0] # lower bound on the resize is the current size of the image
#ub = lb + tf.to_int32(tf.to_float(lb)*0.25) # upper bound is 25% larger
#new_size = tf.random_uniform([2], minval=lb, maxval=ub, dtype=tf.int32, seed=seed)
#image = tf.image.resize_images(image, new_size) # random resize
#image = tf.random_crop(image, shape, seed=seed) # random cropping back to original size
# mirror padding if needed
size = int(math.ceil((patch_size + 30) * (math.cos(math.pi/4) + math.sin(math.pi/4))))
#pad_h = tf.maximum(0, size - shape[0])
#pad_w = tf.maximum(0, size - shape[1])
#pad_h_before = tf.to_int32(tf.floor(pad_h / 2))
#pad_w_before = tf.to_int32(tf.floor(pad_w / 2))
#pad_h_after = pad_h - pad_h_before
#pad_w_after = pad_w - pad_w_before
#paddings = tf.reshape(
# tf.stack([pad_h_before, pad_h_after, pad_w_before, pad_w_after, 0, 0], 0),
# [3, 2]) # h, w, z before/after paddings
pad = tf.to_int32(tf.ceil(tf.maximum(0, size - shape[0]) / 2))
paddings = tf.reshape(tf.stack([pad, pad, pad, pad, 0, 0], 0), [3, 2]) # h, w, z before/after
image = tf.pad(image, paddings, mode="REFLECT")
# random rotation
angle = tf.random_uniform([], minval=0, maxval=2*np.pi, seed=seed)
image = tf.contrib.image.rotate(image, angle, "BILINEAR")
# crop to bounding box to allow for random translation crop, if the input image is large enough
# note: translation distance: 7 µm = 30 pixels = max allowable euclidean pred distance from
# actual mitosis
# note: the allowable region is a circle with radius 30, but we are cropping to a square, so we
# can't crop from a square with side length of patch_size+30 or we run the risk of moving the
# mitosis to a spot in the corner of the resulting image, which would be outside of the circle
# radius, and thus we would be incorrect to label that image as positive. We also want to impose
# some amount of buffer into our learned model, so we place an upper bound of `c` pixels on the
# distance. We sample a distance along the height axis, compute a valid distance along the width
# axis that is upper bounded by a Euclidean translation distance of `c` in the worst case, crop
# the center of the image to this height and width, and then perform a random crop, yielding a
# patch for which the center is at most `c` pixels from the true center in terms of Euclidean
# distance.
# NOTE: In the dataset, all normal samples must be > 60 pixels from the center of a mitotic figure
# to avoid random crops that end up incorrectly within a mitotic region.
# c = 25 = sqrt(a**2 + b**2) = 6.25 µm
c = 25 # TODO: set this as a hyperparameter
a = tf.random_uniform([], minval=0, maxval=c, dtype=tf.int32, seed=seed)
b = tf.to_int32(tf.floor(tf.sqrt(tf.to_float(c**2 - a**2))))
crop_h = tf.minimum(shape[0], patch_size + a)
crop_w = tf.minimum(shape[1], patch_size + b)
image = tf.image.resize_image_with_crop_or_pad(image, crop_h, crop_w)
# random central crop == random translation augmentation
image = tf.random_crop(image, [patch_size, patch_size, 3], seed=seed)
image = tf.image.random_flip_up_down(image, seed=seed)
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.random_brightness(image, 64/255, seed=seed)
image = tf.image.random_contrast(image, 0.25, 1, seed=seed)
image = tf.image.random_saturation(image, 0.75, 1, seed=seed)
image = tf.image.random_hue(image, 0.04, seed=seed)
image = tf.clip_by_value(image, 0, 1)
return image
def create_augmented_batch(image, batch_size, patch_size):
"""Create a batch of augmented versions of the given image.
This will sample `batch_size/4` augmented images deterministically,
and yield four rotated variants for each augmented image (0, 90, 180,
270 degrees).
Args:
image: A Tensor of shape (h,w,c).
batch_size: Number of augmented versions to generate.
patch_size: The patch size to which to randomly crop the image.
Returns:
A Tensor of shape (batch_size,h,w,c) containing a batch of
data-augmented versions of the given image.
"""
assert batch_size % 4 == 0 or batch_size == 1, "batch_size must be 1 or divisible by 4"
# TODO rewrite this function to just draw `batch_size` examples from the `augment` function
def rots_batch(image):
rot0 = image
rot90 = tf.image.rot90(image)
rot180 = tf.image.rot90(image, k=2)
rot270 = tf.image.rot90(image, k=3)
rots = tf.stack([rot0, rot90, rot180, rot270])
return rots
if batch_size >= 4:
image_crop = tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size)
images = rots_batch(image_crop)
for i in range(round(batch_size/4)-1):
aug_image = augment(image, patch_size, i)
aug_image_rots = rots_batch(aug_image)
images = tf.concat([images, aug_image_rots], axis=0)
else:
images = tf.expand_dims(image, 0)
return images
def marginalize(x):
"""Marginalize over injected noise at test time.
This implements noise marginalization by averaging over a batch of
values. Typically, this would be used with logits for a batch of
augmented versions of a single image, or for the associated batch
of labels. This is only performed at test time when
`tf.keras.backend.learning_phase() == 0`.
Args:
x: A Tensor of shape (n,...).
Returns:
A Tensor of shape (1, ...) containing the average over the batch
dimension.
"""
avg_x = tf.reduce_mean(x, axis=0, keepdims=True, name="avg_x")
x = tf.cond(tf.logical_not(tf.keras.backend.learning_phase()), lambda: avg_x, lambda: x)
return x
def process_dataset(dataset, model_name, patch_size, augmentation, marginalization, marg_batch_size,
threads, seed=None):
"""Process a Dataset.
Args:
dataset: Dataset of filenames.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`marg_batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
marg_batch_size: Integer training batch size.
threads: Integer number of threads for dataset buffering.
seed: Integer random seed.
Returns:
A labeled Dataset of augmented, normalized images, possibly with
marginalization.
"""
dataset = dataset.map(lambda filename: preprocess(filename, patch_size),
num_parallel_calls=threads)
# augment (typically at training time)
if augmentation:
dataset = dataset.map(
lambda image, label, filename: (augment(image, patch_size, seed), label, filename),
num_parallel_calls=threads)
else:
# we are now generating larger original images to allow for random rotations & translations
# during augmentation, and thus if we don't apply augmentation, we need to ensure that the
# images are center cropped to the correct size.
dataset = dataset.map(lambda image, label, filename:
(tf.image.resize_image_with_crop_or_pad(image, patch_size, patch_size), label, filename),
num_parallel_calls=threads)
# TODO: should this be in an `elif` block before the above `else` block? in particular, the
# patch sizes will be messed up
# marginalize (typically at eval time)
if marginalization:
dataset = dataset.map(lambda image, label, filename:
(create_augmented_batch(image, marg_batch_size, patch_size),
tf.tile(tf.expand_dims(label, -1), [marg_batch_size]),
tf.tile(tf.expand_dims(filename, -1), [marg_batch_size])),
num_parallel_calls=threads)
# normalize
dataset = dataset.map(lambda image, label, filename:
(normalize(image, model_name), label, filename), num_parallel_calls=threads)
return dataset
def create_dataset(path, model_name, patch_size, batch_size, shuffle, augmentation, marginalization,
oversampling, threads, prefetch_batches, seed=None):
"""Create a dataset.
Args:
path: String path to the generated image patches. This should
contain folders for each class.
model_name: String indicating the model to use.
patch_size: Integer length to which the square patches will be
resized.
batch_size: Integer training batch size.
shuffle: Boolean for whether or not to shuffle filenames.
augmentation: Boolean for whether or not to apply random augmentation
to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`batch_size` must be divisible by 4, or equal to 1 for a special
debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling. Not compatible with
marginalization.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
seed: Integer random seed.
Returns:
A Dataset object.
"""
# read & process images
if oversampling:
# oversample the minority mitosis class via class-aware sampling, in which we sample the mitosis
# and normal samples separately in order to yield class-balanced mini-batches.
mitosis_dataset = tf.data.Dataset.list_files(os.path.join(path, "mitosis", "*.png"))
normal_dataset = tf.data.Dataset.list_files(os.path.join(path, "normal", "*.png"))
# zipping will stop once the normal dataset is empty
mitosis_dataset = mitosis_dataset.repeat(-1).shuffle(int(1e6))
normal_dataset = normal_dataset.shuffle(int(1e6))
mitosis_dataset = process_dataset(mitosis_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
normal_dataset = process_dataset(normal_dataset, model_name, patch_size, augmentation, False,
batch_size, threads, seed)
# zip together the datasets, then flatten and batch so that each mini-batch contains an even
# number of mitosis and normal samples
# NOTE: the number of elements in the zipped dataset is limited to the lesser of the mitosis and
# normal datasets, and since the mitosis dataset is set to repeat indefinitely, this zipped
# dataset will be limited to the number of normal samples
dataset = tf.data.Dataset.zip((mitosis_dataset, normal_dataset))
dataset = dataset.flat_map(lambda mitosis, normal:
tf.data.Dataset.from_tensors(mitosis).concatenate(tf.data.Dataset.from_tensors(normal)))
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = tf.data.Dataset.list_files(os.path.join(path, "*", "*.png"))
if shuffle:
dataset = dataset.shuffle(int(1e7))
dataset = process_dataset(dataset, model_name, patch_size, augmentation, marginalization,
batch_size, threads, seed)
# batch if necessary
if not marginalization:
dataset = dataset.batch(batch_size)
# note that batch norm could be affected by very small final batches, but right now this would
# also affect evaluation tasks, so we will wait to enable this until we move to tf Estimators
#dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
# prefetch
dataset = dataset.prefetch(prefetch_batches)
return dataset
# model
def create_model(model_name, input_shape, images):
"""Create a model.
Args:
model_name: String indicating the model to use in ("vgg", "vgg19",
"resnet", "logreg").
input_shape: 3-Tuple containing the shape of a single image.
images: An image Tensor of shape (n,h,w,c).
Returns:
An unfrozen Keras Model in which `images` is the input tensor, and
another Model object representing the base model when using
pretrained models.
"""
if model_name == "logreg":
# logistic regression classifier
model_base = None
inputs = tf.keras.layers.Input(shape=input_shape, tensor=images)
x = tf.keras.layers.Flatten()(inputs)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg":
# create a model by replacing the classifier of a VGG16 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg_new":
# train a new vgg16-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
model_base = tf.keras.applications.VGG16(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc1')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
#x = tf.keras.layers.Dropout(0.5)(x)
#x = tf.keras.layers.Dropout(0.1)(x)
#x = tf.keras.layers.Dense(256, activation='relu', name='fc2')(x)
#x = tf.keras.layers.Dense(256, kernel_initializer="he_normal",
# kernel_regularizer=tf.keras.regularizers.l2(l2))(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "vgg19":
# create a model by replacing the classifier of a VGG19 model with a new classifier specific
# to the breast cancer problem
# recommend fine-tuning last 4 layers
#with tf.device("/cpu"):
#inputs = tf.keras.layers.Input(shape=input_shape)
model_base = tf.keras.applications.VGG19(
include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet":
# create a model by replacing the classifier of a ResNet50 model with a new classifier
# specific to the breast cancer problem
# recommend fine-tuning last 11 (stage 5 block c), 21 (stage 5 blocks b & c), or 33 (stage
# 5 blocks a,b,c) layers
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(include_top=False, input_shape=input_shape, input_tensor=images)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_new":
# train a new resnet50-like model from scratch on inputs in [-1, 1].
#with tf.device("/cpu"):
# NOTE: there is an issue in keras with using batch norm with model templating, i.e.,
# defining a model with generic inputs and then calling it on a tensor. the issue stems from
# batch norm not being well defined for shared settings, but it makes it quite annoying in
# this context. to "fix" it, we define it by directly passing in the `images` tensor
# https://github.com/fchollet/keras/issues/2827
model_base = resnet50.ResNet50(
include_top=False, input_shape=input_shape, input_tensor=images, weights=None)
inputs = model_base.inputs
x = model_base.output
x = tf.keras.layers.Flatten()(x)
#x = tf.keras.layers.GlobalAveragePooling2D()(x)
# init tf.keras.layers.Dense weights with Gaussian scaled by sqrt(2/(fan_in+fan_out))
logits = tf.keras.layers.Dense(1, kernel_initializer="glorot_normal")(x)
model_tower = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
elif model_name == "resnet_custom":
model_base = None
model_tower = resnet.ResNet(images, input_shape)
else:
raise Exception("model name unknown: {}".format(model_name))
# TODO: add this when it's necessary, and move to a separate function
## Multi-GPU exploitation via a linear combination of GPU loss functions.
#ins = []
#outs = []
#for i in range(num_gpus):
# with tf.device("/gpu:{}".format(i)):
# x = tf.keras.layers.Input(shape=input_shape) # split of batch
# out = resnet50(x) # run split on shared model
# ins.append(x)
# outs.append(out)
#model = tf.keras.Model(inputs=ins, outputs=outs) # multi-GPU, data-parallel model
model = model_tower
# unfreeze all model layers.
for layer in model.layers[1:]: # don't include input layer
layer.trainable = True
return model, model_base
# based on `keras.utils.multi_gpu_model`
def multi_gpu_model(model, gpus):
"""Replicates a model on different GPUs.
Specifically, this function implements single-machine
multi-GPU data parallelism. It works in the following way:
- Divide the model's input(s) into multiple sub-batches.
- Apply a model copy on each sub-batch. Every model copy
is executed on a dedicated GPU.
- Concatenate the results (on CPU) into one big batch.
E.g. if your `batch_size` is 64 and you use `gpus=2`,
then we will divide the input into 2 sub-batches of 32 samples,
process each sub-batch on one GPU, then return the full
batch of 64 processed samples.
This induces quasi-linear speedup on up to 8 GPUs.
This function is only available with the TensorFlow backend
for the time being.
# Arguments
model: A Keras model instance. To avoid OOM errors,
this model could have been built on CPU, for instance
(see usage example below).
gpus: Integer >= 2 or list of integers, number of GPUs or
list of GPU IDs on which to create model replicas.
# Returns
A Keras `Model` instance which can be used just like the initial
`model` argument, but which distributes its workload on multiple GPUs.
"""
if isinstance(gpus, (list, tuple)):
num_gpus = len(gpus)
target_gpu_ids = gpus
else:
num_gpus = gpus
target_gpu_ids = range(num_gpus)
def get_slice(data, i, parts):
shape = tf.shape(data)
batch_size = shape[:1]
input_shape = shape[1:]
step = batch_size // parts
if i == num_gpus - 1:
size = batch_size - step * i
else:
size = step
size = tf.concat([size, input_shape], axis=0)
stride = tf.concat([step, input_shape * 0], axis=0)
start = stride * i
return tf.slice(data, start, size)
all_outputs = []
for i in range(len(model.outputs)):
all_outputs.append([])
# Place a copy of the model on each GPU,
# each getting a slice of the inputs.
for i, gpu_id in enumerate(target_gpu_ids):
with tf.device('/cpu:0'):
inputs = []
# Retrieve a slice of the input on the CPU
for x in model.inputs:
input_shape = tuple(x.get_shape().as_list())[1:]
slice_i = tf.keras.layers.Lambda(
get_slice, output_shape=input_shape, arguments={'i': i, 'parts': num_gpus})(x)
inputs.append(slice_i)
with tf.device('/gpu:%d' % gpu_id):
with tf.name_scope('replica_%d' % gpu_id):
# Apply model on slice (creating a model replica on the target device).
outputs = model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later.
for o in range(len(outputs)):
all_outputs[o].append(outputs[o])
# Merge outputs on CPU.
with tf.device('/cpu:0'):
merged = []
for name, outputs in zip(model.output_names, all_outputs):
merged.append(tf.keras.layers.concatenate(outputs, axis=0, name=name))
return tf.keras.Model(model.inputs, merged)
def compute_data_loss(labels, logits):
"""Compute the mean logistic loss.
Args:
labels: A Tensor of shape (n, 1) containing a batch of labels.
logits: A Tensor of shape (n, 1) containing a batch of pre-sigmoid
prediction values.
Returns:
A scalar Tensor representing the mean logistic loss.
"""
# TODO: this is a binary classification problem so optimizing a loss derived from a Bernoulli
# distribution is appropriate. however, would the dynamics of the training algorithm be more
# stable if we treated this as a multi-class classification problem and derived a loss from a
# Multinomial distribution with two classes (and a single trial)? it would be
# over-parameterized, but then again, the deep net itself is already heavily parameterized.
# Bernoulli-derived loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.reshape(labels, [-1, 1]), logits=logits))
# Multinomial-derived loss
#labels = tf.one_hot(indices=labels, depth=2, on_value=1, off_value=0)
#loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
#loss = tf.reduce_mean(
# tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, features=logits))
return loss
def compute_l2_reg_loss(model, include_frozen=False, reg_final=True, reg_biases=False):
"""Compute L2 loss of trainable model weights.
This places a Gaussian prior with mean 0 std 1 on each of the model
parameters.
Args:
model: A Keras Model object.
include_frozen: Boolean for whether or not to ignore frozen layers.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
reg_biases: Boolean for whether or not to regularize biases.
Returns:
The L2 regularization loss of all trainable (i.e., unfrozen) model
weights, unless `include_frozen` is True, in which case all weights
are used.
"""
weights = []
if reg_final:
end = None
else: # don't regularize the final function that produces logits
end = -1 if not model.layers[-1].name.startswith("flatten") else -2
for layer in model.layers[:end]:
if layer.trainable or include_frozen:
if hasattr(layer, 'kernel'): # conv, dense
weights.append(layer.kernel)
elif hasattr(layer, 'gamma'): # batch norm scale
weights.append(1.0 - layer.gamma) # Gaussian prior centered at 1 for batch norm gamma value
if reg_biases:
# TODO: generally, we don't regularize the biases, but could we determine a probabilistic
# motivation to do this?
if hasattr(layer, 'bias'):
weights.append(layer.bias)
elif hasattr(layer, 'beta'):
weights.append(layer.beta)
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in weights])
return l2_loss
def compute_metrics(loss, labels, preds, probs, num_thresholds):
"""Compute metrics.
This creates ops that compute metrics in a streaming fashion.
Args:
loss: A Tensor representing the current batch mean loss.
labels: A Tensor of shape (n, 1) containing a batch of labels.
preds: A Tensor of shape (n, 1) containing a batch of binary
prediction values.
probs: A Tensor of shape (n, 1) containing a batch of probabilistic
prediction values.
num_thresholds: An integer indicating the number of thresholds to
use to compute PR curves.
Returns:
A tuple of mean loss, accuracy, positive predictive value
(precision), sensitivity (recall), F1, PR curve data, F1 list
based on the PR curve data, a grouped metrics update op, and a
      grouped metrics reset op.
"""
# TODO: think about converting this to a class
mean_loss, mean_loss_update_op, mean_loss_reset_op = create_resettable_metric(tf.metrics.mean,
'mean_loss', values=loss)
acc, acc_update_op, acc_reset_op = create_resettable_metric(tf.metrics.accuracy,
'acc', labels=labels, predictions=preds)
ppv, ppv_update_op, ppv_reset_op = create_resettable_metric(tf.metrics.precision,
'ppv', labels=labels, predictions=preds)
sens, sens_update_op, sens_reset_op = create_resettable_metric(tf.metrics.recall,
'sens', labels=labels, predictions=preds)
f1 = 2 * (ppv * sens) / (ppv + sens)
pr, pr_update_op, pr_reset_op = create_resettable_metric(
tf.contrib.metrics.precision_recall_at_equal_thresholds,
'pr', labels=tf.cast(labels, dtype=tf.bool), predictions=probs, num_thresholds=num_thresholds)
f1s = 2 * (pr.precision * pr.recall) / (pr.precision + pr.recall)
# combine all reset & update ops
metric_update_ops = tf.group(
mean_loss_update_op, acc_update_op, ppv_update_op, sens_update_op, pr_update_op)
metric_reset_ops = tf.group(
mean_loss_reset_op, acc_reset_op, ppv_reset_op, sens_reset_op, pr_reset_op)
return mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops
#return mean_loss, acc, ppv, sens, f1, metric_update_ops, metric_reset_ops
# utils
def create_resettable_metric(metric, scope, **metric_kwargs): # prob safer to only allow kwargs
"""Create a resettable metric.
Args:
metric: A tf.metrics metric function.
scope: A String scope name to enclose the metric variables within.
metric_kwargs: Kwargs for the metric.
Returns:
The metric op, the metric update op, and a metric reset op.
"""
# started with an implementation from https://github.com/tensorflow/tensorflow/issues/4814
with tf.variable_scope(scope) as scope:
metric_op, update_op = metric(**metric_kwargs)
scope_name = tf.contrib.framework.get_name_scope() # in case nested name/variable scopes
local_vars = tf.contrib.framework.get_variables(scope_name,
collection=tf.GraphKeys.LOCAL_VARIABLES) # get all local variables in this scope
reset_op = tf.variables_initializer(local_vars)
return metric_op, update_op, reset_op
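# Usage sketch for `create_resettable_metric` (illustrative only; the fed values are arbitrary
# assumptions). A streaming mean is accumulated over several batches, read out, reset between
# "epochs", and then accumulated again from scratch.
def _example_resettable_metric():
  values = tf.placeholder(tf.float32, shape=[None])
  mean_op, update_op, reset_op = create_resettable_metric(tf.metrics.mean, 'example_mean',
      values=values)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, feed_dict={values: [1.0, 2.0, 3.0]})
    sess.run(update_op, feed_dict={values: [4.0]})
    epoch1_mean = sess.run(mean_op)  # (1+2+3+4)/4 = 2.5
    sess.run(reset_op)  # clear the metric's internal accumulators
    sess.run(update_op, feed_dict={values: [10.0]})
    epoch2_mean = sess.run(mean_op)  # 10.0
  return epoch1_mean, epoch2_mean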
def initialize_variables(sess):
"""Initialize variables for training.
This initializes all tensor variables in the graph.
Args:
sess: A TensorFlow Session.
"""
# NOTE: Keras keeps track of the variables that are initialized, and any call to
# `tf.keras.backend.get_session()`, which is even used internally, will include logic to
# initialize variables. There is a situation in which resuming from a previous checkpoint and
# then saving the model after the first epoch will result in part of the model being
# reinitialized. The problem is that calling `tf.keras.backend.get_session()` here is too soon
# to initialize any variables, the resume branch skips any variable initialization, and then the
# `model.save` code path ends up calling `tf.keras.backend.get_session()`, thus causing part of
# the model to be reinitialized. Specifically, the model base is fine because it is initialized
# when the pretrained weights are added in, but the new dense classifier will not be marked as
# initialized by Keras. The non-resume branch will initialize any variables not initialized by
# Keras yet, and thus will avoid this issue. It could be possible to use
# `tf.keras.backend.manual_variable_initialization(True)` and then manually initialize
# all variables, but this would cause any pretrained weights to be removed. Instead, we should
# initialize all variables first with the equivalent of the logic in
# `tf.keras.backend.get_session()`, and then call resume.
# NOTE: the global variables initializer will erase the pretrained weights, so we instead only
# initialize the other variables
# NOTE: reproduced from the old tf.keras.backend._initialize_variables() function
# EDIT: this was updated in the master branch in commit
# https://github.com/fchollet/keras/commit/9166733c3c144739868fe0c30d57b861b4947b44
# TODO: given the change in master, reevaluate whether or not this is actually necessary anymore
variables = tf.global_variables()
uninitialized_variables = []
for v in variables:
if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
uninitialized_variables.append(v)
v._keras_initialized = True
global_init_op = tf.variables_initializer(uninitialized_variables)
local_init_op = tf.local_variables_initializer()
sess.run([global_init_op, local_init_op])
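# Sketch of the call ordering described in the notes above (illustrative; the training code
# below follows this exact pattern): initialize uninitialized variables first, and only then
# restore from a checkpoint so the restored weights are not clobbered by re-initialization.
def _example_init_then_maybe_resume(sess, saver, checkpoint_filename, resume):
  initialize_variables(sess)  # only touches variables Keras has not already initialized
  if resume:
    saver.restore(sess, checkpoint_filename)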
# training
def train(train_path, val_path, exp_path, model_name, model_weights, patch_size, train_batch_size,
val_batch_size, clf_epochs, finetune_epochs, clf_lr, finetune_lr, finetune_momentum,
finetune_layers, l2, reg_biases, reg_final, augmentation, marginalization, oversampling,
num_gpus, threads, prefetch_batches, log_interval, checkpoint, resume, seed):
"""Train a model.
Args:
train_path: String path to the generated training image patches.
This should contain folders for each class.
val_path: String path to the generated validation image patches.
This should contain folders for each class.
exp_path: String path in which to store the model checkpoints, logs,
etc. for this experiment
model_name: String indicating the model to use.
model_weights: Optional string path to an HDF5 file containing the
initial weights of the model. If None, then pretrained imagenet
weights will be used.
patch_size: Integer length to which the square patches will be
resized.
train_batch_size: Integer training batch size.
val_batch_size: Integer validation batch size.
    clf_epochs: Integer number of epochs for which to train the new
classifier layers.
finetune_epochs: Integer number of epochs for which to fine-tune the
model.
clf_lr: Float learning rate for training the new classifier layers.
finetune_lr: Float learning rate for fine-tuning the model.
finetune_momentum: Float momentum rate for fine-tuning the model.
finetune_layers: Integer number of layers at the end of the
pretrained portion of the model to fine-tune. The new classifier
layers will still be trained during fine-tuning as well.
l2: Float L2 global regularization value.
reg_biases: Boolean for whether or not to regularize biases.
reg_final: Boolean for whether or not to regularize the final
logits-producing layer.
augmentation: Boolean for whether or not to apply random
augmentation to the images.
marginalization: Boolean for whether or not to use noise
marginalization when evaluating the validation set. If True, then
each image in the validation set will be expanded to a batch of
augmented versions of that image, and predicted probabilities for
each batch will be averaged to yield a single noise-marginalized
prediction for each image. Note: if this is True, then
`val_batch_size` must be divisible by 4, or equal to 1 for a
special debugging case of no augmentation.
oversampling: Boolean for whether or not to oversample the minority
mitosis class via class-aware sampling.
num_gpus: Integer number of GPUs to use for data parallelism.
threads: Integer number of threads for dataset buffering.
prefetch_batches: Integer number of batches to prefetch.
log_interval: Integer number of steps between logging during
training.
checkpoint: Boolean flag for whether or not to save a checkpoint
after each epoch.
resume: Boolean flag for whether or not to resume training from a
checkpoint.
seed: Integer random seed.
"""
# TODO: break this out into:
# * data gen func
# * inference func
# * loss func
# * metrics func
# * logging func
# * train func
# set random seed
  # NOTE: At the moment, this is fairly useless because if the augmentation ops are seeded, they will
# be evaluated in the exact same deterministic manner on every epoch, which is not desired.
# Additionally, the multithreading needed to process the data will cause non-deterministic
# results. The one benefit is that the classification layers will be created deterministically.
np.random.seed(seed)
tf.set_random_seed(seed)
# create session, force tf.Keras to use it
config = tf.ConfigProto(allow_soft_placement=True)#, log_device_placement=True)
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# debugger
#from tensorflow.python import debug as tf_debug
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# data
with tf.name_scope("data"):
# NOTE: seed issues to be fixed in tf
train_dataset = create_dataset(train_path, model_name, patch_size, train_batch_size, True,
augmentation, False, oversampling, threads, prefetch_batches) #, seed)
val_dataset = create_dataset(val_path, model_name, patch_size, val_batch_size, False,
False, marginalization, False, threads, prefetch_batches) #, seed)
# note that batch norm could be affected by very small final batches, but right now the fix,
# which requires this change as well, would also affect evaluation tasks, so we will wait to
# enable that (and this change too) until we move to tf Estimators
#output_shapes = (tf.TensorShape([None, patch_size, patch_size, 3]),
# tf.TensorShape([None]),
# tf.TensorShape([None]))
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
#output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
val_init_op = iterator.make_initializer(val_dataset)
images, labels, filenames = iterator.get_next()
input_shape = (patch_size, patch_size, 3)
# models
with tf.name_scope("model"):
# replicate model on each GPU to allow for data parallelism
if num_gpus > 1:
with tf.device("/cpu:0"):
model_tower, model_base = create_model(model_name, input_shape, images)
#model_tower, model_base = create_model(model_name, input_shape, images)
model = multi_gpu_model(model_tower, num_gpus)
else:
model_tower, model_base = create_model(model_name, input_shape, images)
model = model_tower
if model_weights is not None:
model_tower.load_weights(model_weights)
# compute logits and predictions, possibly with marginalization
# NOTE: tf prefers to feed logits into a combined sigmoid and logistic loss function for
# numerical stability
if marginalization:
logits = marginalize(model.output) # will marginalize at test time
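      # at test time, all augmented copies of a single image share one label, so keep only the first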
labels = tf.cond(tf.keras.backend.learning_phase(), lambda: labels, lambda: labels[0:1])
else:
logits = model.output
# for Bernoulli-derived loss
probs = tf.nn.sigmoid(logits, name="probs")
preds = tf.round(probs, name="preds") # implicit threshold at 0.5
# for Multinomial-derived loss
#probs = tf.nn.softmax(logits, name="probs") # possible improved numerical stability
#preds = tf.argmax(probs, axis=1, name="preds")
# loss
with tf.name_scope("loss"):
with tf.control_dependencies([tf.assert_equal(tf.shape(labels)[0], tf.shape(logits)[0])]):
data_loss = compute_data_loss(labels, logits)
reg_loss = compute_l2_reg_loss(
model_tower, include_frozen=True, reg_final=reg_final, reg_biases=reg_biases)
loss = data_loss + l2*reg_loss
# TODO: enable this and test it
# use l2 reg during training, but not during validation. Otherwise, more fine-tuning will
# lead to an apparent lower validation loss, even though it may just be due to more layers
# that can be adjusted in order to lower the regularization portion of the loss.
#loss = tf.cond(tf.keras.backend.learning_phase(), lambda: data_loss + l2*reg_loss, lambda: data_loss)
# optim
# TODO: extract this into a function with tests
with tf.name_scope("optim"):
global_step_op = tf.train.get_or_create_global_step()
global_epoch_op = tf.Variable(0, trainable=False, name="global_epoch", dtype=tf.int32)
global_epoch_increment_op = tf.assign_add(global_epoch_op, 1, name="global_epoch_increment")
# TODO: rework the `finetune_layers` param to include starting from the beg/end
# classifier
# - freeze all pre-trained model layers.
if model_base:
for layer in model_base.layers:
layer.trainable = False
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
clf_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
clf_loss = data_loss + l2*clf_reg_loss
clf_opt = tf.train.AdamOptimizer(clf_lr)
clf_grads_and_vars = clf_opt.compute_gradients(clf_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
clf_model_update_ops = model_tower.updates
with tf.control_dependencies(clf_model_update_ops):
clf_train_op = clf_opt.apply_gradients(clf_grads_and_vars, global_step=global_step_op)
# finetuning
# - unfreeze a portion of the pre-trained model layers.
# note, could make this arbitrary, but for now, fine-tune some number of layers at the *end* of
# the pretrained portion of the model
if model_base:
if finetune_layers != 0:
for layer in model_base.layers[-finetune_layers:]:
layer.trainable = True
var_list = model_tower.trainable_weights
else:
var_list = None # i.e., use all available variables if we are not using transfer learning
# add any weight regularization to the base loss for unfrozen layers:
finetune_reg_loss = compute_l2_reg_loss(model_tower, reg_final=reg_final, reg_biases=reg_biases)
finetune_loss = data_loss + l2*finetune_reg_loss
# TODO: enable this, or use `tf.train.piecewise_constant` with `global_epoch`
#lr = tf.train.exponential_decay(
# finetune_lr, global_step_op,
# decay_steps=decay_steps, decay_rate=decay_rate,
# staircase=True)
finetune_opt = tf.train.MomentumOptimizer(finetune_lr, finetune_momentum, use_nesterov=True)
finetune_grads_and_vars = finetune_opt.compute_gradients(finetune_loss, var_list=var_list)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
finetune_model_update_ops = model_tower.updates
with tf.control_dependencies(finetune_model_update_ops):
finetune_train_op = finetune_opt.apply_gradients(
finetune_grads_and_vars, global_step=global_step_op)
# metrics
with tf.name_scope("metrics"):
num_thresholds = 11
mean_loss, acc, ppv, sens, f1, pr, f1s, metric_update_ops, metric_reset_ops = compute_metrics(
loss, labels, preds, probs, num_thresholds)
f1_max = tf.reduce_max(f1s)
thresh_max = pr.thresholds[tf.argmax(f1s)]
# tensorboard summaries
# TODO: extract this into a function
# NOTE: tensorflow is annoying when it comes to name scopes, so sometimes the name needs to be
# hardcoded as a prefix instead of a proper name scope if that name was used as a name scope
# earlier. otherwise, a numeric suffix will be appended to the name.
# general minibatch summaries
with tf.name_scope("summary"):
# data
actual_batch_size = tf.shape(images)[0]
percent_pos = tf.reduce_mean(labels) # positive labels are 1
pos_mask = tf.cast(labels, tf.bool)
neg_mask = tf.logical_not(pos_mask)
mitosis_images = tf.boolean_mask(images, pos_mask)
normal_images = tf.boolean_mask(images, neg_mask)
mitosis_filenames = tf.boolean_mask(filenames, pos_mask)
normal_filenames = tf.boolean_mask(filenames, neg_mask)
num_preds = tf.shape(preds)[0]
# false-positive & false-negative cases
pos_preds_mask = tf.cast(tf.squeeze(preds, axis=1), tf.bool)
#pos_preds_mask = tf.cast(preds, tf.bool)
neg_preds_mask = tf.logical_not(pos_preds_mask)
fp_mask = tf.logical_and(pos_preds_mask, neg_mask)
fn_mask = tf.logical_and(neg_preds_mask, pos_mask)
fp_images = tf.boolean_mask(images, fp_mask)
fn_images = tf.boolean_mask(images, fn_mask)
fp_filenames = tf.boolean_mask(filenames, fp_mask)
fn_filenames = tf.boolean_mask(filenames, fn_mask)
with tf.name_scope("images"):
tf.summary.image("mitosis", unnormalize(mitosis_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("normal", unnormalize(normal_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-positive", unnormalize(fp_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
tf.summary.image("false-negative", unnormalize(fn_images, model_name), 1,
collections=["minibatch", "minibatch_val"])
with tf.name_scope("data/filenames"):
tf.summary.text("mitosis", mitosis_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("normal", normal_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-positive", fp_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.text("false-negative", fn_filenames, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/images", images, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("data/labels", labels, collections=["minibatch", "minibatch_val"])
for layer in model_tower.layers:
for weight in layer.weights:
tf.summary.histogram(weight.name, weight, collections=["minibatch", "minibatch_val"])
if hasattr(layer, 'output'):
layer_name = "model/{}/out".format(layer.name)
tf.summary.histogram(layer_name, layer.output, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/probs", probs, collections=["minibatch", "minibatch_val"])
tf.summary.histogram("model/preds", preds, collections=["minibatch", "minibatch_val"])
with tf.name_scope("minibatch"):
tf.summary.scalar("loss", loss, collections=["minibatch"])
tf.summary.scalar("batch_size", actual_batch_size, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("num_preds", num_preds, collections=["minibatch", "minibatch_val"])
tf.summary.scalar("percent_positive", percent_pos, collections=["minibatch"])
tf.summary.scalar("learning_phase", tf.to_int32(tf.keras.backend.learning_phase()),
collections=["minibatch", "minibatch_val"])
# TODO: gradient histograms
# TODO: first layer convolution kernels as images
minibatch_summaries = tf.summary.merge_all("minibatch")
minibatch_val_summaries = tf.summary.merge_all("minibatch_val")
# epoch summaries
with tf.name_scope("epoch"):
tf.summary.scalar("loss", mean_loss, collections=["epoch"])
tf.summary.scalar("acc", acc, collections=["epoch"])
tf.summary.scalar("ppv", ppv, collections=["epoch"])
tf.summary.scalar("sens", sens, collections=["epoch"])
tf.summary.scalar("f1", f1, collections=["epoch"])
tf.summary.scalar("f1_max", f1_max, collections=["epoch"])
tf.summary.scalar("thresh_max", thresh_max, collections=["epoch"])
tb.summary.pr_curve_raw_data_op(
name='pr_curve',
true_positive_counts=pr.tp,
false_positive_counts=pr.fp,
true_negative_counts=pr.tn,
false_negative_counts=pr.fn,
precision=pr.precision,
recall=pr.recall,
num_thresholds=num_thresholds,
display_name='pr curve',
description="PR curve for {num_thresholds} thresholds.".format(num_thresholds=num_thresholds),
collections=["epoch"])
epoch_summaries = tf.summary.merge_all("epoch")
# use train and val writers so that plots can be on same graph
train_writer = tf.summary.FileWriter(os.path.join(exp_path, "train"), tf.get_default_graph())
val_writer = tf.summary.FileWriter(os.path.join(exp_path, "val"))
# save ops
checkpoint_filename = os.path.join(exp_path, "model.ckpt")
saver = tf.train.Saver()
# initialize stuff
initialize_variables(sess)
if resume:
saver.restore(sess, checkpoint_filename)
# TODO: extract this into a function with tests
# training loop for new classifier layers and fine-tuning
for train_op, epochs in [(clf_train_op, clf_epochs), (finetune_train_op, finetune_epochs)]:
global_epoch_start = sess.run(global_epoch_op)
for _ in range(global_epoch_start, global_epoch_start+epochs): # allow for resuming of training
global_epoch = sess.run(global_epoch_op)
# training
sess.run(train_init_op)
while True:
global_step = sess.run(global_step_op)
try:
if log_interval > 0 and global_step % log_interval == 0:
# train, update metrics, & log stuff
_, _, loss_val, summary_str = sess.run([train_op, metric_update_ops, loss,
minibatch_summaries], feed_dict={tf.keras.backend.learning_phase(): 1})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
train_writer.add_summary(summary_str, global_step)
print("train", global_epoch, global_step, loss_val, mean_loss_val, f1_val)
else:
# train & update metrics
_, _ = sess.run(
[train_op, metric_update_ops], feed_dict={tf.keras.backend.learning_phase(): 1})
except tf.errors.OutOfRangeError:
break
# log average training metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, train f1 (@ 0.5): {f1_val}, train max f1 "\
"(@ {thresh_max_val}): {f1_max_val}, train ppv: {ppv_val}, train sens: {sens_val}, "\
"train acc: {acc_val}, train avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val, acc_val=acc_val,
mean_loss_val=mean_loss_val, thresh_max_val=thresh_max_val,
f1_max_val=f1_max_val, ppv_val=ppv_val, sens_val=sens_val))
train_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
# validation
sess.run(val_init_op)
vi = 0 # validation step
while True:
try:
# evaluate & update metrics
if log_interval > 0 and vi % log_interval == 0:
_, loss_val, summary_str = sess.run(
[metric_update_ops, loss, minibatch_val_summaries],
feed_dict={tf.keras.backend.learning_phase(): 0})
mean_loss_val, f1_val = sess.run([mean_loss, f1])
print("val", global_epoch, vi, loss_val, mean_loss_val, f1_val)
val_writer.add_summary(summary_str, vi)
else:
_ = sess.run(metric_update_ops, feed_dict={tf.keras.backend.learning_phase(): 0})
vi += 1
except tf.errors.OutOfRangeError:
break
# log average validation metrics for epoch & reset
op_values = sess.run([f1, f1_max, thresh_max, ppv, sens, acc, mean_loss, epoch_summaries])
(f1_val, f1_max_val, thresh_max_val, ppv_val, sens_val, acc_val, mean_loss_val,
summary_str) = op_values
print("---epoch {global_epoch}, val f1 (@ 0.5): {f1_val}, val max f1 (@ {thresh_max_val}): "\
"{f1_max_val}, val ppv: {ppv_val}, val sens: {sens_val}, val acc: {acc_val}, "\
"val avg loss: {mean_loss_val}"\
.format(global_epoch=global_epoch, f1_val=f1_val,
thresh_max_val=thresh_max_val, f1_max_val=f1_max_val, ppv_val=ppv_val,
sens_val=sens_val, acc_val=acc_val, mean_loss_val=mean_loss_val))
val_writer.add_summary(summary_str, global_epoch)
sess.run(metric_reset_ops)
sess.run(global_epoch_increment_op) # global_epoch += 1
# save model
if checkpoint:
keras_filename = os.path.join(exp_path, "checkpoints",
"{f1_max_val:.5}_f1max_{f1_val:.5}_f1_{mean_loss_val:.5}_loss_{global_epoch}_"\
"epoch_model.hdf5"\
.format(f1_max_val=f1_max_val, f1_val=f1_val, mean_loss_val=mean_loss_val,
global_epoch=global_epoch))
model_tower.save(keras_filename, include_optimizer=False) # keras model
saver.save(sess, checkpoint_filename) # full TF graph
print("Saved model file to {}".format(keras_filename))
val_writer.flush()
#train_writer.flush()
def main(argv=None):
"""Command line interface for this script. Can optionally pass in a
list of strings to simulate command line usage.
"""
# parse args
parser = argparse.ArgumentParser()
parser.add_argument("--patches_path", default=os.path.join("data", "mitoses", "patches"),
help="path to the generated image patches containing `train` & `val` folders "\
"(default: %(default)s)")
parser.add_argument("--exp_parent_path", default=os.path.join("experiments", "mitoses"),
help="parent path in which to store experiment folders (default: %(default)s)")
parser.add_argument("--exp_name", default=None,
help="path within the experiment parent path in which to store the model checkpoints, "\
"logs, etc. for this experiment; an existing path can be used to resume training "\
"(default: %%y-%%m-%%d_%%H:%%M:%%S_{model})")
parser.add_argument("--exp_name_suffix", default=None,
help="suffix to add to experiment name (default: all parameters concatenated together)")
parser.add_argument("--exp_full_path", default=None,
help="full path in which to store the experiment. either this or the --exp_parent_path, "\
"--exp_name, --exp_name_suffix flags as a group can be used. typically, this would "\
"be used to resume an existing experiment (default: %(default)s)")
parser.add_argument("--model", default="vgg",
choices=["logreg", "vgg", "vgg_new", "vgg19", "resnet", "resnet_new", "resnet_custom"],
help="name of the model to use in ['logreg', 'vgg', 'vgg_new', 'vgg19', 'resnet', "\
"'resnet_new', 'resnet_custom'] (default: %(default)s)")
parser.add_argument("--model_weights", default=None,
help="optional hdf5 file containing the initial weights of the model. if not supplied, the "\
"model will start with pretrained weights from imagenet. (default: %(default)s)")
parser.add_argument("--patch_size", type=int, default=64,
help="integer length to which the square patches will be resized (default: %(default)s)")
parser.add_argument("--train_batch_size", type=int, default=32,
help="training batch size (default: %(default)s)")
parser.add_argument("--val_batch_size", type=int, default=32,
help="validation batch size (default: %(default)s)")
parser.add_argument("--clf_epochs", type=int, default=1,
help="number of epochs for which to train the new classifier layers "\
"(default: %(default)s)")
parser.add_argument("--finetune_epochs", type=int, default=0,
help="number of epochs for which to fine-tune the unfrozen layers (default: %(default)s)")
parser.add_argument("--clf_lr", type=float, default=1e-3,
help="learning rate for training the new classifier layers (default: %(default)s)")
parser.add_argument("--finetune_lr", type=float, default=1e-4,
help="learning rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_momentum", type=float, default=0.9,
help="momentum rate for fine-tuning the unfrozen layers (default: %(default)s)")
parser.add_argument("--finetune_layers", type=int, default=0,
help="number of layers at the end of the pretrained portion of the model to fine-tune "\
"(note: the new classifier layers will still be trained during fine-tuning as well) "\
"(default: %(default)s)")
parser.add_argument("--l2", type=float, default=1e-3,
help="amount of l2 weight regularization (default: %(default)s)")
parser.add_argument("--reg_biases", default=False, action="store_true",
help="whether or not to regularize biases. (default: %(default)s)")
parser.add_argument("--skip_reg_final", dest="reg_final", action="store_false",
help="whether or not to skip regularization of the logits-producing layer "\
"(default: %(default)s)")
parser.set_defaults(reg_final=True)
augment_parser = parser.add_mutually_exclusive_group(required=False)
augment_parser.add_argument("--augment", dest="augment", action="store_true",
help="apply random augmentation to the training images (default: True)")
augment_parser.add_argument("--no_augment", dest="augment", action="store_false",
help="do not apply random augmentation to the training images (default: False)")
parser.set_defaults(augment=True)
parser.add_argument("--marginalize", default=False, action="store_true",
help="use noise marginalization when evaluating the validation set. if this is set, then "\
"the validation batch_size must be divisible by 4, or equal to 1 for no augmentation "\
"(default: %(default)s)")
parser.add_argument("--oversample", default=False, action="store_true",
help="oversample the minority mitosis class during training via class-aware sampling "\
"(default: %(default)s)")
parser.add_argument("--num_gpus", type=int, default=1,
help="num_gpus: Integer number of GPUs to use for data parallelism. (default: %(default)s)")
parser.add_argument("--threads", type=int, default=5,
help="number of threads for dataset parallel processing; note: this will cause "\
"non-reproducibility (default: %(default)s)")
# TODO: update this to default to `None` to take advantage of auto prefetch buffer size tuning
# https://github.com/tensorflow/tensorflow/commit/d355f4e2644b68ea643f573c564936ec23b93787
parser.add_argument("--prefetch_batches", type=int, default=100,
help="number of batches to prefetch (default: %(default)s)")
parser.add_argument("--log_interval", type=int, default=100,
help="number of steps between logging during training (default: %(default)s)")
checkpoint_parser = parser.add_mutually_exclusive_group(required=False)
checkpoint_parser.add_argument("--checkpoint", dest="checkpoint", action="store_true",
help="save a checkpoint after each epoch (default: True)")
checkpoint_parser.add_argument("--no_checkpoint", dest="checkpoint", action="store_false",
help="do not save a checkpoint after each epoch (default: False)")
parser.set_defaults(checkpoint=True)
parser.add_argument("--resume", default=False, action="store_true",
help="resume training from a checkpoint (default: %(default)s)")
parser.add_argument("--seed", type=int, help="random seed (default: %(default)s)")
args = parser.parse_args(argv)
# set train/val paths
train_path = os.path.join(args.patches_path, "train")
val_path = os.path.join(args.patches_path, "val")
if args.exp_full_path is None:
if args.exp_name is None:
date = datetime.strftime(datetime.today(), "%y%m%d_%H%M%S")
args.exp_name = date + "_" + args.model
if args.exp_name_suffix is None:
args.exp_name_suffix = "patch_size_{args.patch_size}_batch_size_{args.train_batch_size}_"\
"clf_epochs_{args.clf_epochs}_ft_epochs_{args.finetune_epochs}_"\
"clf_lr_{args.clf_lr}_ft_lr_{args.finetune_lr}_"\
"ft_mom_{args.finetune_momentum}_ft_layers_{args.finetune_layers}_"\
"l2_{args.l2}_rb_{args.reg_biases}_aug_{args.augment}_"\
"marg_{args.marginalize}_over_{args.oversample}".format(args=args)
full_exp_name = args.exp_name + "_" + args.exp_name_suffix
args.exp_full_path = os.path.join(args.exp_parent_path, full_exp_name)
# make an experiment folder
if not os.path.exists(args.exp_full_path):
os.makedirs(os.path.join(args.exp_full_path, "checkpoints"))
print("experiment directory: {}".format(args.exp_full_path))
# create a random seed if needed
if args.seed is None:
args.seed = np.random.randint(1e9)
# save args to a file in the experiment folder, appending if it exists
with open(os.path.join(args.exp_full_path, 'args.txt'), 'a') as f:
json.dump(args.__dict__, f)
print("", file=f)
# can be read in later with
#with open('args.txt', 'r') as f:
# args = json.load(f)
# save command line invocation to txt file for ease of rerunning the exact experiment
with open(os.path.join(args.exp_full_path, 'invoke.txt'), 'a') as f:
# NOTE: since we sometimes call this `main` function via the hyperparam search script, we can't
# always use `sys.argv` because it would always refer to the outer invocation, i.e., the
# invocation of the hyperparam search script.
if argv is not None: # called from hpo script
fname = os.path.basename(__file__)
f.write("python3 {fname} ".format(fname=fname) + " ".join(argv) + "\n")
else: # called directly
f.write("python3 " + " ".join(sys.argv) + "\n")
# copy this script to the experiment folder
shutil.copy2(os.path.realpath(__file__), args.exp_full_path)
# train!
train(train_path=train_path, val_path=val_path, exp_path=args.exp_full_path,
model_name=args.model, model_weights=args.model_weights, patch_size=args.patch_size,
train_batch_size=args.train_batch_size, val_batch_size=args.val_batch_size,
clf_epochs=args.clf_epochs, finetune_epochs=args.finetune_epochs, clf_lr=args.clf_lr,
finetune_lr=args.finetune_lr, finetune_momentum=args.finetune_momentum,
finetune_layers=args.finetune_layers, l2=args.l2, reg_biases=args.reg_biases,
reg_final=args.reg_final, augmentation=args.augment, marginalization=args.marginalize,
oversampling=args.oversample, num_gpus=args.num_gpus, threads=args.threads,
prefetch_batches=args.prefetch_batches, log_interval=args.log_interval,
checkpoint=args.checkpoint, resume=args.resume, seed=args.seed)
if __name__ == "__main__":
main()
# ---
# tests
# TODO: eventually move these to a separate file.
# `py.test train_mitoses.py`
# TODO: use this fixture when we move these tests to a test module
#import pytest
#
#@pytest.fixture(autouse=True)
#def reset():
# """Ensure that the TensorFlow graph/session are clean."""
# tf.reset_default_graph()
# tf.keras.backend.clear_session()
# yield # run test
def reset():
"""Ensure that the TensorFlow graph/session are clean."""
tf.reset_default_graph()
tf.keras.backend.clear_session()
# data
def test_get_image(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
filename = os.path.join(str(tmpdir), "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename)
image_op = get_image(filename, 64)
sess = tf.keras.backend.get_session()
image = sess.run(image_op)
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert np.allclose(x.astype(np.float32) / 255, image)
assert np.allclose((x / 255).astype(np.float32), image)
def test_get_label():
import pytest
# mitosis
reset()
filename = "train/mitosis/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 1
# normal
reset()
filename = "train/normal/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
assert label == 0
# wrong label name
with pytest.raises(tf.errors.InvalidArgumentError):
reset()
filename = "train/unknown/1_03_05_713_348.jpg"
label_op = get_label(filename)
sess = tf.keras.backend.get_session()
label = sess.run(label_op)
def test_preprocess(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
folder = os.path.join(str(tmpdir), "this/train/mitosis")
os.makedirs(folder)
filename_orig = os.path.join(folder, "x.png")
x = np.random.randint(0, 255, dtype=np.uint8, size=(64,64,3))
Image.fromarray(x).save(filename_orig)
image_op, label_op, filename_op = preprocess(tf.constant(filename_orig), 64)
sess = tf.keras.backend.get_session()
image, label, filename = sess.run([image_op, label_op, filename_op])
assert image.shape == (64, 64, 3)
assert image.dtype == np.float32
assert np.min(image) >= 0
assert np.max(image) < 1
assert label == 1.0
assert filename.decode("utf-8") == filename_orig
def test_normalize_unnormalize():
reset()
sess = tf.keras.backend.get_session()
input_shape = (64, 64, 3)
x_np = np.random.rand(*input_shape).astype(np.float32) # uniform sampling in [0, 1)
x_batch_np = np.random.rand(2, *input_shape).astype(np.float32) # uniform sampling in [0, 1)
# imagenet preprocessing
model_name = "vgg"
means = np.array([103.939, 116.779, 123.68]).astype(np.float32)
x_norm_correct_np = x_np[..., ::-1] * 255 - means
x_batch_norm_correct_np = x_batch_np[..., ::-1] * 255 - means
assert x_np.dtype == np.float32
assert x_np.dtype == x_batch_np.dtype == x_norm_correct_np.dtype == x_batch_norm_correct_np.dtype
# single example
def test(x_norm, x_unnorm):
"""Test normalized and unnormalized versions of x."""
# NOTE: closes over x_np & x_norm_correct_np
assert x_norm.dtype == x_norm_correct_np.dtype
assert x_unnorm.dtype == x_np.dtype
assert np.allclose(x_norm, x_norm_correct_np)
assert not np.allclose(x_norm, x_np)
assert np.all(np.max(x_norm, axis=(0,1)) > 1)
assert np.all(np.max(x_norm, axis=(0,1)) < 255 - means)
assert np.all(np.min(x_norm, axis=(0,1)) < 0)
assert np.all(np.min(x_norm, axis=(0,1)) > 0 - means)
assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)
# batch of examples
def test_batch(x_batch_norm, x_batch_unnorm):
"""Test normalized and unnormalized versions of x_batch."""
# NOTE: closes over x_batch_np & x_batch_norm_correct_np
assert x_batch_norm.dtype == x_batch_norm_correct_np.dtype
assert x_batch_unnorm.dtype == x_batch_np.dtype
assert np.allclose(x_batch_norm, x_batch_norm_correct_np)
assert not np.allclose(x_batch_norm, x_batch_np)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) > 1)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) < 255 - means)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) < 0)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) > 0 - means)
    assert np.allclose(x_batch_unnorm, x_batch_np, rtol=1e-4, atol=1e-7)
## numpy
x_norm_np = normalize(x_np, model_name)
x_unnorm_np = unnormalize(x_norm_np, model_name)
test(x_norm_np, x_unnorm_np)
x_batch_norm_np = normalize(x_batch_np, model_name)
x_batch_unnorm_np = unnormalize(x_batch_norm_np, model_name)
test_batch(x_batch_norm_np, x_batch_unnorm_np)
## tensorflow
x = tf.placeholder(tf.float32, [*input_shape])
x_norm = normalize(x, model_name)
x_unnorm = unnormalize(x_norm, model_name)
x_norm_np, x_unnorm_np = sess.run([x_norm, x_unnorm], feed_dict={x: x_np})
test(x_norm_np, x_unnorm_np)
x_batch = tf.placeholder(tf.float32, [None, *input_shape])
x_batch_norm = normalize(x_batch, model_name)
x_batch_unnorm = unnormalize(x_batch_norm, model_name)
x_batch_norm_np, x_batch_unnorm_np = sess.run([x_batch_norm, x_batch_unnorm],
feed_dict={x_batch: x_batch_np})
test_batch(x_batch_norm_np, x_batch_unnorm_np)
# image standardization preprocessing
reset()
sess = tf.keras.backend.get_session()
model_name = "not_vgg"
x_norm_correct_np = x_np * 2 - 1
x_batch_norm_correct_np = x_batch_np * 2 - 1
# single example
def test(x_norm, x_unnorm):
"""Test normalized and unnormalized versions of x."""
# NOTE: closes over x_np & x_norm_correct_np
assert x_norm.dtype == x_norm_correct_np.dtype
assert x_unnorm.dtype == x_np.dtype
assert np.allclose(x_norm, x_norm_correct_np)
assert not np.allclose(x_norm, x_np)
assert np.all(np.max(x_norm, axis=(0,1)) <= 1)
assert np.all(np.max(x_norm, axis=(0,1)) > 0)
assert np.all(np.min(x_norm, axis=(0,1)) >= -1)
assert np.all(np.min(x_norm, axis=(0,1)) < 0)
assert np.allclose(x_unnorm, x_np, rtol=1e-4, atol=1e-7)
# batch of examples
def test_batch(x_batch_norm, x_batch_unnorm):
"""Test normalized and unnormalized versions of x_batch."""
# NOTE: closes over x_batch_np & x_batch_norm_correct_np
assert x_batch_norm.dtype == x_batch_norm_correct_np.dtype
assert x_batch_unnorm.dtype == x_batch_np.dtype
assert np.allclose(x_batch_norm, x_batch_norm_correct_np)
assert not np.allclose(x_batch_norm, x_batch_np)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) <= 1)
assert np.all(np.max(x_batch_norm, axis=(0,1,2)) > 0)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) >= -1)
assert np.all(np.min(x_batch_norm, axis=(0,1,2)) < 0)
    assert np.allclose(x_batch_unnorm, x_batch_np) #, atol=1e-7)
## numpy
x_norm_np = normalize(x_np, model_name)
x_unnorm_np = unnormalize(x_norm_np, model_name)
test(x_norm_np, x_unnorm_np)
x_batch_norm_np = normalize(x_batch_np, model_name)
x_batch_unnorm_np = unnormalize(x_batch_norm_np, model_name)
test_batch(x_batch_norm_np, x_batch_unnorm_np)
## tensorflow
x = tf.placeholder(tf.float32, [*input_shape])
x_norm = normalize(x, model_name)
x_unnorm = unnormalize(x_norm, model_name)
x_norm_np, x_unnorm_np = sess.run([x_norm, x_unnorm], feed_dict={x: x_np})
test(x_norm_np, x_unnorm_np)
x_batch = tf.placeholder(tf.float32, [None, *input_shape])
x_batch_norm = normalize(x_batch, model_name)
x_batch_unnorm = unnormalize(x_batch_norm, model_name)
x_batch_norm_np, x_batch_unnorm_np = sess.run([x_batch_norm, x_batch_unnorm],
feed_dict={x_batch: x_batch_np})
test_batch(x_batch_norm_np, x_batch_unnorm_np)
def test_augment(tmpdir):
# NOTE: pytest will provide a temp directory automatically:
# https://docs.pytest.org/en/latest/tmpdir.html
from PIL import Image
reset()
# create png image
filename = os.path.join(str(tmpdir), "x.png")
patch_size = 64
x = np.random.randint(0, 255, dtype=np.uint8, size=(patch_size, patch_size,3))
Image.fromarray(x).save(filename)
image_op = get_image(filename, 64)
aug_image_op = augment(image_op, patch_size)
sess = tf.keras.backend.get_session()
image, aug_image = sess.run([image_op, aug_image_op])
assert aug_image.shape == (64, 64, 3)
assert aug_image.dtype == np.float32
assert np.min(aug_image) >= 0
assert np.max(aug_image) <= 1
assert not np.allclose(aug_image, x/255)
assert not np.allclose(aug_image, image)
# seeds
reset()
image_op = get_image(filename, 64)
aug_image_op1 = augment(image_op, patch_size, 1)
aug_image_op2 = augment(image_op, patch_size, 2)
sess = tf.keras.backend.get_session()
aug_image1a, aug_image2a = sess.run([aug_image_op1, aug_image_op2])
reset()
image_op = get_image(filename, 64)
aug_image_op1 = augment(image_op, patch_size, 1)
aug_image_op2 = augment(image_op, patch_size, 2)
sess = tf.keras.backend.get_session()
aug_image1b, aug_image2b = sess.run([aug_image_op1, aug_image_op2])
assert np.allclose(aug_image1a, aug_image1b)
assert np.allclose(aug_image2a, aug_image2b)
assert not np.allclose(aug_image1a, aug_image2a)
def test_create_augmented_batch():
import pytest
reset()
sess = tf.keras.backend.get_session()
patch_size = 64
image = np.random.rand(patch_size, patch_size, 3).astype(np.float32)
# wrong sizes
  with pytest.raises(AssertionError):
    create_augmented_batch(image, 3, patch_size)
  with pytest.raises(AssertionError):
    create_augmented_batch(image, 31, patch_size)
# correct sizes
def test(batch_size):
aug_images_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images = sess.run(aug_images_tf)
assert aug_images.shape == (batch_size,64,64,3)
test(32)
test(4)
test(1)
# deterministic behavior
def test2(batch_size):
# different session runs
aug_images_1_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_1 = sess.run(aug_images_1_tf)
aug_images_2_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_2 = sess.run(aug_images_2_tf)
assert np.array_equal(aug_images_1, aug_images_2)
# same session run
aug_images_1_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_2_tf = create_augmented_batch(image, batch_size, patch_size)
aug_images_1, aug_images_2 = sess.run([aug_images_1_tf, aug_images_2_tf])
assert np.array_equal(aug_images_1, aug_images_2)
test2(32)
test2(4)
test2(1)
def test_marginalize():
import pytest
reset()
sess = tf.keras.backend.get_session()
shape = (32, 1)
logits = np.random.randn(*shape) # will be embedded directly in tf graph
marg_logits = marginalize(logits) # tf ops
# forgot tf.keras.backend.learning_phase()
# NOTE: this is no longer an error due to a Keras change that sets a default value of 0 for
# `tf.keras.backend.learning_phase()`.
#with pytest.raises(tf.errors.InvalidArgumentError):
# l = sess.run(marg_logits)
# train time
l = sess.run(marg_logits, feed_dict={tf.keras.backend.learning_phase(): 1})
assert l.shape == shape
assert np.array_equal(l, logits)
# test time
l = sess.run(marg_logits, feed_dict={tf.keras.backend.learning_phase(): 0})
assert l.shape == (1, 1)
assert np.allclose(l.squeeze(), np.mean(logits))
# equal labels
reset()
sess = tf.keras.backend.get_session()
labels = np.full(shape, 1)
marg_labels = marginalize(labels)
# train time
l = sess.run(marg_labels, feed_dict={tf.keras.backend.learning_phase(): 1})
assert l.shape == shape
assert np.array_equal(l, labels)
# test time
l = sess.run(marg_labels, feed_dict={tf.keras.backend.learning_phase(): 0})
assert l.shape == (1, 1)
assert np.allclose(l.squeeze(), 1)
# model
def test_compute_l2_reg_loss():
reset()
# create model with a mix of pretrained and new weights
# NOTE: the pretrained layers will be initialized by Keras on creation, while the new Dense
# layer will remain uninitialized
input_shape = (224,224,3)
inputs = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.Conv2D(1, 3)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
logits = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inputs, outputs=logits, name="model")
for l in model.layers:
l.trainable = True
sess = tf.keras.backend.get_session()
# all layers
l2_reg = compute_l2_reg_loss(model)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# subset of layers
model.layers[1].trainable = False
l2_reg = compute_l2_reg_loss(model)
correct_l2_reg = (
tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# include frozen layers
model.layers[1].trainable = False
l2_reg = compute_l2_reg_loss(model, True)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma)
+ tf.nn.l2_loss(model.layers[3].kernel))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
assert np.array_equal(l2_reg_val, correct_l2_reg_val)
model.layers[1].trainable = True
# skip final layer
l2_reg = compute_l2_reg_loss(model, reg_final=False)
correct_l2_reg = (
tf.nn.l2_loss(model.layers[1].kernel)
+ tf.nn.l2_loss(1.0 - model.layers[2].gamma))
l2_reg_val, correct_l2_reg_val = sess.run([l2_reg, correct_l2_reg])
  assert np.array_equal(l2_reg_val, correct_l2_reg_val)
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 18:41:38 2021
@author: divyoj
"""
## importing libraries:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
import os
# # note that this must be executed before 'import numba'
# os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
from numba import njit
import time as process_time
import plotting_gradient
from scipy.integrate import solve_ivp
## functions:
@njit
def do_timestep(t,z,aT,bT,alpha, beta, gamma, zeta):
''' function to give dxdt at a time step '''
aL = z[0*(nx*ny):1*(nx*ny)].reshape((ny,nx))
bL = z[1*(nx*ny):2*(nx*ny)].reshape((ny,nx))
aR = z[2*(nx*ny):3*(nx*ny)].reshape((ny,nx))
bR = z[3*(nx*ny):4*(nx*ny)].reshape((ny,nx))
# total membrane protein concentration:
a0 = aL + aR
b0 = bL + bR
    # initiating dxdt to arrays of zeros:
    daL=np.zeros((ny,nx));daR=np.zeros((ny,nx));dbL=np.zeros((ny,nx));dbR=np.zeros((ny,nx))
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
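# Usage sketch (the dataset name below is an arbitrary assumption for illustration): split a
# combined patient/seizure string into its parts, then look up the hard-coded channel indices,
# clinically annotated onset electrodes, and clinical outcome for that patient.
def _example_returnindices():
    pat_id, seiz_id = splitpatient('pt1sz2')  # -> ('pt1', 'sz2')
    included_indices, onsetelecs, clinresult = returnindices(pat_id, seiz_id)
    print(included_indices.shape, onsetelecs, clinresult)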
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
np.arange(
22, 23), np.arange(
29, 37), np.arange(
38, 40),
np.arange(42, 64), np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
np.arange(
36, 37), np.arange(
38, 40), np.arange(
42, 74),
np.arange(75, 81), np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12',
'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30',
'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
np.arange(
42, 66), np.arange(
68, 74), np.arange(
76, 77),
np.arange(78, 94), np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
np.arange(
11, 17), np.arange(
18, 19), np.arange(
20, 37),
np.arange(40, 42), np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
np.arange(
31, 36), np.arange(
40, 42), np.arange(
44, 47),
np.arange(48, 66), np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
                           'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
np.arange(
16, 19), np.arange(
21, 26), np.arange(
31, 32),
np.arange(
33, 35), np.arange(
36, 37), np.arange(
41, 42),
np.arange(
49, 55), np.arange(
57, 58), np.arange(
61, 65),
np.arange(
69, 72), np.arange(
76, 81), np.arange(
83, 97),
np.arange(
99, 102), np.arange(
104, 107), np.arange(
109, 114),
np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
np.arange(
33, 36), np.arange(
39, 40), np.arange(
42, 44), np.arange(
46, 50),
np.arange(
56, 58), np.arange(
62, 65), np.arange(
66, 68), np.arange(
69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
np.arange(
34, 37), np.arange(
38, 40), np.arange(
42, 98),
np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
np.arange(
10, 12), np.arange(
14, 18), np.arange(
20, 28),
np.arange(
29, 34), np.arange(
46, 47), np.arange(
49, 62),
np.arange(
63, 67), np.arange(
69, 73), np.arange(
78, 87),
np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
np.arange(
14, 17), np.arange(
21, 22), np.arange(
23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
np.arange(
20, 39), np.arange(
41, 82), np.arange(
84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
13, 17), np.arange(
18, 19), np.arange(
20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
onsetelecs = ["Q'3", "Q'4", "R'3", "R'4"]
clinresult = 2
elif pat_id == 'la07':
# [1:18 22:23 25 34:37 44 48:51 54:55 57:69 65:66 68:78 ...
# 82:83 85:93 96:107 114:120];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 18), np.arange(21, 23),
np.arange(
24, 25), np.arange(
33, 37), np.arange(
43, 44),
np.arange(47, 51), np.arange(53, 55), np.arange(56, 69),
np.arange(64, 66), np.arange(67, 78), np.arange(81, 83),
np.arange(84, 93), np.arange(95, 107), np.arange(113, 120)))
onsetelecs = ["T'1", "T'3", "R'8", "R'9"]
clinresult = 1
elif pat_id == 'la08':
# [1:2 8:13 15:19 22 25 27:30 34:35 46:48 50:57 ...
# 65:68 70:72 76:78 80:84 87:93 100:102 105:108 110:117 123:127 130:131 133:137 ...
# 140:146]
included_indices = np.concatenate((np.arange(0, 2), np.arange(7, 13),
np.arange(
14, 19), np.arange(
21, 22), np.arange(
24, 25),
np.arange(26, 30), np.arange(33, 35), np.arange(45, 48),
np.arange(49, 57), np.arange(64, 68), np.arange(69, 72),
np.arange(75, 78), np.arange(79, 84), np.arange(86, 93),
np.arange(99, 102), np.arange(104, 108), np.arange(109, 117),
np.arange(122, 127), np.arange(129, 131), np.arange(132, 137),
np.arange(139, 146)))
onsetelecs = ["Q2"]
clinresult = 2
elif pat_id == 'la09':
# [3:4 7:17 21:28 33:38 42:47 51:56 58:62 64:69 ...
# 73:80 82:84 88:92 95:103 107:121 123 126:146 150:161 164:169 179:181 ...
# 183:185 187:191]
# 2/7/18 - got rid of F10 = looking at edf was super noisy
included_indices = np.concatenate((np.arange(2, 3), np.arange(6, 17),
np.arange(
20, 28), np.arange(
32, 38), np.arange(
41, 47),
np.arange(
50, 56), np.arange(
57, 62), np.arange(
63, 66), np.arange(
67, 69),
np.arange(72, 80), np.arange(81, 84), np.arange(87, 92),
np.arange(94, 103), np.arange(106, 121), np.arange(122, 123),
np.arange(125, 146), np.arange(149, 161), np.arange(163, 169),
np.arange(178, 181), np.arange(182, 185), np.arange(186, 191)))
onsetelecs = ["X'1", "X'2", "X'3", "X'4", "U'1", "U'2"]
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(20, 39), np.arange(41, 189)))
onsetelecs = ["P'1", "P'2"]
clinresult = 2
elif pat_id == 'la10':
# [1:4 7:13 17:19 23:32 36:37 46:47 50 54:59 62:66 68:79 82:96 ...
# 99:106 108:113 117:127 135:159 163:169 172:173 176:179 181:185];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 13),
np.arange(
16, 19), np.arange(
22, 32), np.arange(
35, 37),
np.arange(45, 47), np.arange(49, 50), np.arange(53, 59),
np.arange(61, 66), np.arange(67, 79), np.arange(81, 96),
np.arange(98, 106), np.arange(107, 113), np.arange(116, 127),
np.arange(134, 159), np.arange(162, 169), np.arange(171, 173),
np.arange(175, 179), np.arange(180, 185)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 2
elif pat_id == 'la11':
# [3:4 7:16 22:30 33:39 42 44:49 53:62 64:87 91:100 ...
# 102:117 120:127 131:140 142:191];
included_indices = np.concatenate((np.arange(2, 4), np.arange(6, 16),
np.arange(
21, 30), np.arange(
32, 39), np.arange(
41, 42), np.arange(
43, 49),
np.arange(
52, 62), np.arange(
63, 87), np.arange(
90, 100), np.arange(
101, 117),
np.arange(119, 127), np.arange(130, 140), np.arange(141, 191)))
onsetelecs = ["D6", "Z10"]
clinresult = 2
elif pat_id == 'la12':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 15),
np.arange(
19, 23), np.arange(
24, 31), np.arange(
34, 36), np.arange(
42, 44), np.arange(
47, 48),
np.arange(
49, 59), np.arange(
61, 66), np.arange(
68, 86), np.arange(
87, 90),
np.arange(
91, 100), np.arange(
101, 119), np.arange(
121, 129), np.arange(
131, 134),
np.arange(136, 150), np.arange(153, 154), np.arange(156, 161),
np.arange(167, 178), np.arange(187, 191)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 3
elif pat_id == 'la13':
# [1:4 7:12 23:33 36:37 44:45 48:70 72:93]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
22, 33), np.arange(
35, 37), np.arange(
43, 45),
np.arange(47, 70), np.arange(71, 93)))
onsetelecs = ["Y13", "Y14"]
clinresult = 2
elif pat_id == 'la15':
# included_channels = [1:4 9:12 15:19 21:27 30:34 36:38 43:57 62:66 ...
# 68:71 76:85 89:106 108:112 114:115 118:124 127:132 135:158 ...
# 161:169 171:186]
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 12),
np.arange(
14, 19), np.arange(
20, 27), np.arange(
29, 34),
np.arange(35, 38), np.arange(42, 57), np.arange(61, 66),
np.arange(67, 71), np.arange(75, 85), np.arange(88, 106),
np.arange(107, 112), np.arange(113, 115), np.arange(117, 124),
np.arange(126, 132), np.arange(134, 158), np.arange(160, 169), np.arange(170, 186)))
if seiz_id == 'ictal':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(
20, 39), np.arange(
41, 95), np.arange(
96, 112),
np.arange(113, 132), np.arange(134, 187)))
onsetelecs = ["R1", "R2", "R3"]
clinresult = 4
elif pat_id == 'la16':
# [1:3 10:16 23:24 28 31:35 37:39 42:44 46:47 ...
# 49:54 58:62 64:65 68:70 76:89 93:98 100:101 105:124 126 128:130 ...
# 132:134 136:140 142:144 149:156 158:163 165:166 168:170 173:181
# 183:189];
included_indices = np.concatenate((np.arange(0, 3), np.arange(9, 16),
np.arange(
22, 24), np.arange(
27, 28), np.arange(
30, 35),
np.arange(36, 39), np.arange(41, 44), np.arange(45, 47),
np.arange(48, 54), np.arange(57, 62), np.arange(63, 65),
                                           np.arange(67, 70), np.arange(75, 89),
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""CARS trainer."""
import os
import logging
import copy
import json
import numpy as np
from collections import namedtuple
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
from .utils import eval_model_parameters
from .nsga3 import CARS_NSGA
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space import SearchSpace
from vega.search_space.search_algs import SearchAlgorithm
from vega.search_space.search_algs.nsga_iii import SortAndSelectPopulation
from vega.datasets.pytorch import Dataset
from vega.core.common import FileOps, Config, DefaultConfig
from vega.core.metrics.pytorch import Metrics
from vega.search_space.networks.pytorch import CARSDartsNetwork
from vega.core.trainer.callbacks import Callback, ModelStatistics
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
@ClassFactory.register(ClassType.CALLBACK)
class CARSTrainerCallback(Callback):
"""A special callback for CARSTrainer."""
def __init__(self):
super(CARSTrainerCallback, self).__init__()
self.alg_policy = ClassFactory.__configs__.search_algorithm.policy
def before_train(self, epoch, logs=None):
"""Be called before the training process."""
        # Use zero valid_freq to suppress default valid step
self.trainer.auto_save_ckpt = False
self.trainer.auto_save_perf = False
self.trainer.valid_freq = 0
cudnn.benchmark = True
cudnn.enabled = True
self.search_alg = SearchAlgorithm(SearchSpace())
self.set_algorithm_model(self.trainer.model)
# setup alphas
n_individual = self.alg_policy.num_individual
self.alphas = torch.cat([self.trainer.model.random_single_path().unsqueeze(0)
for i in range(n_individual)], dim=0)
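        # self.alphas holds one architecture encoding (a randomly sampled single
        # path through the supernet) per individual in the population, stacked
        # along dim 0, so its first dimension equals num_individual.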
self.trainer.train_loader = self.trainer._init_dataloader(mode='train')
self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')
def before_epoch(self, epoch, logs=None):
"""Be called before each epoach."""
self.epoch = epoch
self.trainer.lr_scheduler.step()
def train_step(self, batch):
"""Replace the default train_step function."""
self.trainer.model.train()
input, target = batch
self.trainer.optimizer.zero_grad()
for j in range(self.alg_policy.num_individual_per_iter):
i = np.random.randint(0, self.alg_policy.num_individual, 1)[0]
if self.epoch < self.alg_policy.warmup:
logits = self.trainer.model.forward_random(input)
else:
logits = self.trainer.model(input, self.alphas[i])
loss = self.trainer.loss(logits, target)
loss.backward(retain_graph=True)
if self.epoch < self.alg_policy.warmup:
break
nn.utils.clip_grad_norm(
self.trainer.model.parameters(), self.trainer.cfg.grad_clip)
self.trainer.optimizer.step()
return {'loss': loss.item(),
'train_batch_output': logits}
def after_epoch(self, epoch, logs=None):
"""Be called after each epoch."""
self.search_evol_arch(epoch)
def set_algorithm_model(self, model):
"""Set model to algorithm.
:param model: network model
:type model: torch.nn.Module
"""
self.search_alg.set_model(model)
def search_evol_arch(self, epoch):
"""Update architectures.
:param epoch: The current epoch
:type epoch: int
:param valid_queue: valid dataloader
:type valid_queue: dataloader
:param model: The model to be trained
:type model: nn.Module
"""
if epoch >= self.alg_policy.start_ga_epoch and \
(epoch - self.alg_policy.start_ga_epoch) % self.alg_policy.ga_interval == 0:
self.save_model_checkpoint(
self.trainer.model, 'weights_{}.pt'.format(epoch))
for generation in range(self.alg_policy.num_generation):
fitness = np.zeros(
int(self.alg_policy.num_individual * (1 + self.alg_policy.expand)))
model_sizes = np.zeros(
int(self.alg_policy.num_individual * (1 + self.alg_policy.expand)))
genotypes = []
                # generate offspring using mutation and cross-over
offsprings = self.search_alg.gen_offspring(self.alphas)
self.alphas = torch.cat((self.alphas, offsprings), dim=0)
# calculate fitness (accuracy) and #parameters
for i in range(int(self.alg_policy.num_individual * (1 + self.alg_policy.expand))):
fitness[i], _ = self.search_infer_step(self.alphas[i])
genotypes.append(self.genotype_namedtuple(self.alphas[i]))
model_sizes[i] = self.eval_model_sizes(self.alphas[i])
                    logging.info('Valid_acc for individual %d: %f, size %f',
                                 i, fitness[i], model_sizes[i])
# update population using pNSGA-III (CARS_NSGA)
logging.info('############## Begin update alpha ############')
if self.alg_policy.nsga_method == 'nsga3':
_, _, keep = SortAndSelectPopulation(
np.vstack((1 / fitness, model_sizes)), self.alg_policy.num_individual)
elif self.alg_policy.nsga_method == 'cars_nsga':
nsga_objs = [model_sizes]
keep = CARS_NSGA(fitness, nsga_objs,
self.alg_policy.num_individual)
drop = list(set(list(
range(int(self.alg_policy.num_individual *
(1 + self.alg_policy.expand))))) - set(keep.tolist()))
logging.info('############## KEEP ############')
fitness_keep = []
size_keep = []
genotype_keep = []
for i in keep:
                    logging.info('KEEP Valid_acc for individual %d: %f, size %f, genotype %s',
                                 i, fitness[i], model_sizes[i], genotypes[i])
fitness_keep.append(fitness[i])
size_keep.append(model_sizes[i])
genotype_keep.append(genotypes[i])
logging.info('############## DROP ############')
for i in drop:
                    logging.info('DROP Valid_acc for individual %d: %f, size %f, genotype %s',
                                 i, fitness[i], model_sizes[i], genotypes[i])
if self.alg_policy.select_method == 'uniform':
selected_genotypes, selected_acc, selected_model_sizes = \
self.select_uniform_pareto_front(
np.array(fitness_keep), np.array(size_keep), genotype_keep)
else: # default: first
selected_genotypes, selected_acc, selected_model_sizes = \
self.select_first_pareto_front(
np.array(fitness_keep), np.array(size_keep), genotype_keep)
ga_epoch = int(
(epoch - self.alg_policy.start_ga_epoch) / self.alg_policy.ga_interval)
self.save_genotypes(selected_genotypes, selected_acc, selected_model_sizes,
'genotype_selected_{}.txt'.format(ga_epoch))
self.save_genotypes(genotype_keep, np.array(fitness_keep), np.array(size_keep),
'genotype_keep_{}.txt'.format(ga_epoch))
self.save_genotypes_to_json(genotype_keep, np.array(fitness_keep), np.array(size_keep),
'genotype_keep_jsons', ga_epoch)
self.alphas = self.alphas[keep].clone()
logging.info('############## End update alpha ############')
def search_infer_step(self, alpha):
"""Infer in search stage.
:param valid_queue: valid dataloader
:type valid_queue: dataloader
:param model: The model to be trained
:type model: nn.Module
:param alpha: encoding of a model
:type alpha: array
:return: Average top1 acc and loss
:rtype: nn.Tensor
"""
metrics = Metrics(self.trainer.cfg.metric)
self.trainer.model.eval()
with torch.no_grad():
for step, (input, target) in enumerate(self.trainer.valid_loader):
input = input.cuda()
target = target.cuda(non_blocking=True)
logits = self.trainer.model(input, alpha)
metrics(logits, target)
top1 = metrics.results[0]
return top1
def select_first_pareto_front(self, fitness, obj, genotypes):
"""Select models in the first pareto front.
:param fitness: fitness, e.g. accuracy
:type fitness: ndarray
:param obj: objectives (model sizes, FLOPS, latency etc)
:type obj: ndarray
:param genotypes: genotypes for searched models
:type genotypes: list
:return: The selected samples
:rtype: list
"""
F, _, selected_idx = SortAndSelectPopulation(np.vstack(
(1 / fitness, obj)), self.alg_policy.pareto_model_num)
selected_genotypes = []
selected_acc = []
selected_model_sizes = []
for idx in selected_idx:
selected_genotypes.append(genotypes[idx])
selected_acc.append(fitness[idx])
selected_model_sizes.append(obj[idx])
return selected_genotypes, selected_acc, selected_model_sizes
def select_uniform_pareto_front(self, fitness, obj, genotypes):
"""Select models in the first pareto front.
:param fitness: fitness, e.g. accuracy
:type fitness: ndarray
:param obj: objectives (model sizes, FLOPS, latency etc)
:type obj: ndarray
:param genotypes: genotypes for searched models
:type genotypes: list
:return: The selected samples
:rtype: list
"""
# preprocess
max_acc = fitness.max()
keep = (fitness > max_acc * 0.5)
fitness = fitness[keep]
obj = obj[keep]
genotypes = [i for (i, v) in zip(genotypes, keep) if v]
max_obj = obj.max()
min_obj = obj.min()
grid_num = self.alg_policy.pareto_model_num
grid = np.linspace(min_obj, max_obj, num=grid_num + 1)
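        # Partition the objective range (model size) into pareto_model_num equal
        # bins; the mask computed below flags candidates falling outside the
        # current bin, so that the selection can be spread roughly uniformly
        # across the Pareto front.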
selected_idx = []
for idx in range(grid_num):
keep = (obj <= grid[idx]) | (obj > grid[idx + 1])
            sub_fitness = np.array(fitness)
from src.model.Interface import ICollaborativeModel, IContentModel, IBestModel
# ----- ABBREVIATIONS ------ #
# K1: keep-1-out
# CV: cross validation
class ItemCBF_CF(IContentModel):
"""
    Item CBF + CF
- MAP@10 K1-CV5 (only warm and target):
"""
from src.model.KNN.ItemKNNCBFCFRecommender import ItemKNNCBFCFRecommender
best_parameters = {'topK': 17, 'shrink': 1463, 'similarity': 'asymmetric', 'normalize': True,
'asymmetric_alpha': 0.07899555402911075, 'feature_weighting': 'TF-IDF'}
recommender_class = ItemKNNCBFCFRecommender
recommender_name = "ItemCBF_CF"
class P3Alpha(ICollaborativeModel):
"""
P3Alpha
- MAP@10 K1-CV (only warm): 0.0463
- MAP@10 K1-CV (only warm and target): TODO
"""
from course_lib.GraphBased.P3alphaRecommender import P3alphaRecommender
best_parameters = {'topK': 122, 'alpha': 0.38923114168898876, 'normalize_similarity': True}
recommender_class = P3alphaRecommender
recommender_name = "P3alpha"
class HybridNormWeightedAvgAll(IBestModel):
"""
Hybrid of Normalized weighted average ranking of almost all models
- MAP@10 K1-10CV (only warm) with ICM_all_weighted and UCM_all:
"""
best_parameters = {'strategy': 'norm_weighted_avg', 'multiplier_cutoff': 2, 'WEIGHTED_AVG_ITEM': 0.9679497374745649,
'S_PURE_SVD': 0.02023761457683704, 'S_IALS': 0.007225989992151629,
'USER_CBF_CF': 0.05179513388991243,
'USER_CF': 0.03061248068550649}
@classmethod
def _get_all_models(cls, URM_train, ICM_all, UCM_all):
from src.model import new_best_models
all_models = {'WEIGHTED_AVG_ITEM': new_best_models.WeightedAverageItemBased.get_model(URM_train, ICM_all),
'S_PURE_SVD': new_best_models.PureSVDSideInfo.get_model(URM_train, ICM_all),
'S_IALS': new_best_models.IALSSideInfo.get_model(URM_train, ICM_all),
'USER_CBF_CF': new_best_models.UserCBF_CF_Warm.get_model(URM_train, UCM_all),
'USER_CF': new_best_models.UserCF.get_model(URM_train)}
return all_models
@classmethod
def get_model(cls, URM_train, ICM_train, UCM_train):
from src.model.HybridRecommender.HybridRankBasedRecommender import HybridRankBasedRecommender
all_models = cls._get_all_models(URM_train, ICM_train, UCM_train)
hybrid = HybridRankBasedRecommender(URM_train)
for model_name, model_object in all_models.items():
hybrid.add_fitted_model(model_name, model_object)
hybrid.fit(**cls.get_best_parameters())
return hybrid
class UserCBF_Cold(IBestModel):
"""
User CBF tuned with URM_train and UCM (containing age, region, user_act)
- MAP on tuning (k_out_3 and testing on 2, 3, 4 len users): 0.0117
- MAP on cold users (k_1_out): 0.01735
"""
best_parameters = {'topK': 3372, 'shrink': 1086, 'similarity': 'asymmetric', 'normalize': True,
'asymmetric_alpha': 1.5033071260303803, 'feature_weighting': 'BM25',
'interactions_feature_weighting': 'BM25'}
@classmethod
def get_model(cls, URM_train, UCM_train):
from src.model.KNN.UserKNNCBFRecommender import UserKNNCBFRecommender
model = UserKNNCBFRecommender(URM_train=URM_train, UCM_train=UCM_train)
model.fit(**cls.get_best_parameters())
return model
class HybridDemographicWithLT23AndUT22(IBestModel):
"""
Final hybrid model composed by two hybrid: one for smaller profile len users and one for bigger profile len users
    - The threshold is set heuristically (I have tested thresholds 20 and 26, but there is not much change
      in the MAP)
- UCM_train is the one from get_UCM_train_new or get_UCM_all_new
- ICM_train is the one from get_ICM_train_new or get_ICM_all_new
"""
threshold = 23
@classmethod
def get_model(cls, URM_train, ICM_train, UCM_train):
from src.model.HybridRecommender.HybridDemographicRecommender import HybridDemographicRecommender
from src.model import best_models_lower_threshold_23, best_models_upper_threshold_22
import numpy as np
lt_23_recommender = best_models_lower_threshold_23.WeightedAverageItemBasedWithRP3.get_model(URM_train,
ICM_train)
ut_22_recommender = best_models_upper_threshold_22.WeightedAverageAll.get_model(URM_train, ICM_train, UCM_train)
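        # np.ediff1d over the CSR indptr gives each user's profile length (number
        # of interactions); users with at least `threshold` interactions are routed
        # to the lower-threshold-23 hybrid, the remaining users to the
        # upper-threshold-22 hybrid.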
lt_23_users_mask = np.ediff1d(URM_train.tocsr().indptr) >= cls.threshold
        lt_23_users = np.arange(URM_train.shape[0])
# Authors: CommPy contributors
# License: BSD 3-Clause
from itertools import product
from numpy import zeros, identity, arange, concatenate, log2, array, inf
from numpy.random import seed
from numpy.testing import run_module_suite, assert_allclose, dec, assert_raises, assert_array_equal
from commpy.channels import MIMOFlatChannel
from commpy.links import *
from commpy.modulation import QAMModem, mimo_ml, bit_lvl_repr, max_log_approx, PSKModem, Modem
from commpy.utilities import signal_power
@dec.slow
def test_bit_lvl_repr():
# Set seed
    seed(17121996)
import numpy as np
import rllab.misc.logger as logger
from rllab.sampler import parallel_sampler
from rllab.sampler.base import Sampler
from rllab.misc import ext
from rllab.misc import special
from rllab.misc import tensor_utils
from rllab.algos import util
def local_truncate_paths(paths, max_samples):
"""
Truncate the list of paths so that the total number of samples is almost equal to max_samples. This is done by
removing extra paths at the end of the list. But here, we do NOT make the last path shorter.
:param paths: a list of paths
:param max_samples: the absolute maximum number of samples
    :return: a list of paths, truncated so that the number of samples approximately equals max_samples
"""
# chop samples collected by extra paths
# make a copy
paths = list(paths)
total_n_samples = sum(len(path["rewards"]) for path in paths)
while len(paths) > 0 and total_n_samples - len(paths[-1]["rewards"]) >= max_samples:
total_n_samples -= len(paths.pop(-1)["rewards"])
return paths
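# Worked example (hypothetical numbers): with paths of lengths [40, 40, 50, 40] and
# max_samples = 100, the first check is 170 - 40 = 130 >= 100, so the last path is
# dropped; the next check is 130 - 50 = 80 >= 100, which fails, so three paths
# (130 samples) remain, slightly overshooting max_samples.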
class BatchSamplerPlus(Sampler):
def __init__(self, algo, **kwargs):
"""
:type algo: BatchPolopt
"""
self.algo = algo
self.experience_replay = []
self.env_interacts_memory = []
self.env_interacts = 0
self.total_env_interacts = 0
self.mean_path_len = 0
def start_worker(self):
parallel_sampler.populate_task(self.algo.env, self.algo.policy, scope=self.algo.scope)
def shutdown_worker(self):
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr):
cur_params = self.algo.policy.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_params,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
"""log_likelihoods for importance sampling"""
for path in paths:
logli = self.algo.policy.distribution.log_likelihood(path["actions"],path["agent_infos"])
path["log_likelihood"] = logli
"""keep data use per iteration approximately fixed"""
if not(self.algo.all_paths):
paths = local_truncate_paths(paths, self.algo.batch_size)
"""keep track of path length"""
self.env_interacts = sum([len(path["rewards"]) for path in paths])
self.total_env_interacts += self.env_interacts
self.mean_path_len = float(self.env_interacts)/len(paths)
"""manage experience replay for old batch reuse"""
self.experience_replay.append(paths)
self.env_interacts_memory.append(self.env_interacts)
if len(self.experience_replay) > self.algo.batch_aggregate_n:
self.experience_replay.pop(0)
self.env_interacts_memory.pop(0)
return paths
def process_samples(self, itr, paths):
"""
we will ignore paths argument and only use experience replay.
note: if algo.batch_aggregate_n = 1, then the experience replay will
only contain the most recent batch, and so len(all_paths) == 1.
"""
if self.algo.exploration_bonus:
self.compute_exploration_bonuses_and_statistics()
self.compute_epoch_weights()
all_paths = []
all_baselines = []
all_returns = []
self.IS_coeffs = [[] for paths in self.experience_replay]
for paths, weight, age in zip(self.experience_replay,self.weights,self.age):
b_paths, b_baselines, b_returns = self.process_single_batch(paths, weight, age)
all_paths += b_paths
all_baselines += [b_baselines]
all_returns += [b_returns]
samples_data = self.create_samples_dict(all_paths)
"""log all useful info"""
self.record_statistics(itr, all_paths, all_baselines, all_returns)
"""update vf and exploration bonus model"""
self.update_parametrized_models()
return samples_data
def compute_exploration_bonuses_and_statistics(self):
for paths in self.experience_replay:
for path in paths:
path["bonuses"] = self.algo.exploration_bonus.get_bonus(path)
self.bonus_total = sum([
sum([
sum(path["bonuses"])
for path in paths])
for paths in self.experience_replay])
self.bonus_mean = self.bonus_total / sum(self.env_interacts_memory)
self.new_bonus_total = sum([sum(path["bonuses"]) for path in self.experience_replay[-1]])
self.new_bonus_mean = self.new_bonus_total / self.env_interacts_memory[-1]
self.bonus_baseline = self.algo.exploration_lambda * \
min(0,self.bonus_mean / max(1,np.abs(self.bonus_mean)))
def compute_epoch_weights(self):
"""create weights, with highest weight on most recent batch"""
self.raw_weights = np.array(
[self.algo.batch_aggregate_coeff**j for j in range(len(self.experience_replay))],
dtype='float'
)
self.raw_weights /= sum(self.raw_weights)
self.raw_weights = self.raw_weights[::-1]
self.weights = self.raw_weights.copy()
"""reweight the weights by how many paths are in that batch """
if self.algo.relative_weights:
total_paths = sum([len(paths) for paths in self.experience_replay])
for j in range(len(self.weights)):
self.weights[j] *= total_paths / len(self.experience_replay[j])
self.age = np.arange(len(self.experience_replay))[::-1]
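        # Assuming batch_aggregate_coeff < 1, the reversal above gives the most
        # recently collected batch the largest weight; self.age records how many
        # iterations old each stored batch is (0 for the newest).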
def process_single_batch(self, paths, weight, age):
baselines = []
returns = []
if hasattr(self.algo.baseline, "predict_n"):
all_path_baselines = self.algo.baseline.predict_n(paths)
else:
all_path_baselines = [self.algo.baseline.predict(path) for path in paths]
for idx, path in enumerate(paths):
path_baselines = np.append(all_path_baselines[idx], 0)
deltas = path["rewards"] + \
self.algo.discount * path_baselines[1:] - \
path_baselines[:-1]
"""exploration bonuses"""
if self.algo.exploration_bonus:
path["bonuses"] *= self.algo.exploration_lambda
if self.algo.normalize_bonus:
path["bonuses"] /= max(1,np.abs(self.bonus_mean))
if self.algo.nonnegative_bonus_mean:
path["bonuses"] -= self.bonus_baseline
deltas += path["bonuses"]
"""recompute agent infos for old data"""
"""(necessary for correct reuse of old data)"""
if age > 0:
self.update_agent_infos(path)
"""importance sampling and batch aggregation"""
path["weights"] = weight * np.ones_like(path["rewards"])
if age > 0 and self.algo.importance_sampling:
self.compute_and_apply_importance_weights(path,age)
path["advantages"] = special.discount_cumsum(
deltas, self.algo.discount * self.algo.gae_lambda)
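            # deltas are the one-step TD residuals r_t + discount * V(s_{t+1}) - V(s_t);
            # discounting them by (discount * gae_lambda) above yields the GAE(lambda)
            # advantage estimate.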
path["returns"] = special.discount_cumsum(path["rewards"], self.algo.discount)
baselines.append(path_baselines[:-1])
returns.append(path["returns"])
return paths, baselines, returns
def update_agent_infos(self,path):
"""
        this updates the agent dist infos (i.e., mean & variance of the Gaussian policy dist)
        so that the probability of taking these actions can be computed under the most
        recent policy.
meanwhile, the log likelihood of taking the actions on the original behavior policy
can still be found in path["log_likelihood"].
"""
state_info_list = [path["agent_infos"][k] for k in self.algo.policy.state_info_keys]
input_list = tuple([path["observations"]] + state_info_list)
cur_dist_info = self.algo.dist_info_vars_func(*input_list)
for k in self.algo.policy.distribution.dist_info_keys:
path["agent_infos"][k] = cur_dist_info[k]
def compute_and_apply_importance_weights(self,path,age):
new_logli = self.algo.policy.distribution.log_likelihood(path["actions"],path["agent_infos"])
logli_diff = new_logli - path["log_likelihood"]
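        # logli_diff[t] = log pi_current(a_t | s_t) - log pi_behavior(a_t | s_t).
        # 'pd' (per-decision) mode weights step t by exp(sum_{t' <= t} logli_diff[t']),
        # i.e. the product of likelihood ratios up to and including t, computed below
        # via a reversed cumulative sum; 'pt' (per-trajectory) mode applies the single
        # ratio exp(sum_t logli_diff[t]) to the whole path.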
if self.algo.decision_weight_mode=='pd':
logli_diff = logli_diff[::-1]
log_decision_weighted_IS_coeffs = special.discount_cumsum(logli_diff,1)
IS_coeff = np.exp(log_decision_weighted_IS_coeffs[::-1])
elif self.algo.decision_weight_mode=='pt':
IS_coeff = np.exp(np.sum(logli_diff))
if self.algo.clip_IS_coeff_above:
IS_coeff = np.minimum(IS_coeff,self.algo.IS_coeff_upper_bound)
if self.algo.clip_IS_coeff_below:
IS_coeff = np.maximum(IS_coeff,self.algo.IS_coeff_lower_bound)
path["weights"] *= IS_coeff
self.IS_coeffs[age].append(IS_coeff)
def create_samples_dict(self, paths):
if not self.algo.policy.recurrent:
observations = tensor_utils.concat_tensor_list([path["observations"] for path in paths])
actions = tensor_utils.concat_tensor_list([path["actions"] for path in paths])
rewards = tensor_utils.concat_tensor_list([path["rewards"] for path in paths])
returns = tensor_utils.concat_tensor_list([path["returns"] for path in paths])
advantages = tensor_utils.concat_tensor_list([path["advantages"] for path in paths])
env_infos = tensor_utils.concat_tensor_dict_list([path["env_infos"] for path in paths])
agent_infos = tensor_utils.concat_tensor_dict_list([path["agent_infos"] for path in paths])
weights = tensor_utils.concat_tensor_list([path["weights"] for path in paths])
if self.algo.center_adv:
advantages = util.center_advantages(advantages)
if self.algo.positive_adv:
advantages = util.shift_advantages_to_positive(advantages)
samples_data = dict(
observations=observations,
actions=actions,
rewards=rewards,
returns=returns,
advantages=advantages,
env_infos=env_infos,
agent_infos=agent_infos,
weights=weights,
paths=paths,
)
else:
max_path_length = max([len(path["advantages"]) for path in paths])
# make all paths the same length (pad extra advantages with 0)
obs = [path["observations"] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
if self.algo.center_adv:
raw_adv = np.concatenate([path["advantages"] for path in paths])
adv_mean = np.mean(raw_adv)
adv_std = np.std(raw_adv) + 1e-8
adv = [(path["advantages"] - adv_mean) / adv_std for path in paths]
else:
adv = [path["advantages"] for path in paths]
adv = np.asarray([tensor_utils.pad_tensor(a, max_path_length) for a in adv])
actions = [path["actions"] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path["rewards"] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
returns = [path["returns"] for path in paths]
returns = tensor_utils.pad_tensor_n(returns, max_path_length)
agent_infos = [path["agent_infos"] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos]
)
env_infos = [path["env_infos"] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos]
)
weights = [path["weights"] for path in paths]
weights = tensor_utils.pad_tensor_n(weights, max_path_length)
valids = [np.ones_like(path["returns"]) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
samples_data = dict(
observations=obs,
actions=actions,
advantages=adv,
rewards=rewards,
returns=returns,
valids=valids,
agent_infos=agent_infos,
env_infos=env_infos,
weights=weights,
paths=paths,
)
return samples_data
def record_statistics(self, itr, paths, baselines, returns):
evs = [special.explained_variance_1d(
np.concatenate(baselines[i]),
np.concatenate(returns[i])
) for i in range(len(baselines))]
evs = evs[::-1]
average_discounted_return, undiscounted_returns, ent = self.statistics_for_new_paths()
logger.record_tabular('Iteration', itr)
logger.record_tabular('AverageDiscountedReturn',
average_discounted_return)
        logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test pulse builder context utilities."""
from math import pi
import numpy as np
from qiskit import circuit, compiler, pulse
from qiskit.pulse import builder, exceptions, macros
from qiskit.pulse.instructions import directives
from qiskit.pulse.transforms import target_qobj_transform
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeOpenPulse2Q
from qiskit.test.mock.utils import ConfigurableFakeBackend as ConfigurableBackend
from qiskit.pulse import library, instructions
class TestBuilder(QiskitTestCase):
"""Test the pulse builder context."""
def setUp(self):
super().setUp()
self.backend = FakeOpenPulse2Q()
self.configuration = self.backend.configuration()
self.defaults = self.backend.defaults()
self.inst_map = self.defaults.instruction_schedule_map
def assertScheduleEqual(self, program, target):
"""Assert an error when two pulse programs are not equal.
.. note:: Two programs are converted into standard execution format then compared.
"""
self.assertEqual(target_qobj_transform(program), target_qobj_transform(target))
class TestBuilderBase(TestBuilder):
"""Test builder base."""
def test_schedule_supplied(self):
"""Test that schedule is used if it is supplied to the builder."""
d0 = pulse.DriveChannel(0)
with pulse.build(name='reference') as reference:
with pulse.align_sequential():
pulse.delay(10, d0)
with pulse.build(schedule=reference) as schedule:
pass
self.assertScheduleEqual(schedule, reference)
self.assertEqual(schedule.name, 'reference')
def test_default_alignment_left(self):
"""Test default left alignment setting."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(0)
with pulse.build(default_alignment='left') as schedule:
pulse.delay(10, d0)
pulse.delay(20, d1)
with pulse.build(self.backend) as reference:
with pulse.align_left():
pulse.delay(10, d0)
pulse.delay(20, d1)
self.assertScheduleEqual(schedule, reference)
def test_default_alignment_right(self):
"""Test default right alignment setting."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(0)
with pulse.build(default_alignment='right') as schedule:
pulse.delay(10, d0)
pulse.delay(20, d1)
with pulse.build() as reference:
with pulse.align_right():
pulse.delay(10, d0)
pulse.delay(20, d1)
self.assertScheduleEqual(schedule, reference)
def test_default_alignment_sequential(self):
"""Test default sequential alignment setting."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(0)
with pulse.build(default_alignment='sequential') as schedule:
pulse.delay(10, d0)
pulse.delay(20, d1)
with pulse.build() as reference:
with pulse.align_sequential():
pulse.delay(10, d0)
pulse.delay(20, d1)
self.assertScheduleEqual(schedule, reference)
class TestContexts(TestBuilder):
"""Test builder contexts."""
def test_align_sequential(self):
"""Test the sequential alignment context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as schedule:
with pulse.align_sequential():
pulse.delay(3, d0)
pulse.delay(5, d1)
pulse.delay(7, d0)
reference = pulse.Schedule()
# d0
reference.insert(0, instructions.Delay(3, d0), inplace=True)
reference.insert(8, instructions.Delay(7, d0), inplace=True)
# d1
reference.insert(3, instructions.Delay(5, d1), inplace=True)
self.assertScheduleEqual(schedule, reference)
def test_align_left(self):
"""Test the left alignment context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
d2 = pulse.DriveChannel(2)
with pulse.build() as schedule:
with pulse.align_left():
pulse.delay(11, d2)
pulse.delay(3, d0)
with pulse.align_left():
pulse.delay(5, d1)
pulse.delay(7, d0)
reference = pulse.Schedule()
# d0
reference.insert(0, instructions.Delay(3, d0), inplace=True)
reference.insert(3, instructions.Delay(7, d0), inplace=True)
# d1
reference.insert(3, instructions.Delay(5, d1), inplace=True)
# d2
reference.insert(0, instructions.Delay(11, d2), inplace=True)
self.assertScheduleEqual(schedule, reference)
def test_align_right(self):
"""Test the right alignment context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
d2 = pulse.DriveChannel(2)
with pulse.build() as schedule:
with pulse.align_right():
with pulse.align_right():
pulse.delay(11, d2)
pulse.delay(3, d0)
pulse.delay(13, d0)
pulse.delay(5, d1)
reference = pulse.Schedule()
# d0
reference.insert(8, instructions.Delay(3, d0), inplace=True)
reference.insert(11, instructions.Delay(13, d0), inplace=True)
# d1
reference.insert(19, instructions.Delay(5, d1), inplace=True)
# d2
reference.insert(0, instructions.Delay(11, d2), inplace=True)
self.assertScheduleEqual(schedule, reference)
def test_inline(self):
"""Test the inlining context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as schedule:
pulse.delay(3, d0)
with pulse.inline():
# this alignment will be ignored due to inlining.
with pulse.align_right():
pulse.delay(5, d1)
pulse.delay(7, d0)
reference = pulse.Schedule()
# d0
reference += instructions.Delay(3, d0)
reference += instructions.Delay(7, d0)
# d1
reference += instructions.Delay(5, d1)
self.assertScheduleEqual(schedule, reference)
def test_transpiler_settings(self):
"""Test the transpiler settings context.
Tests that two cx gates are optimized away with higher optimization level.
"""
twice_cx_qc = circuit.QuantumCircuit(2)
twice_cx_qc.cx(0, 1)
twice_cx_qc.cx(0, 1)
with pulse.build(self.backend) as schedule:
with pulse.transpiler_settings(optimization_level=0):
builder.call(twice_cx_qc)
self.assertNotEqual(len(schedule.instructions), 0)
with pulse.build(self.backend) as schedule:
with pulse.transpiler_settings(optimization_level=3):
builder.call(twice_cx_qc)
self.assertEqual(len(schedule.instructions), 0)
def test_scheduler_settings(self):
"""Test the circuit scheduler settings context."""
inst_map = pulse.InstructionScheduleMap()
d0 = pulse.DriveChannel(0)
test_x_sched = pulse.Schedule()
test_x_sched += instructions.Delay(10, d0)
inst_map.add('x', (0,), test_x_sched)
ref_sched = pulse.Schedule()
ref_sched += pulse.instructions.Call(test_x_sched)
x_qc = circuit.QuantumCircuit(2)
x_qc.x(0)
with pulse.build(backend=self.backend) as schedule:
with pulse.transpiler_settings(basis_gates=['x']):
with pulse.circuit_scheduler_settings(inst_map=inst_map):
builder.call(x_qc)
self.assertScheduleEqual(schedule, ref_sched)
def test_phase_offset(self):
"""Test the phase offset context."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
with pulse.phase_offset(3.14, d0):
pulse.delay(10, d0)
reference = pulse.Schedule()
reference += instructions.ShiftPhase(3.14, d0)
reference += instructions.Delay(10, d0)
reference += instructions.ShiftPhase(-3.14, d0)
self.assertScheduleEqual(schedule, reference)
def test_frequency_offset(self):
"""Test the frequency offset context."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
with pulse.frequency_offset(1e9, d0):
pulse.delay(10, d0)
reference = pulse.Schedule()
reference += instructions.ShiftFrequency(1e9, d0)
reference += instructions.Delay(10, d0)
reference += instructions.ShiftFrequency(-1e9, d0)
self.assertScheduleEqual(schedule, reference)
def test_phase_compensated_frequency_offset(self):
"""Test that the phase offset context properly compensates for phase
accumulation."""
d0 = pulse.DriveChannel(0)
with pulse.build(self.backend) as schedule:
with pulse.frequency_offset(1e9, d0, compensate_phase=True):
pulse.delay(10, d0)
reference = pulse.Schedule()
reference += instructions.ShiftFrequency(1e9, d0)
reference += instructions.Delay(10, d0)
reference += instructions.ShiftPhase(
-2 * np.pi * ((1e9 * 10 * self.configuration.dt) % 1), d0)
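        # A frequency offset f applied for n samples accumulates phase 2*pi*f*n*dt;
        # only the fractional number of cycles matters, hence the modulo-1 before
        # converting back to radians.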
reference += instructions.ShiftFrequency(-1e9, d0)
self.assertScheduleEqual(schedule, reference)
class TestChannels(TestBuilder):
"""Test builder channels."""
def test_drive_channel(self):
"""Text context builder drive channel."""
with pulse.build(self.backend):
self.assertEqual(pulse.drive_channel(0), pulse.DriveChannel(0))
def test_measure_channel(self):
"""Text context builder measure channel."""
with pulse.build(self.backend):
self.assertEqual(pulse.measure_channel(0), pulse.MeasureChannel(0))
def test_acquire_channel(self):
"""Text context builder acquire channel."""
with pulse.build(self.backend):
self.assertEqual(pulse.acquire_channel(0), pulse.AcquireChannel(0))
def test_control_channel(self):
"""Text context builder control channel."""
with pulse.build(self.backend):
self.assertEqual(pulse.control_channels(0, 1)[0],
pulse.ControlChannel(0))
class TestInstructions(TestBuilder):
"""Test builder instructions."""
def test_delay(self):
"""Test delay instruction."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
pulse.delay(10, d0)
reference = pulse.Schedule()
reference += instructions.Delay(10, d0)
self.assertScheduleEqual(schedule, reference)
def test_play_parametric_pulse(self):
"""Test play instruction with parametric pulse."""
d0 = pulse.DriveChannel(0)
test_pulse = library.Constant(10, 1.0)
with pulse.build() as schedule:
pulse.play(test_pulse, d0)
reference = pulse.Schedule()
reference += instructions.Play(test_pulse, d0)
self.assertScheduleEqual(schedule, reference)
def test_play_sample_pulse(self):
"""Test play instruction with sample pulse."""
d0 = pulse.DriveChannel(0)
test_pulse = library.Waveform([0.0, 0.0])
with pulse.build() as schedule:
pulse.play(test_pulse, d0)
reference = pulse.Schedule()
reference += instructions.Play(test_pulse, d0)
self.assertScheduleEqual(schedule, reference)
def test_play_array_pulse(self):
"""Test play instruction on an array directly."""
d0 = pulse.DriveChannel(0)
test_array = np.array([0., 0.], dtype=np.complex_)
with pulse.build() as schedule:
pulse.play(test_array, d0)
reference = pulse.Schedule()
test_pulse = pulse.Waveform(test_array)
reference += instructions.Play(test_pulse, d0)
self.assertScheduleEqual(schedule, reference)
def test_play_name_argument(self):
"""Test name argument for play instruction."""
d0 = pulse.DriveChannel(0)
test_pulse = library.Constant(10, 1.0)
with pulse.build() as schedule:
pulse.play(test_pulse, channel=d0, name='new_name')
self.assertEqual(schedule.instructions[0][1].name, 'new_name')
def test_acquire_memory_slot(self):
"""Test acquire instruction into memory slot."""
acquire0 = pulse.AcquireChannel(0)
mem0 = pulse.MemorySlot(0)
with pulse.build() as schedule:
pulse.acquire(10, acquire0, mem0)
reference = pulse.Schedule()
reference += pulse.Acquire(10, acquire0, mem_slot=mem0)
self.assertScheduleEqual(schedule, reference)
def test_acquire_register_slot(self):
"""Test acquire instruction into register slot."""
acquire0 = pulse.AcquireChannel(0)
reg0 = pulse.RegisterSlot(0)
with pulse.build() as schedule:
pulse.acquire(10, acquire0, reg0)
reference = pulse.Schedule()
reference += pulse.Acquire(10, acquire0, reg_slot=reg0)
self.assertScheduleEqual(schedule, reference)
def test_acquire_qubit(self):
"""Test acquire instruction on qubit."""
acquire0 = pulse.AcquireChannel(0)
mem0 = pulse.MemorySlot(0)
with pulse.build() as schedule:
pulse.acquire(10, 0, mem0)
reference = pulse.Schedule()
reference += pulse.Acquire(10, acquire0, mem_slot=mem0)
self.assertScheduleEqual(schedule, reference)
def test_instruction_name_argument(self):
"""Test setting the name of an instruction."""
d0 = pulse.DriveChannel(0)
for instruction_method in [pulse.delay, pulse.set_frequency, pulse.set_phase,
pulse.shift_frequency, pulse.shift_phase]:
with pulse.build() as schedule:
instruction_method(0, d0, name='instruction_name')
self.assertEqual(schedule.instructions[0][1].name, 'instruction_name')
def test_set_frequency(self):
"""Test set frequency instruction."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
pulse.set_frequency(1e9, d0)
reference = pulse.Schedule()
reference += instructions.SetFrequency(1e9, d0)
self.assertScheduleEqual(schedule, reference)
def test_shift_frequency(self):
"""Test shift frequency instruction."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
pulse.shift_frequency(0.1e9, d0)
reference = pulse.Schedule()
reference += instructions.ShiftFrequency(0.1e9, d0)
self.assertScheduleEqual(schedule, reference)
def test_set_phase(self):
"""Test set phase instruction."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
pulse.set_phase(3.14, d0)
reference = pulse.Schedule()
reference += instructions.SetPhase(3.14, d0)
self.assertScheduleEqual(schedule, reference)
def test_shift_phase(self):
"""Test shift phase instruction."""
d0 = pulse.DriveChannel(0)
with pulse.build() as schedule:
pulse.shift_phase(3.14, d0)
reference = pulse.Schedule()
reference += instructions.ShiftPhase(3.14, d0)
self.assertScheduleEqual(schedule, reference)
def test_snapshot(self):
"""Test snapshot instruction."""
with pulse.build() as schedule:
pulse.snapshot('test', 'state')
reference = pulse.Schedule()
reference += instructions.Snapshot('test', 'state')
self.assertScheduleEqual(schedule, reference)
class TestDirectives(TestBuilder):
"""Test builder directives."""
def test_barrier_with_align_right(self):
"""Test barrier directive with right alignment context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
d2 = pulse.DriveChannel(2)
with pulse.build() as schedule:
with pulse.align_right():
pulse.delay(3, d0)
pulse.barrier(d0, d1, d2)
pulse.delay(11, d2)
with pulse.align_right():
pulse.delay(5, d1)
pulse.delay(7, d0)
reference = pulse.Schedule()
# d0
reference.insert(0, instructions.Delay(3, d0), inplace=True)
reference.insert(7, instructions.Delay(7, d0), inplace=True)
# d1
reference.insert(9, instructions.Delay(5, d1), inplace=True)
# d2
reference.insert(3, instructions.Delay(11, d2), inplace=True)
self.assertScheduleEqual(schedule, reference)
def test_barrier_with_align_left(self):
"""Test barrier directive with left alignment context."""
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
d2 = pulse.DriveChannel(2)
with pulse.build() as schedule:
with pulse.align_left():
pulse.delay(3, d0)
pulse.barrier(d0, d1, d2)
pulse.delay(11, d2)
with pulse.align_left():
pulse.delay(5, d1)
pulse.delay(7, d0)
reference = pulse.Schedule()
# d0
reference.insert(0, instructions.Delay(3, d0), inplace=True)
reference.insert(3, instructions.Delay(7, d0), inplace=True)
# d1
reference.insert(3, instructions.Delay(5, d1), inplace=True)
# d2
reference.insert(3, instructions.Delay(11, d2), inplace=True)
self.assertScheduleEqual(schedule, reference)
def test_barrier_on_qubits(self):
"""Test barrier directive on qubits."""
with pulse.build(self.backend) as schedule:
pulse.barrier(0, 1)
reference = pulse.ScheduleBlock()
reference += directives.RelativeBarrier(
pulse.DriveChannel(0),
pulse.DriveChannel(1),
pulse.MeasureChannel(0),
pulse.MeasureChannel(1),
pulse.ControlChannel(0),
pulse.ControlChannel(1),
pulse.AcquireChannel(0),
pulse.AcquireChannel(1)
)
self.assertEqual(schedule, reference)
def test_trivial_barrier(self):
"""Test that trivial barrier is not added."""
with pulse.build() as schedule:
pulse.barrier(pulse.DriveChannel(0))
self.assertEqual(schedule, pulse.ScheduleBlock())
class TestUtilities(TestBuilder):
"""Test builder utilities."""
def test_active_backend(self):
"""Test getting active builder backend."""
with pulse.build(self.backend):
self.assertEqual(pulse.active_backend(), self.backend)
def test_append_schedule(self):
"""Test appending a schedule to the active builder."""
d0 = pulse.DriveChannel(0)
reference = pulse.Schedule()
reference += instructions.Delay(10, d0)
with pulse.build() as schedule:
builder.call(reference)
self.assertScheduleEqual(schedule, reference)
def test_append_instruction(self):
"""Test appending an instruction to the active builder."""
d0 = pulse.DriveChannel(0)
instruction = instructions.Delay(10, d0)
with pulse.build() as schedule:
builder.append_instruction(instruction)
self.assertScheduleEqual(schedule, (0, instruction))
def test_qubit_channels(self):
"""Test getting the qubit channels of the active builder's backend."""
with pulse.build(self.backend):
qubit_channels = pulse.qubit_channels(0)
self.assertEqual(qubit_channels,
{pulse.DriveChannel(0),
pulse.MeasureChannel(0),
pulse.AcquireChannel(0),
pulse.ControlChannel(0),
pulse.ControlChannel(1)})
def test_active_transpiler_settings(self):
"""Test setting settings of active builder's transpiler."""
with pulse.build(self.backend):
self.assertFalse(pulse.active_transpiler_settings())
with pulse.transpiler_settings(test_setting=1):
self.assertEqual(
pulse.active_transpiler_settings()['test_setting'], 1)
def test_active_circuit_scheduler_settings(self):
"""Test setting settings of active builder's circuit scheduler."""
with pulse.build(self.backend):
self.assertFalse(pulse.active_circuit_scheduler_settings())
with pulse.circuit_scheduler_settings(test_setting=1):
self.assertEqual(
pulse.active_circuit_scheduler_settings()['test_setting'], 1)
def test_num_qubits(self):
"""Test builder utility to get number of qubits."""
with pulse.build(self.backend):
self.assertEqual(pulse.num_qubits(), 2)
def test_samples_to_seconds(self):
"""Test samples to time"""
config = self.backend.configuration()
config.dt = 0.1
with pulse.build(self.backend):
time = pulse.samples_to_seconds(100)
self.assertTrue(isinstance(time, float))
self.assertEqual(pulse.samples_to_seconds(100), 10)
def test_samples_to_seconds_array(self):
"""Test samples to time (array format)."""
config = self.backend.configuration()
config.dt = 0.1
with pulse.build(self.backend):
samples = np.array([100, 200, 300])
times = pulse.samples_to_seconds(samples)
self.assertTrue(np.issubdtype(times.dtype, np.floating))
np.testing.assert_allclose(times, np.array([10, 20, 30]))
def test_seconds_to_samples(self):
"""Test time to samples"""
config = self.backend.configuration()
config.dt = 0.1
with pulse.build(self.backend):
samples = pulse.seconds_to_samples(10)
self.assertTrue(isinstance(samples, int))
self.assertEqual(pulse.seconds_to_samples(10), 100)
def test_seconds_to_samples_array(self):
"""Test time to samples (array format)."""
config = self.backend.configuration()
config.dt = 0.1
with pulse.build(self.backend):
times = np.array([10, 20, 30])
samples = pulse.seconds_to_samples(times)
self.assertTrue(np.issubdtype(samples.dtype, np.integer))
np.testing.assert_allclose(pulse.seconds_to_samples(times),
                                       np.array([100, 200, 300]))
"""
Created on Dec 16 2021
@author: <NAME>
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has built-in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
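# Minimal usage sketch (the parameter values below are illustrative assumptions,
# not taken from this module):
#
#   bar = hallbar(Lx=100, Ly=20)            # 100 um x 20 um Hall bar
#   bar.solve(lmbda=10)                     # lmbda = sigma_xy / sigma_xx
#   v_top = bar.voltage_measurement(20, 80, side='top')   # longitudinal voltage drop
#   v_hall = bar.voltage_measurement(50, 50, side='hall') # Hall voltage across the bar
#   bar.plot_potential()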
class hallbar():
"""The class for a Hall bar device
Source is the left terminal, drain is the right terminal.
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:] # Boundaries at the left layer
L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:] # Boundaries at the right edges
# CURRENT THROUGH EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_L] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_R] = 0 # Dirichlet boundary condition at drain
g[self.ind_unravel_T] = 0 # No current through top
g[self.ind_unravel_B] = 0 # No current through bottom
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top', 'bottom', or 'hall') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif side=='bottom':
ya = 0
yb = 0
elif side=='hall':
ya = 0
yb = self.Ny-1
else:
            raise Exception("side must be 'top', 'bottom', or 'hall'")
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, ya] - self.u[xb, yb]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
def plot_resistance(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
r_top = (self.u[0:-1, -1] - self.u[1:, -1]) * 25812 * self.Ly / self.dx
r_bottom = (self.u[0:-1, 0] - self.u[1:, 0]) * 25812 * self.Ly / self.dx
rxx = 25812 / self.lmbda
fig = plt.figure(figsize = [8,5])
plt.plot(self.x[0:-1] - self.dx, r_top, 'r', label='top')
plt.plot(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')
plt.hlines(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey', label=r'$\rho_{xx}$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'$\rho_{xx}$ $(\Omega)$');
plt.legend()
plt.ylim([0, 12000]);
plt.show()
def add_contact(self, contact):
if contact.x1 > self.Lx or contact.x2 > self.Lx:
raise Exception('Contact out of bounds')
self.contacts.append(contact)
def measure_contact_voltageonly(self, contact):
# Args: contact instance
# Returns: measured resistivity
# Voltage is averaged across voltage tap
# THIS FUNCTION DOES NOT CHECK THE CURRENT!
        # This method assumes the two-terminal resistance is h/e^2, which in general is wrong
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
y = self.Ny-1
elif contact.side=='bottom':
y = 0
else:
raise Exception('Side must be top or bottom')
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, y].mean()
        # Average voltage B
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, y].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
# return apparent resistivity
return 25812 * v * self.Ly / dx
def measure_all_contacts_voltageonly(self):
# Args: none
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact_voltageonly(contact))
return result
def measure_contact(self, contact, sxx, sxy):
'''
Voltage is averaged across voltage tap
This method checks the current and outputs resistivity.
Args:
contact : contact instance
            sxx : longitudinal conductivity
            sxy : Hall conductivity; sxy/sxx should match self.lmbda
Returns: measured resistivity
'''
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif contact.side=='bottom':
ya = 0
yb = 0
elif contact.side=='hall':
ya = 0
yb = self.Ny-1
else:
            raise Exception("side must be 'top', 'bottom', or 'hall'")
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, ya].mean()
# Average voltage B
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, yb].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
i = self.measure_current(sxx, sxy)
# return apparent resistivity
if contact.side=='hall':
return v / i
else:
return v / i * self.Ly / dx
def measure_all_contacts(self, sxx, sxy):
        # Args: sxx, sxy : longitudinal and Hall conductivity (units e2/h)
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact(contact, sxx, sxy))
return result
def measure_current(self, sxx, sxy):
'''
ARGS : sxx and sxy : longitudinal and Hall conductivity. units e2/h
Returns : current moving through device
'''
# choose place to measure: halfway across Hallbar
ind_x = int(self.Nx/2)
# calculate electric field using E = -\nabla V
# x electric field, using second order central finite difference
E_x = 0.5 * (self.u[ind_x - 1, :] - self.u[ind_x + 1, :]) / self.dx
# y electric field, need forward/backward differences for edges
Dy_1d, D2y_1d = Diff_mat_1D(self.Ny)
E_y = - 0.5 * Dy_1d.dot(self.u[ind_x, :]) / self.dy
# calculate x current using j = sigma E; integrate and convert to SI units
        current = np.sum(sxx * E_x + sxy * E_y)
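# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): dimensions and the
# lmbda value below are arbitrary illustrative numbers.
def _demo_hallbar():
    bar = hallbar(Lx=100, Ly=20, Nx=151, Ny=101)            # coarse grid for speed
    bar.solve(lmbda=10)                                     # sigma_xy / sigma_xx = 10
    v_hall = bar.voltage_measurement(50, 50, side='hall')   # Hall voltage at x = 50
    v_long = bar.voltage_measurement(25, 75, side='top')    # longitudinal drop, top edge
    return v_hall, v_long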
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import print_function
import threading
import warnings
import cv2
import numpy as np
import scipy.ndimage as ndi
from keras import backend as K
from keras.utils.data_utils import Sequence
from scipy import linalg
from six.moves import range
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def transform(frame, transform_matrix, channel_axis, fill_mode, cval):
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
frame = np.rollaxis(frame, channel_axis, 0)
channel_images = [ndi.interpolation.affine_transform(
f_channel,
final_affine_matrix,
final_offset,
order=0,
mode=fill_mode,
cval=cval) for f_channel in frame]
frame = np.stack(channel_images, axis=0)
return np.rollaxis(frame, 0, channel_axis + 1)
def apply_transform(sample,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
# Arguments
sample: 2D numpy array, single sample.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
"""
if sample.ndim == 4:
channel_axis = channel_axis - 1
transformed_frames = [transform(frame, transform_matrix, channel_axis, fill_mode, cval) for frame in sample]
return np.stack(transformed_frames, axis=0)
if sample.ndim == 3:
return transform(sample, transform_matrix, channel_axis, fill_mode, cval)
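# Usage sketch (illustrative, not from the original file): compose a rotation,
# re-center it on the image with transform_matrix_offset_center, then warp a
# synthetic channels-last RGB sample with apply_transform.
def _demo_apply_transform():
    theta = np.pi / 6
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    sample = np.random.rand(64, 64, 3)                 # hypothetical image
    matrix = transform_matrix_offset_center(rotation, 64, 64)
    return apply_transform(sample, matrix, channel_axis=2)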
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def resize(image, target_dimensions):
channels = image.shape[-1]
return cv2.resize(image, target_dimensions, interpolation=cv2.INTER_CUBIC)\
.reshape(list(target_dimensions) + [channels])
def resize_sample(sample, target_dimensions=None):
if target_dimensions is None:
return sample
if sample.ndim == 4:
resized_images = [resize(frame, target_dimensions) for frame in sample]
return np.stack(resized_images, axis=0)
if sample.ndim == 3:
return resize(sample, target_dimensions)
class ImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channel.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided. This is
applied after the `preprocessing_function` (if any provided)
but before any other transformation.
        preprocessing_function: function that will be applied on each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_angle=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format="channels_last",
time_delay=None,
target_dimensions=None):
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_angle = rotation_angle
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
self.data_format = data_format
self.time_delay = time_delay
self.target_dimensions = target_dimensions
if data_format == 'channels_last':
if time_delay is None:
self.time_delay_axis, self.row_axis, self.col_axis, self.channel_axis = None, 1, 2, 3
else:
self.time_delay_axis, self.row_axis, self.col_axis, self.channel_axis = 1, 2, 3, 4
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, x, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='png', target_dimensions=None):
if target_dimensions:
self.target_dimensions = target_dimensions
return NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
# Arguments
x: batch of inputs to be normalized.
# Returns
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= np.std(x, keepdims=True) + 1e-7
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_center`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-7)
else:
warnings.warn('This ImageDataGenerator specifies '
'`featurewise_std_normalization`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
warnings.warn('This ImageDataGenerator specifies '
'`zca_whitening`, but it hasn\'t '
'been fit on any training data. Fit it '
'first by calling `.fit(numpy_data)`.')
return x
def resize(self, sample):
return resize_sample(sample, self.target_dimensions)
def get_random_transform_matrix(self, sample, seed=None):
"""Randomly augment a single image tensor.
# Arguments
sample: 3D or 4D tensor, single sample.
seed: random seed.
# Returns
A randomly generated transformation Matrix.
"""
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
if seed is not None:
np.random.seed(seed)
# use composition of homographies
# to generate final transform that needs to be applied
if self.rotation_angle:
theta = np.pi / 180 * np.random.uniform(-self.rotation_angle, self.rotation_angle)
else:
theta = 0
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * sample.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * sample.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = np.identity(3)
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = np.dot(transform_matrix, rotation_matrix)
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(transform_matrix, zoom_matrix)
return transform_matrix
def random_transform(self, sample, seed=None):
"""Randomly augment a single image tensor.
# Arguments
sample: 3D or 4D tensor, single sample.
seed: random seed.
# Returns
A randomly transformed version of the input (same shape).
"""
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
transform_matrix = self.get_random_transform_matrix(sample, seed)
if transform_matrix is not None:
h, w = sample.shape[img_row_axis], sample.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
sample = apply_transform(sample, transform_matrix, img_channel_axis,
fill_mode=self.fill_mode, cval=self.cval)
if self.channel_shift_range != 0:
sample = random_channel_shift(sample,
self.channel_shift_range,
img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
sample = flip_axis(sample, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
sample = flip_axis(sample, img_row_axis)
return sample
def fit(self, x,
augment=False,
rounds=1,
seed=None):
"""Fits internal statistics to some sample data.
Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
            x: Numpy array, the data to fit on. Should have rank 5, or rank 4 when time_delay is None.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Raises
ValueError: in case of invalid input `x`.
"""
x = np.asarray(x, dtype=K.floatx())
if x.shape[self.channel_axis] not in {1, 3, 4}:
warnings.warn(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(
self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(
x.shape) +
' (' + str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=0)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=0)
x /= (self.std + K.epsilon())
if self.zca_whitening:
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
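# Usage sketch with synthetic data (shapes and settings are illustrative only):
# fit the feature-wise statistics, then draw one augmented, normalized sample.
def _demo_image_data_generator():
    x = np.random.rand(16, 48, 48, 3).astype('float32')
    datagen = ImageDataGenerator(rotation_angle=15.,
                                 width_shift_range=0.1,
                                 horizontal_flip=True,
                                 featurewise_center=True)
    datagen.fit(x)
    augmented = datagen.random_transform(x[0])
    normalized = datagen.standardize(np.copy(augmented))
    return augmented, normalized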
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
            self.index_array = np.random.permutation(self.n)
#!/usr/bin/env python3
# stdlib modules
import itertools as it
import copy
# third party imports
import numpy as np
from openquake.hazardlib.geo.utils import OrthographicProjection
from openquake.hazardlib.geo import geodetic
# local imports
from impactutils.vectorutils.vector import Vector
from .utils import reverse_quad, get_quad_length
def _computeGC2(rupture, lon, lat, depth):
"""
Method for computing GC2 from a ShakeMap Rupture instance.
Args:
rupture (Rupture): ShakeMap rupture object.
lon (array): Numpy array of site longitudes.
lat (array): Numpy array of site latitudes.
depth (array): Numpy array of site depths.
Returns:
dict: Dictionary of GC2 distances. Keys include "T", "U", "rx"
"ry", "ry0".
"""
quadlist = rupture.getQuadrilaterals()
quadgc2 = copy.deepcopy(quadlist)
oldshape = lon.shape
if len(oldshape) == 2:
newshape = (oldshape[0] * oldshape[1], 1)
else:
newshape = (oldshape[0], 1)
# -------------------------------------------------------------------------
# Define a projection that spans sites and rupture
# -------------------------------------------------------------------------
all_lat = np.append(lat, rupture.lats)
all_lon = np.append(lon, rupture.lons)
west = np.nanmin(all_lon)
east = np.nanmax(all_lon)
south = np.nanmin(all_lat)
north = np.nanmax(all_lat)
proj = OrthographicProjection(west, east, north, south)
totweight = np.zeros(newshape, dtype=lon.dtype)
GC2T = np.zeros(newshape, dtype=lon.dtype)
GC2U = np.zeros(newshape, dtype=lon.dtype)
# -------------------------------------------------------------------------
# First sort out strike discordance and nominal strike prior to
# starting the loop if there is more than one group/trace.
# -------------------------------------------------------------------------
group_ind = rupture._getGroupIndex()
# Need group_ind as numpy array for sensible indexing...
group_ind_np = np.array(group_ind)
uind = np.unique(group_ind_np)
n_groups = len(uind)
if n_groups > 1:
# ---------------------------------------------------------------------
# The first thing we need to worry about is finding the coordinate
# shift. U's origin is "selected from the two endpoints most
# distant from each other."
# ---------------------------------------------------------------------
# Need to get index of first and last quad
# for each segment
iq0 = np.zeros(n_groups, dtype='int16')
iq1 = np.zeros(n_groups, dtype='int16')
for k in uind:
ii = [i for i, j in enumerate(group_ind) if j == uind[k]]
iq0[k] = int(np.min(ii))
iq1[k] = int(np.max(ii))
# ---------------------------------------------------------------------
# This is an iterator for each possible combination of traces
# including trace orientations (i.e., flipped).
# ---------------------------------------------------------------------
it_seg = it.product(it.combinations(uind, 2),
it.product([0, 1], [0, 1]))
# Placeholder for the trace pair/orientation that gives the
# largest distance.
dist_save = 0
for k in it_seg:
s0ind = k[0][0]
s1ind = k[0][1]
p0ind = k[1][0]
p1ind = k[1][1]
if p0ind == 0:
P0 = quadlist[iq0[s0ind]][0]
else:
P0 = quadlist[iq1[s0ind]][1]
if p1ind == 0:
P1 = quadlist[iq1[s1ind]][0]
else:
P1 = quadlist[iq0[s1ind]][1]
dist = geodetic.distance(P0.longitude, P0.latitude, 0.0,
P1.longitude, P1.latitude, 0.0)
if dist > dist_save:
dist_save = dist
A0 = P0
A1 = P1
# ---------------------------------------------------------------------
# A0 and A1 are the furthest two segment endpoints, but we still
# need to sort out which one is the "origin".
# ---------------------------------------------------------------------
# This goofy while-loop is to adjust the side of the rupture where the
# origin is located
dummy = -1
while dummy < 0:
A0.depth = 0
A1.depth = 0
p_origin = Vector.fromPoint(A0)
a0 = Vector.fromPoint(A0)
a1 = Vector.fromPoint(A1)
ahat = (a1 - a0).norm()
# Loop over traces
e_j = np.zeros(n_groups)
b_prime = [None] * n_groups
for j in range(n_groups):
P0 = quadlist[iq0[j]][0]
P1 = quadlist[iq1[j]][1]
P0.depth = 0
P1.depth = 0
p0 = Vector.fromPoint(P0)
p1 = Vector.fromPoint(P1)
b_prime[j] = p1 - p0
e_j[j] = ahat.dot(b_prime[j])
E = np.sum(e_j)
# List of discordancy
dc = [np.sign(a) * np.sign(E) for a in e_j]
b = Vector(0, 0, 0)
for j in range(n_groups):
b.x = b.x + b_prime[j].x * dc[j]
b.y = b.y + b_prime[j].y * dc[j]
b.z = b.z + b_prime[j].z * dc[j]
bhat = b.norm()
dummy = bhat.dot(ahat)
if dummy < 0:
tmpA0 = copy.deepcopy(A0)
A0 = copy.deepcopy(A1)
A1 = tmpA0
# ---------------------------------------------------------------------
# To fix discordancy, need to flip quads and rearrange
# the order of quadgc2
# ---------------------------------------------------------------------
# 1) flip quads
for i in range(len(quadgc2)):
if dc[group_ind[i]] < 0:
quadgc2[i] = reverse_quad(quadgc2[i])
# 2) rearrange quadlist order
qind = np.arange(len(quadgc2))
for i in range(n_groups):
qsel = qind[group_ind_np == uind[i]]
if dc[i] < 0:
qrev = qsel[::-1]
qind[group_ind_np == uind[i]] = qrev
quadgc2old = copy.deepcopy(quadgc2)
for i in range(len(qind)):
quadgc2[i] = quadgc2old[qind[i]]
# End of if-statement for adjusting group discordancy
s_i = 0.0
l_i = np.zeros(len(quadgc2))
for i in range(len(quadgc2)):
G0, G1, G2, G3 = quadgc2[i]
# Compute u_i and t_i for this quad
t_i = __calc_t_i(G0, G1, lat, lon, proj)
u_i = __calc_u_i(G0, G1, lat, lon, proj)
# Quad length (top edge)
l_i[i] = get_quad_length(quadgc2[i])
# ---------------------------------------------------------------------
# Weight of segment, three cases
# ---------------------------------------------------------------------
# Case 3: t_i == 0 and 0 <= u_i <= l_i
w_i = np.zeros_like(t_i)
# To avoid division by zero in totweight later on:
ix = (t_i == 0) & (0 <= u_i) & (u_i <= l_i[i])
totweight[ix] = 1.0
# Case 1:
ix = t_i != 0
w_i[ix] = (1.0 / t_i[ix]) * (np.arctan(
(l_i[i] - u_i[ix]) / t_i[ix]) - np.arctan(-u_i[ix] / t_i[ix]))
# Case 2:
ix = (t_i == 0) & ((u_i < 0) | (u_i > l_i[i]))
w_i[ix] = 1 / (u_i[ix] - l_i[i]) - 1 / u_i[ix]
totweight = totweight + w_i
GC2T = GC2T + w_i * t_i
if n_groups == 1:
GC2U = GC2U + w_i * (u_i + s_i)
else:
if i == 0:
qind = np.array(range(len(quadgc2)))
l_kj = 0
s_ij_1 = 0
else:
l_kj = l_i[(group_ind_np == group_ind_np[i]) & (qind < i)]
s_ij_1 = np.sum(l_kj)
# First endpoint in the current 'group' (or 'trace' in GC2 terms)
p1 = Vector.fromPoint(quadgc2[iq0[group_ind[i]]][0])
s_ij_2 = (p1 - p_origin).dot(np.sign(E) * ahat) / 1000.0
# Above is GC2N, for GC2T use:
# s_ij_2 = (p1 - p_origin).dot(bhat) / 1000.0
s_ij = s_ij_1 + s_ij_2
GC2U = GC2U + w_i * (u_i + s_ij)
s_i = s_i + l_i[i]
GC2T = GC2T / totweight
GC2U = GC2U / totweight
# Dictionary for holding the distances
distdict = dict()
distdict['T'] = copy.deepcopy(GC2T).reshape(oldshape)
distdict['U'] = copy.deepcopy(GC2U).reshape(oldshape)
# Take care of Rx
Rx = copy.deepcopy(GC2T) # preserve sign (no absolute value)
Rx = Rx.reshape(oldshape)
distdict['rx'] = Rx
# Ry
Ry = GC2U - s_i / 2.0
Ry = Ry.reshape(oldshape)
distdict['ry'] = Ry
# Ry0
Ry0 = np.zeros_like(GC2U)
ix = GC2U < 0
    Ry0[ix] = np.abs(GC2U[ix])
##############################################
# Density Adaptative Point Set Registration #
##############################################
import sys
import numpy as np
from numpy.linalg import svd, det
from time import time
from . import observation_weights
from . import point_cloud_plotting
def list_prod(X):
if len(X)==1:
return X[0]
elif len(X) == 0:
return 1.0
else:
return np.prod(np.stack(X, 2), 2)
def sqe(Y, X):
d = Y[:, :, None].transpose(1, 2, 0) - X[:, :, None].transpose(2, 1, 0)
s = np.sum(d * d, axis=2)
return s
def get_default_cluster_priors(num_clusters, gamma):
pk = 1 / (num_clusters + gamma) * np.ones((num_clusters, 1), dtype=np.float32)
return pk.transpose()
def get_randn_cluster_means(point_clouds, num_clusters):
""" Create random cluster means, distributed on a sphere.
The standard deviation of all point-cloud points is the sphere radius.
:param point_clouds: [ X1, X2, ... ]. Xi = 3 x Ni points [np.array].
:param num_clusters: Number of clusters to generate
:return: cluster means, (3, num_clusters) [np.array]
"""
    # Sample the unit sphere and scale with data standard deviation
X = np.random.randn(3, num_clusters).astype(np.float32)
X = X / np.linalg.norm(X, axis=0)
v = np.var(np.concatenate(point_clouds, 1), 1, keepdims=True)
means = X * np.sqrt(v)
return means
def get_default_cluster_precisions(point_clouds, cluster_means):
# Minimum coordinates in point clouds and clusters
min_xyz = [np.min(pcl, 1) for pcl in point_clouds] # list of per-pcl minima
min_xyz = min_xyz + [np.min(cluster_means, 1)] # append cluster_means minima
min_xyz = np.min(np.stack(min_xyz), 1) # get joint minimum
# Maximum coordinates in point clouds and clusters
max_xyz = [np.max(pcl, 1) for pcl in point_clouds]
max_xyz = max_xyz + [np.max(cluster_means, 1)]
max_xyz = np.max(np.stack(max_xyz), 1)
q = 1 / sqe(min_xyz[...,np.newaxis], max_xyz[...,np.newaxis])
Q = q * np.ones((cluster_means.shape[1], 1))
return Q.astype(np.float32)
def get_default_start_poses(point_clouds, cluster_means):
""" Create default start poses
:param cluster_means:
:param point_clouds:
:return:
"""
I = np.eye(3, dtype=np.float32) # Identity rotation
mu = np.mean(cluster_means, 1) # Mean of cluster means
poses = [(I, mu - np.mean(pcl, 1)) for pcl in point_clouds]
return poses
def get_default_beta(cluster_precisions, gamma):
h = 2 / np.mean(cluster_precisions)
beta = gamma / (h * (gamma + 1))
return float(beta)
class PSREG:
def __init__(self,
betas=None,
epsilon=None,
cluster_priors=None,
cluster_means=None,
cluster_precisions=None,
feature_distr=None,
debug=False,
use_kdtree=False,
fix_cluster_pos_iter=2):
"""
:param beta:
:param epsilon:
:param cluster_priors: (1,K) numpy.array (\rho_k)
:param cluster_means: (3,K) numpy.array (X)
:param cluster_precisions: (3,K) numpy.array (Q)
"""
self.betas = betas
self.epsilon = epsilon
self.cluster_priors = cluster_priors
self.cluster_means = cluster_means
self.cluster_precisions = cluster_precisions
self.feature_distr = feature_distr
self.debug = debug
self.use_kdtree = use_kdtree
self.fix_cluster_pos_iter = fix_cluster_pos_iter
def register_points(self, point_clouds, feature_likelihoods, num_iters, start_poses, show_progress=False, observation_weight_function=observation_weights.default_uniform, ow_args=()):
"""
:param point_clouds: [ X1, X2, ... ]. Xi = (3, Ni) numpy.array
:param num_iters: Number of iterations to run
:param start_poses: [ (R1, t1), (R2, t2) ... ]
Ri = pcl-to-world rotation (3,3) numpy.array,
ti = pcl-to-world translation vector (3,1) numpy.array
:return:
"""
N = len(point_clouds)
Vs = point_clouds
Ps = start_poses
pk = self.cluster_priors
X = self.cluster_means
Q = self.cluster_precisions
fd = self.feature_distr
ow_reg_factor = 8.0
fts = feature_likelihoods
# Compute the observation weights
observation_weights = [observation_weight_function(V.transpose(), ow_args) for V in Vs]
TVs = [R @ V + t[..., np.newaxis] for V, (R, t) in zip(Vs, Ps)]
for i in range(len(observation_weights)):
m = np.sum(observation_weights[i])/observation_weights[i].shape[0]
observation_weights[i][np.where(observation_weights[i] > m * ow_reg_factor)] = m * ow_reg_factor
ds = [sqe(TV, X) for TV in TVs]
t_tot = time()
for i in range(num_iters):
t0 = time()
a_s, Ls, Rs, ts, TVs, X, Q, den, fd, ds = self._iterate(TVs, X, pk, Q, fd, Vs, fts, ds, observation_weights, i)
if show_progress:
print("%03d: %.1f ms" % (i+1, (time() - t0) * 1000))
tot_time = time() - t_tot
print("tot time %03d: %.1f ms" % (i+1, (tot_time) * 1000))
if self.debug:
point_cloud_plotting.plotCloudsModel(TVs, X, 56)
return TVs, X
# (uniform priors so far...)
def _iterate(self, TVs, X, pk, Q, feature_distr, Vs, features, ds, ows, current_iter):
""" Run one cppsr iteraton """
M = len(TVs)
a_s = np.ndarray(M, dtype=object)
Ls = np.ndarray(M, dtype=object)
Rs = np.ndarray(M, dtype=object)
ts = np.ndarray(M, dtype=object)
TV2s = np.ndarray(M, dtype=object)
ac_den = np.ndarray(M, dtype=object)
ap = np.ndarray(M, dtype=object)
num_features = len(feature_distr)
pyz_feature = np.ndarray((M, num_features), dtype=object)
Qt = Q.transpose()
for i, (TV, V, d, ow) in enumerate(zip(TVs, Vs, ds, ows)):
# Posteriors
a = pk * np.power(Qt, 1.5) * np.exp(-0.5 * Qt * d)
ap[i] = a.copy()
if features:
for j, (fl, fd) in enumerate(zip(features, feature_distr)):
# the joint feature distribution p(y|z,th)
pyz_feature[i][j] = fl[i] @ fd
a = list_prod(pyz_feature[i]) * a
ac_den[i] = np.sum(a, 1, keepdims=True) + self.betas
a = a / ac_den[i] # normalize row-wise
a = a * ow # apply observation weights
L = np.sum(a, 0, keepdims=True).transpose()
W = (V @ a) * Qt
b = L * Q # weights, b
mW = np.sum(W, 1, keepdims=True) # mean of W
mX = X @ b # mean of X
z = L.transpose() @ Q # sumOfWeights
P = (X @ W.transpose()) - (mX @ mW.transpose()) / z
# Compute R and t
uu, _, vv = svd(P)
vv = vv.transpose() # Note: v is transposed compared to matlab's svd()
S = np.diag([1, 1, det(uu @ vv)]).astype('float32')
R = uu @ S @ vv.transpose()
t = (mX - R @ mW) / z
TV = R @ V + t # transform V
a_s[i] = a
Ls[i] = L
Rs[i] = R
ts[i] = t
TV2s[i] = TV
TVs = TV2s
# Update X
den = Ls[0].copy()
for L in Ls[1:]:
den += L
den = den.transpose()
if self.fix_cluster_pos_iter < current_iter:
X = TVs[0] @ a_s[0]
for TV, a in zip(TVs[1:], a_s[1:]):
X += TV @ a
X = X / den
# Update Q
ds2 = [sqe(TV, X) for TV in TVs]
wn = np.sum(a_s[0] * ds2[0], 0, keepdims=True)
for distances, a in zip(ds2[1:], a_s[1:]):
            wn += np.sum(a * distances, 0, keepdims=True)
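# Minimal registration sketch (not part of the original module). point_clouds
# is assumed to be a list of (3, Ni) float32 arrays; feature likelihoods are
# omitted, so only the geometric term drives the fit.
def _demo_psreg(point_clouds, num_clusters=64, gamma=0.1, num_iters=30):
    pk = get_default_cluster_priors(num_clusters, gamma)
    X = get_randn_cluster_means(point_clouds, num_clusters)
    Q = get_default_cluster_precisions(point_clouds, X)
    beta = get_default_beta(Q, gamma)
    poses = get_default_start_poses(point_clouds, X)
    reg = PSREG(betas=beta, epsilon=1e-5, cluster_priors=pk,
                cluster_means=X, cluster_precisions=Q, feature_distr=[])
    return reg.register_points(point_clouds, [], num_iters, poses)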
'''
Table of Contents
Functions and Interdependencies:
butter_bandpass
butter_bandpass_filter
- butter_bandpass
mtaper_specgram
simple_cwt
'''
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
import pywt
def butter_bandpass(lowcut, highcut, fs, order=5, plot_pref=True):
'''
    Designs a Butterworth bandpass filter.
    Adapted from a Stack Overflow answer (original source no longer known).
RH 2021
Args:
lowcut (scalar):
frequency (in Hz) of low pass band
highcut (scalar):
frequency (in Hz) of high pass band
fs (scalar):
sample rate (frequency in Hz)
order (int):
order of the butterworth filter
Returns:
b (ndarray):
Numerator polynomial coeffs of the IIR filter
a (ndarray):
Denominator polynomials coeffs of the IIR filter
'''
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
if plot_pref:
w, h = scipy.signal.freqz(b, a, worN=2000)
plt.figure()
plt.plot((fs * 0.5 / np.pi) * w, abs(h), label="order = %d" % order)
plt.xlabel('frequency (Hz)')
plt.ylabel('frequency response (a.u)')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, axis=-1, order=5, plot_pref=False):
'''
applies a butterworth bandpass filter
RH 2021
Args:
data (ndarray):
data array. filtering done on
defined axis
lowcut (scalar):
frequency (in Hz) of low pass band
highcut (scalar):
frequency (in Hz) of high pass band
fs (scalar):
sample rate (frequency in Hz)
order (int):
order of the butterworth filter
Returns:
y (ndarray):
filtered data array
'''
b, a = butter_bandpass(lowcut, highcut, fs, order=order, plot_pref=plot_pref)
y = scipy.signal.lfilter(b, a, data, axis=axis)
return y
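# Usage sketch (illustrative values): band-pass a noisy 10 Hz sine sampled at
# 1 kHz, keeping the 5-15 Hz band.
def _demo_butter_bandpass_filter():
    fs = 1000
    t = np.arange(0, 2, 1 / fs)
    noisy = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)
    return butter_bandpass_filter(noisy, lowcut=5, highcut=15, fs=fs,
                                  order=4, plot_pref=False)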
def mtaper_specgram(
signal,
nw=2.5,
ntapers=None,
win_len=0.1,
win_overlap=0.09,
fs=int(192e3),
clip=None,
freq_res_frac=1,
mode='psd',
**kwargs
):
"""
Multi-taper spectrogram
RH 2021
Args:
signal (array type):
Signal.
nw (float):
Time-bandwidth product
ntapers (int):
Number of tapers (None to set to 2 * nw -1)
win_len (float):
Window length in seconds
win_overlap (float):
Window overlap in seconds
fs (float):
Sampling rate in Hz
clip (2-tuple of floats):
Normalize amplitudes to 0-1 using clips (in dB)
freq_res_frac (float):
            Frequency resolution fraction; generates nfft. If None, then
            nfft=None, which makes nfft = nperseg = len_samples.
            Otherwise nfft = freq_res_frac * round(win_len * fs).
mode (string):
mode of the scipy.signal.spectrogram to use. Can be
            'psd', 'complex', 'magnitude', 'angle', 'phase'
**kwargs:
Additional arguments for scipy.signal.spectrogram
Returns:
f (ndarray):
Frequency bin centers
t (ndarray):
Time indices
sxx (ndarray):
Spectrogram
"""
len_samples = np.round(win_len * fs).astype("int")
if freq_res_frac is None:
nfft = None
else:
nfft = freq_res_frac*len_samples
if ntapers is None:
ntapers = int(nw * 2)
overlap_samples = np.round(win_overlap * fs)
sequences, r = scipy.signal.windows.dpss(
len_samples, NW=nw, Kmax=ntapers, sym=False, norm=2, return_ratios=True
)
sxx_ls = None
for sequence, weight in zip(sequences, r):
f, t, sxx = scipy.signal.spectrogram(
signal,
fs=fs,
window=sequence,
nperseg=len_samples,
noverlap=overlap_samples,
nfft=nfft,
detrend='constant',
return_onesided=True,
scaling='density',
axis=-1,
mode=mode,
**kwargs
)
if sxx_ls is None:
sxx_ls = sxx * weight
else:
sxx_ls += np.abs(sxx * weight)
sxx = sxx_ls / len(sequences)
if clip is not None:
sxx = 20 * np.log10(sxx)
sxx = sxx - clip[0]
sxx[sxx < 0] = 0
sxx[sxx > (clip[1] - clip[0])] = clip[1] - clip[0]
sxx /= clip[1] - clip[0]
return f, t, sxx
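# Usage sketch (illustrative parameters, not tuned): multi-taper spectrogram of
# a synthetic chirp-like signal sampled at 20 kHz.
def _demo_mtaper_specgram():
    fs = 20000
    t = np.arange(0, 1, 1 / fs)
    chirp = np.sin(2 * np.pi * (500 + 2000 * t) * t)
    f, t_spec, sxx = mtaper_specgram(chirp, nw=2.5, win_len=0.05,
                                     win_overlap=0.04, fs=fs)
    return f, t_spec, sxx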
def simple_cwt(
X,
freqs_toUse=None,
fs=30,
wavelet_type='cmor',
bwf=None,
cf=None,
psd_scaling=True,
plot_pref=True,
axis=-1):
'''
performs a simple continuous wavelet transform (cwt) using pywt.cwt
RH 2021
Args:
X (ndarray):
data array
freqs_toUse (1-D ndarray):
values of frequencies to perform cwt on
fs (scalar): sample rate in Hz
wavelet_type (string):
name of wavelet type to use. See pywt.wavelist() for all
possible inputs
bwf (scalar):
bandwidth (in units of frequency). Used only if using complex
morlet ('cmor')
cf (scalar):
center frequency. Used only if using complex morlet ('cmor')
axis (int):
axis along which to perform cwt
psd_scaling (bool):
preference of whether to scale the output to compute the power
spectral density or leave as raw output of pywt.cwt
Returns:
coeff (ndarray):
output cwt array (with temporal dimension=='axis').
A natural way to normalize output is to put it in units of
'spectral density' = np.abs(coeff**2 / (1/freqs_toUse)[:,None])
Another nice normalization is
np.abs(coeff / (1/freqs_toUse)[:,None]**1)**1.5
'''
if wavelet_type=='cmor' and bwf is None:
bwf = 2
if wavelet_type=='cmor' and cf is None:
cf = 1
waveletname = f'{wavelet_type}{bwf}-{cf}'
if freqs_toUse is None:
freqs_toUse = np.logspace(np.log(fs/30), np.log(fs/2), 30, base=np.exp(1))
scales = fs/freqs_toUse
coeff, freq = pywt.cwt(data=X,
scales=scales,
wavelet=waveletname,
sampling_period=1/fs,
axis=axis)
if psd_scaling:
coeff = np.abs(coeff**2 / (1/freqs_toUse)[:,None])
if plot_pref:
n_ticks = min(len(freq) , 10)
tick_spacing = len(freq)//n_ticks
ticks_toUse = np.arange(0,len(freq), tick_spacing)
fig, ax = plt.subplots()
if psd_scaling:
ax.imshow(coeff, aspect='auto', origin='lower')
else:
            ax.imshow(np.abs(coeff))
import cv2
import numpy as np
from detection.utils.box_tools import validate_boxes, clip_boxes
class Transformer:
def __init__(self, operations):
"""
Wrapper that applies every operation in 'operations' on all given images and labels.
:param operations: Array of operation classes or instance of class 'TransformationChain'
"""
if not (isinstance(operations, list) or isinstance(operations, TransformationChain)):
raise ValueError('The operations passed to the Transformer need to be either a list or an instance of a '
'TransformationChain')
self.operations = operations
def __call__(self, images, labels=None):
processed_images = []
if labels is not None:
processed_labels = []
for image, label in zip(images, labels):
for transformation in self.operations:
image, label = transformation(image, label)
processed_images.append(image)
processed_labels.append(label)
return np.array(processed_images), np.array(processed_labels)
else:
for image in images:
for transformation in self.operations:
image = transformation(image, None)
processed_images.append(image)
return np.array(processed_images)
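# Usage sketch (hypothetical inputs): images is an iterable of HxWx3 uint8
# arrays and labels holds absolute [xmin, ymin, xmax, ymax] boxes per image.
# Resize to a fixed size first, then apply the second predefined chain.
def _demo_transformer(images, labels):
    resizer = Transformer([Resize(output_width=300, output_height=300)])
    images, labels = resizer(images, labels)
    augmenter = Transformer(TransformationChain(1))
    return augmenter(images, labels)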
class TransformationChain:
def __init__(self, number):
self.number = number
self.chain = [
[DataTypeConverter(target='float'),
Brightness(min_value=-50, max_value=50, apply=0.5),
Contrast(min_value=0.5, max_value=1.5, apply=0.5),
DataTypeConverter(target='int'),
VerticalFlip(apply=0.5),
HorizontalFlip(apply=0.5)],
[DataTypeConverter(target='float'),
Brightness(min_value=-40, max_value=40, apply=0.5),
Contrast(min_value=0.6, max_value=1.4, apply=0.5),
DataTypeConverter(target='int'),
ColorSpaceConverter(actual='RGB', target='HSV'),
DataTypeConverter(target='float'),
Hue(min_value=-20, max_value=20, apply=0.5),
Saturation(min_value=-20, max_value=20, apply=0.5),
DataTypeConverter(target='int'),
ColorSpaceConverter(actual='HSV', target='RGB'),
HorizontalFlip(apply=0.5),
VerticalFlip(apply=0.5)
]
]
if self.number > len(self.chain) - 1:
raise IndexError('The chain with the number {0} does not exist. '
'Please choose one of: {1}'.format(self.number, str(list(range(len(self.chain))))))
def __len__(self):
return len(self.chain[self.number])
def __iter__(self):
for item in range(len(self)):
yield self.chain[self.number][item]
class Resize:
def __init__(self, output_width, output_height, interpolation=cv2.INTER_LINEAR):
"""
Class to resizing images and labels.
:param output_width:
:param output_height:
:param interpolation: the algorithm that calculates the resizing
"""
algorithms = {'linear': cv2.INTER_LINEAR, 'cubic': cv2.INTER_CUBIC, 'nearest': cv2.INTER_NEAREST}
self.output_width = output_width
self.output_height = output_height
if isinstance(interpolation, str):
try:
self.interpolation = algorithms[interpolation]
except KeyError:
raise KeyError(
'Unknown algorithm {0}. Please choose one of: {1}'.format(interpolation, algorithms.keys()))
else:
self.interpolation = interpolation
def __call__(self, image, labels=None):
height, width, channel = image.shape
ratio_w = self.output_width / width
ratio_h = self.output_height / height
image = cv2.resize(image, dsize=(self.output_width, self.output_height),
interpolation=self.interpolation)
if labels is not None:
resized_labels = np.copy(labels)
resized_labels[:, [0, 2]] = np.round(resized_labels[:, [0, 2]] * ratio_w, decimals=0)
resized_labels[:, [1, 3]] = np.round(resized_labels[:, [1, 3]] * ratio_h, decimals=0)
return image, resized_labels
return image
class Transformation:
def __init__(self, apply=1.0):
"""
Basic class to implement a transformation.
:param apply: the probability that the transformation is applied
"""
self.skip = apply
def apply_transformation(self, image, labels):
return image, labels
def __call__(self, image, labels):
if np.random.uniform(0, 1) < self.skip:
return self.apply_transformation(image, labels)
else:
return image, labels
class Brightness(Transformation):
def __init__(self, min_value=-50, max_value=50, apply=1.0):
super().__init__(apply=apply)
self.min = min_value
self.max = max_value
def apply_transformation(self, image, labels):
if image.dtype == np.float32:
x = np.random.randint(self.min, self.max)
img = np.clip(image + x, a_min=0, a_max=255)
return img, labels
else:
raise ValueError("Cannot apply brightness conversion to INT array. Please convert to float.")
class Contrast(Transformation):
def __init__(self, min_value=0.5, max_value=1.5, apply=1.0):
super().__init__(apply=apply)
self.min = min_value
self.max = max_value
def apply_transformation(self, image, labels):
x = np.random.uniform(self.min, self.max)
image = np.clip(127.5 + x * (image - 127.5), 0, 255)
return image, labels
class Hue(Transformation):
def __init__(self, min_value=-20, max_value=20, apply=1.0):
super().__init__(apply=apply)
self.min = min_value
self.max = max_value
def apply_transformation(self, image, labels):
# apply random hue
random_hue = np.random.randint(self.min, self.max)
image[:, :, 0] = (image[:, :, 0] + random_hue) % 180
return image, labels
class Saturation(Transformation):
def __init__(self, min_value=-20, max_value=20, apply=1.0):
super().__init__(apply=apply)
self.min = min_value
self.max = max_value
def apply_transformation(self, image, labels):
# apply random saturation
random_saturation = np.random.randint(self.min, self.max)
        image[:, :, 1] = (image[:, :, 1] + random_saturation) % 255
return image, labels
class Value(Transformation):
def __init__(self, min_value=-20, max_value=20, apply=1.0):
super().__init__(apply=apply)
self.min = min_value
self.max = max_value
def apply_transformation(self, image, labels):
# apply random value
random_value = np.random.randint(self.min, self.max)
        image[:, :, 2] = (image[:, :, 2] + random_value) % 255
return image, labels
class HorizontalFlip(Transformation):
def __init__(self, apply=1.0):
super().__init__(apply=apply)
def apply_transformation(self, image, labels):
new_labels = np.copy(labels)
new_labels[:, [0, 2]] = image.shape[1] - new_labels[:, [2, 0]]
return image[:, ::-1], new_labels
class VerticalFlip(Transformation):
def __init__(self, apply=1.0):
super().__init__(apply=apply)
def apply_transformation(self, image, labels):
new_labels = np.copy(labels)
new_labels[:, [1, 3]] = image.shape[0] - new_labels[:, [3, 1]]
return image[::-1], new_labels
class DataTypeConverter:
def __init__(self, target='float'):
self.target = target
def __call__(self, image, labels):
if self.target.lower() == 'float':
return image.astype(np.float32), labels
else:
return np.round(image, decimals=0).astype(np.uint8), labels
class ColorSpaceConverter:
def __init__(self, actual, target):
transformations = {('BGR', 'RGB'): cv2.COLOR_BGR2RGB, ('RGB', 'BGR'): cv2.COLOR_RGB2BGR,
('RGB', 'HSV'): cv2.COLOR_RGB2HSV, ('HSV', 'RGB'): cv2.COLOR_HSV2RGB}
try:
self.transformation = transformations[(actual.upper(), target.upper())]
except KeyError:
raise KeyError('The transformation from {0} to {1} does not exist. Please try one of those: {2}'.format(
actual, target, transformations.keys()
))
def __call__(self, image, labels=None):
conv_img = cv2.cvtColor(image, code=self.transformation)
return (conv_img, labels) if labels is not None else conv_img
class Zoom(Transformation):
"""
Zooms into the image and applies a random translation in x and y direction. The zoomed image is only shifted so far
that it remains within the borders of the original image. The output size is always equal to the input size.
"""
def __init__(self, scale=1.5, min_scale=None, max_scale=None, min_area=500, min_objects=1, apply=1.0):
"""
:param scale: a fixed float value that specifies the scaling. Range 1.0 <= scale <= 3.0
:param min_scale: If the value is set the scaling is random and the fixed value is overwritten. This value
is the lower bound.
:param max_scale: If the value is set the scaling is random and the fixed value is overwritten. This value
is the upper bound.
:param min_area: filter all boxes that have a smaller area than this value
:param min_objects: the creates sample needs to have at least this values of objects to be valid otherwise
this transformation is withdrawn
:param apply: probability that this transformation is applied
"""
super().__init__(apply=apply)
        if not (isinstance(scale, float) and 1.0 <= scale <= 3.0):
raise ValueError('The scale factor needs to be a float between 1.0 and 3.0.')
self.scale = scale
self.min_scale = min_scale
self.max_scale = max_scale
self.min_area = min_area
self.min_objects = min_objects
def apply_transformation(self, image, labels):
new_image = np.copy(image)
if self.min_scale is not None and self.max_scale is not None:
self.scale = np.random.uniform(self.min_scale, self.max_scale)
height, width, _ = new_image.shape
new_height, new_width = height * self.scale, width * self.scale
trans_x = np.random.randint(width - new_width, 0)
trans_y = np.random.randint(height - new_height, 0)
transformation_matrix = np.array([[self.scale, 0, trans_x],
[0, self.scale, trans_y]])
new_image = cv2.warpAffine(new_image, transformation_matrix, (width, height))
new_labels = np.copy(labels)
mins = np.hstack([new_labels[:, :2], np.ones((new_labels.shape[0], 1))]).T
maxs = np.hstack([new_labels[:, 2:], np.ones((new_labels.shape[0], 1))]).T
        new_mins = np.dot(transformation_matrix, mins)
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
def create_plots(training_loss, reg_losses, validation_loss, num_epochs, regularization, save_path):
    epochs = np.arange(1, num_epochs, 1)
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
import uuid
from datetime import datetime
from logging import NOTSET
from typing import Dict, List, Tuple, Union
# Libs
import jsonschema
import numpy as np
import pandas as pd
import torch as th
from flask import jsonify, request
from flask_restx import fields
from sklearn.metrics import (
r2_score,
mean_squared_error,
mean_absolute_error,
accuracy_score,
roc_curve,
roc_auc_score,
auc,
precision_recall_curve,
precision_score,
recall_score,
f1_score,
confusion_matrix
)
from sklearn.metrics.cluster import contingency_matrix
from sklearn.preprocessing import LabelBinarizer
from tinydb import TinyDB, Query, where
from tinydb.middlewares import CachingMiddleware
from tinydb.storages import JSONStorage
from tinyrecord import transaction
from tinydb_serialization import SerializationMiddleware
from tinydb_smartcache import SmartCacheTable
# Custom
from rest_rpc import app
from rest_rpc.core.datetime_serialization import (
DateTimeSerializer,
TimeDeltaSerializer
)
##################
# Configurations #
##################
SOURCE_FILE = os.path.abspath(__file__)
schemas = app.config['SCHEMAS']
payload_template = app.config['PAYLOAD_TEMPLATE']
label_binarizer = LabelBinarizer()
logging = app.config['NODE_LOGGER'].synlog
logging.debug("utils.py logged", Description="No Changes")
####################
# Helper Functions #
####################
def construct_combination_key(expt_id, run_id):
return str((expt_id, run_id))
############################################
# REST Response Formatting Class - Payload #
############################################
class Payload:
""" Helper class to standardise response formatting for the REST-RPC service
in order to ensure compatibility between the TTP's & Workers' Flask
interfaces
Attributes:
# Private Attributes
__template (dict): Configured payload template
# Public Attributes
subject (str): Topic of data in payload (i.e. name of table accessed)
Args:
subject (str): Topic of data in payload (i.e. name of table accessed)
namespace (flask_restx.Namespace): Namespace API to construct models in
model (flask_restx.Model): Seeding model to propagate
"""
def __init__(self, subject, namespace, model):
self.__template = payload_template.copy()
self.subject = subject
payload_model = namespace.model(
name="payload",
model={
'apiVersion': fields.String(required=True),
'success': fields.Integer(required=True),
'status': fields.Integer(required=True),
'method': fields.String(),
'params': fields.Nested(
namespace.model(
name="route_parameters",
model={
'collab_id': fields.String(),
'project_id': fields.String(),
'expt_id': fields.String(),
'run_id': fields.String(),
}
),
skip_none=True
)
}
)
self.singular_model = namespace.inherit(
"payload_single",
payload_model,
{'data': fields.Nested(model, required=True, skip_none=True)}
)
self.plural_model = namespace.inherit(
"payload_plural",
payload_model,
{
'data': fields.List(
fields.Nested(model, skip_none=True),
required=True
)
}
)
def construct_success_payload(
self,
status: int,
method: str,
params: dict,
data: Union[list, dict],
strict_format: bool = True
):
""" Automates the construction & formatting of a payload for a
successful endpoint operation
Args:
status (int): Status code of method of operation
method (str): Endpoint operation invoked
params (dict): Identifiers required to start endpoint operation
data (list or dict): Data to be moulded into a response
strict_format (bool): Toggles strict adherence to archival format
Returns:
Formatted payload (dict)
"""
def format_document(document, kind):
def encode_datetime_objects(document):
datetime_serialiser = DateTimeSerializer()
document['created_at'] = datetime_serialiser.encode(document['created_at'])
return document
def annotate_document(document, kind):
document['doc_id'] = document.doc_id
document['kind'] = kind
return document
encoded_document = encode_datetime_objects(document)
annotated_document = annotate_document(encoded_document, kind)
return annotated_document
self.__template['success'] = 1
self.__template['status'] = status
self.__template['method'] = method
self.__template['params'] = params
if isinstance(data, list):
formatted_data = []
for record in data:
formatted_record = (
format_document(record, kind=self.subject)
if strict_format
else record
)
formatted_data.append(formatted_record)
else:
formatted_data = (
format_document(data, kind=self.subject)
if strict_format
else data
)
self.__template['data'] = formatted_data
jsonschema.validate(self.__template, schemas['payload_schema'])
logging.info(
f"Operation was successful!",
ID_path=SOURCE_FILE,
ID_class=Payload.__name__,
ID_function=Payload.construct_success_payload.__name__
)
return self.__template
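# Usage sketch inside a Flask-RESTX resource (namespace, model and record names
# below are placeholders, not part of the original service definition).
def _demo_build_payload(ns, record_model, records):
    payload = Payload(subject="Registration", namespace=ns, model=record_model)
    return payload.construct_success_payload(
        status=200,
        method="registrations.get",
        params={"project_id": "demo_project"},
        data=records   # list of TinyDB documents carrying 'created_at'
    )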
#####################################
# Base Data Storage Class - Records #
#####################################
class Records:
"""
Automates CRUD operations on a structured TinyDB database. Operations are
    atomicised using TinyRecord transactions; queries are smart-cached.
Attributes:
db_path (str): Path to json source
Args:
db_path (str): Path to json source
"""
def __init__(self, db_path: str):
self.db_path = db_path
###########
# Helpers #
###########
def load_database(self):
""" Loads json source as a TinyDB database, configured to cache queries,
I/O operations, as well as serialise datetimes objects if necessary.
Subjects are initialised as tables of the database
Returns:
database (TinyDB)
"""
serialization = SerializationMiddleware(JSONStorage)
serialization.register_serializer(DateTimeSerializer(), 'TinyDate')
serialization.register_serializer(TimeDeltaSerializer(), 'TinyDelta')
database = TinyDB(
path=self.db_path,
sort_keys=True,
indent=4,
separators=(',', ': '),
storage=CachingMiddleware(serialization)
)
database.table_class = SmartCacheTable
return database
##################
# Core Functions #
##################
def create(self, subject, key, new_record):
""" Creates a new record in a specified subject table within database
Args:
subject (str): Table to be operated on
new_record (dict): Information for creating a new record
key (str): Primary key of the current table
Returns:
New record added (tinydb.database.Document)
"""
database = self.load_database()
with database as db:
subject_table = db.table(subject)
with transaction(subject_table) as tr:
# Remove additional digits (eg. microseconds)
date_created = datetime.strptime(
datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
"%Y-%m-%d %H:%M:%S"
)
new_record['created_at'] = date_created
if subject_table.contains(where(key) == new_record[key]):
tr.update(new_record, where(key) == new_record[key])
else:
tr.insert(new_record)
record = subject_table.get(where(key) == new_record[key])
return record
def read_all(self, subject, filter={}):
""" Retrieves entire collection of records, with an option to filter out
ones with specific key-value pairs.
Args:
subject (str): Table to be operated on
filter (dict(str,str)): Key-value pairs for filtering records
Returns:
Filtered records (list(tinydb.database.Document))
"""
def retrieve_all_records(subject):
""" Retrieves all records in a specified table of the database
Args:
subject (str): Table to be operated on
Returns:
Records (list(tinydb.database.Document))
"""
database = self.load_database()
with database as db:
subject_table = db.table(subject)
records = subject_table.all()
print(records)
return records
all_records = retrieve_all_records(subject=subject)
filtered_records = []
for record in all_records:
if (
(not filter.items() <= record['key'].items()) and
(not filter.items() <= record.items())
):
continue
filtered_records.append(record)
return filtered_records
def read(self, subject, key, r_id):
""" Retrieves a single record from a specified table in the database
Args:
subject (str): Table to be operated on
key (str): Primary key of the current table
r_id (dict): Identifier of specified records
Returns:
Specified record (tinydb.database.Document)
"""
database = self.load_database()
with database as db:
subject_table = db.table(subject)
record = subject_table.get(where(key) == r_id)
return record
def update(self, subject, key, r_id, updates):
""" Updates an existing record with specified updates
Args:
subject (str): Table to be operated on
key (str): Primary key of the current table
r_id (dict): Identifier of specified records
updates (dict): New key-value pairs to update existing record with
Returns:
Updated record (tinydb.database.Document)
"""
database = self.load_database()
with database as db:
subject_table = db.table(subject)
with transaction(subject_table) as tr:
tr.update(updates, where(key) == r_id)
updated_record = subject_table.get(where(key) == r_id)
return updated_record
def delete(self, subject, key, r_id):
""" Deletes a specified record from the specified table in the database
Args:
subject (str): Table to be operated on
key (str): Primary key of the current table
r_id (dict): Identifier of specified records
Returns:
Deleted record (tinydb.database.Document)
"""
database = self.load_database()
with database as db:
subject_table = db.table(subject)
record = subject_table.get(where(key) == r_id)
with transaction(subject_table) as tr:
tr.remove(where(key) == r_id)
assert not subject_table.get(where(key) == r_id)
return record
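# Illustrative CRUD sketch (hypothetical table/key names, not from the original
# source); assumes a writable JSON file at './database.json'.
# records = Records(db_path='./database.json')
# created = records.create('Profile', 'id', {'id': 'alice', 'role': 'admin'})
# everyone = records.read_all('Profile')
# fetched = records.read('Profile', 'id', 'alice')
# updated = records.update('Profile', 'id', 'alice', {'role': 'viewer'})
# removed = records.delete('Profile', 'id', 'alice')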
####################################
# Data Storage Class - MetaRecords #
####################################
class MetaRecords(Records):
""" Records all metadata generated in a single PROJECT.
Note: This class is used to generate multiple `operations.json` for
multiple collaborations.
"""
def __init__(self, db_path: str):
super().__init__(db_path=db_path)
def __generate_key(self, project_id):
return {"project_id": project_id}
def create(self, project_id, details):
# Check that new details specified conforms to export schema. Json
# describing schema is located at synergos_worker/templates/meta_schema.json
jsonschema.validate(details, schemas["meta_schema"])
meta_key = self.__generate_key(project_id)
new_metadata = {'key': meta_key}
new_metadata.update(details)
return super().create('Metadata', 'key', new_metadata)
def read_all(self, filter={}):
return super().read_all('Metadata', filter=filter)
def read(self, project_id):
meta_key = self.__generate_key(project_id)
return super().read('Metadata', 'key', meta_key)
def update(self, project_id, updates):
meta_key = self.__generate_key(project_id)
return super().update('Metadata', 'key', meta_key, updates)
def delete(self, project_id):
meta_key = self.__generate_key(project_id)
return super().delete('Metadata', 'key', meta_key)
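# Illustrative sketch (hypothetical values): MetaRecords keys every record on
# {'project_id': ...} and validates `details` against meta_schema on creation.
# meta_records = MetaRecords(db_path='./metadata.json')
# meta_records.create(project_id='project_1', details=valid_details)
# meta_records.read(project_id='project_1')
# meta_records.update(project_id='project_1', updates={'description': 'updated'})
# meta_records.delete(project_id='project_1')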
#######################################
# MetaExtractor Class - MetaExtractor #
#######################################
class MetaExtractor:
""" Given a dataset of a specific type, extract the appropriate meta
statistics for ease of summary
Attributes:
df (pd.DataFrame): Dataset to extract metrics from
schema (dict(str, str)): Mapping of feature names to their declared datatypes
dataset_type (str): Type of dataset (i.e. 'tabular', 'image' or 'text')
"""
def __init__(
self,
df: pd.DataFrame,
schema: Dict[str, str],
dataset_type: str
):
# Private attributes
self.metadata = None
# Public Attributes
self.df = df
self.schema = schema
self.dataset_type = dataset_type
###########
# Helpers #
###########
@staticmethod
def extract_categorical_feature_metadata(
feature: pd.Series
) -> Dict[str, Union[List[str], int, float]]:
""" Extracts relevant statistics from a single categorical feature.
For categorical variables, supported metadata extracted include:
1) Labels
2) Count
3) Unique
4) Top
5) Frequency
Args:
feature (pd.Series): Feature column to extract metadata from
Returns:
Categorical Meta statistics (Dict)
"""
datatype = feature.dtype.name
if datatype == "category":
# Extract class labels
labels = feature.cat.categories.to_list()
logging.debug(
"Datatypes of categorical labels tracked.",
label_datatypes=[type(v) for v in labels],
ID_path=SOURCE_FILE,
ID_class=MetaExtractor.__name__,
ID_function=MetaExtractor.extract_categorical_feature_metadata.__name__
)
# Extract meta statistics
meta_stats = feature.describe().to_dict()
logging.debug(
"Datatypes of meta-statistics calculated for categorical feature tracked.",
meta_datatypes=[type(v) for k,v in meta_stats.items()],
ID_path=SOURCE_FILE,
ID_class=MetaExtractor.__name__,
ID_function=MetaExtractor.extract_categorical_feature_metadata.__name__
)
# Add in class label information
meta_stats.update({'labels': labels})
return meta_stats
else:
raise RuntimeError(
f"Feature '{feature.name}' is not a categorical variable!"
)
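# Worked example (illustrative; values follow pandas' describe() for a
# categorical series):
# feature = pd.Series(['a', 'b', 'a', 'c'], dtype='category', name='cat_feature_1')
# MetaExtractor.extract_categorical_feature_metadata(feature)
# -> {'count': 4, 'unique': 3, 'top': 'a', 'freq': 2, 'labels': ['a', 'b', 'c']}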
@staticmethod
def extract_numeric_feature_metadata(
feature: pd.Series
) -> Dict[str, Union[int, float]]:
""" Extracts relevant statistics from a single numeric feature.
For numeric variables, supported metadata extracted include:
1) Count
2) Mean
3) Std
4) Min
5) 25%
6) 50%
7) 75%
8) max
Args:
feature (pd.Series): Feature column to extract metadata from
Returns:
Numerical Meta statistics (Dict)
"""
datatype = feature.dtype.name
if datatype not in ["category", "object"]: # capture nulls
meta_stats = feature.describe().to_dict()
logging.debug(
"Datatypes of meta-statistics calculated for numeric features tracked.",
meta_stats=[type(v) for k,v in meta_stats.items()],
ID_path=SOURCE_FILE,
ID_class=MetaExtractor.__name__,
ID_function=MetaExtractor.extract_numeric_feature_metadata.__name__
)
return meta_stats
else:
raise RuntimeError(
f"Feature '{feature.name}' is not a numerical variable!"
)
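# Worked example (illustrative; values follow pandas' describe() for a numeric
# series):
# feature = pd.Series([1.0, 2.0, 3.0, 4.0], name='num_feature_1')
# MetaExtractor.extract_numeric_feature_metadata(feature)
# -> {'count': 4.0, 'mean': 2.5, 'std': 1.29..., 'min': 1.0,
#     '25%': 1.75, '50%': 2.5, '75%': 3.25, 'max': 4.0}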
@staticmethod
def extract_object_feature_metadata(feature: pd.Series) -> dict:
""" Extracts relevant statistics from a single object feature.
Note:
This is a placeholder function to handle nullities/incompatibilities
in the event that the specified dataset was not thoroughly cleaned
Args:
feature (pd.Series): Feature column to extract metadata from
Returns:
An empty dictionary (Dict)
"""
return {}
def extract_tabular_metadata(
self
) -> Dict[str, Dict[str, Union[List[str], int, float]]]:
""" Extracts meta data/statistics from a specified tabular dataset.
Expected metadata format:
{
'features': {
'cat_variables': {
'cat_feature_1': {'datatype': "category", ...},
...
},
'num_variables': {
'num_feature_1': {'datatype': "integer", ...},
...
},
'misc_variables': {
'misc_feature_1': {'datatype': "object"},
...
}
}
}
Returns:
Tabular meta statistics (Dict)
"""
if self.dataset_type == "tabular":
# Ensures that template always has consistent keys
metadata = {
'cat_variables': {},
'num_variables': {},
'misc_variables': {}
}
for name in self.df.columns:
feature = self.df[name]
datatype = self.schema[name] # robust datatype extraction
# Check whether datatype is ambiguous (eg. null, list, etc.)
if datatype == "object":
variable_key = 'misc_variables'
meta_stats = self.extract_object_feature_metadata(feature)
# Check that datatype is categorical
elif datatype == "category":
variable_key = 'cat_variables'
meta_stats = self.extract_categorical_feature_metadata(feature)
# Check that datatype is numerical
else:
###########################
# Implementation Footnote #
###########################
# [Cause]
# There are many other datatypes apart from objects &
# categories in numpy.
# [Problems]
# This results in ambiguity when inferring numeric datatypes
# [Solution]
# Assume that all boolean types are specified as categories
variable_key = 'num_variables'
meta_stats = self.extract_numeric_feature_metadata(feature)
meta_stats['datatype'] = datatype
variable_stats = metadata.get(variable_key, {})
variable_stats[name] = meta_stats
metadata.update({variable_key: variable_stats})
return {'features': metadata}
else:
raise RuntimeError("Dataset is not of type tabular!")
def extract_image_metadata(self) -> Dict[str, Union[int, str]]:
""" Extracts meta data/statistics from a specified tabular dataset.
Expected metadata format:
{
'pixel_height': 255,
'pixel_width': 255,
'color': "rgb", # for now, only grayscale & RGB is supported
}
Returns:
Image meta statistics
"""
if self.dataset_type == "image":
# Columns of image DFs are named "{img_format}x{height}x{width}"
features = self.df.drop(columns=['target'])
color, pixel_height, pixel_width = features.columns[-1].split('x')
return {
'pixel_height': int(pixel_height),
'pixel_width': int(pixel_width),
'color': color
}
else:
raise RuntimeError("Dataset is not of type image!")
def extract_text_metadata(self) -> Dict[str, Union[int, float]]:
""" Extracts meta data/statistics from a specified text dataset.
Assumption:
Text datasets are represented as doc-term matrices
Expected metadata format:
{
'word_count': 5000, # Total no. of words represented
'sparsity': 0.6, # count(zeros)/total of doc-term matrix
'representation': 0.2 # sum(non-zeros)/total of doc-term matrix
}
Returns:
Text meta statistics (Dict)
"""
if self.dataset_type == "text":
features = self.df.drop(columns=['target'])
doc_count, word_count = features.shape
total_cells = doc_count * word_count
zero_count = features[features==0].count().sum()
sparsity = zero_count/total_cells
non_zero_sum = features.sum().sum() # .sum().sum() bypasses nullity
representation = non_zero_sum/total_cells
return {
'word_count': word_count,
'sparsity': sparsity,
'representation': representation
}
else:
raise RuntimeError("Dataset is not of type text!")
def extract_generic_metadata(self) -> Dict[str, Union[int, str]]:
""" Extracts generic meta data/statistics of the specified dataset.
Returns:
Generic meta statistics (Dict)
"""
return {
'src_count': len(self.df),
'_type': self.dataset_type
}
##################
# Core functions #
##################
def extract(self) -> Dict[str, Union[str, int, float, dict]]:
""" Extracts & compiles all metadata for each feature within the
specified dataset.
Expected metadata format:
{
'src_count': 1000,
'_type': "<insert datatype>",
<insert type-specific meta statistics>
...
}
Returns:
Data-specific meta statistics (Dict)
"""
EXTRACTORS = {
'tabular': self.extract_tabular_metadata,
'image': self.extract_image_metadata,
'text': self.extract_text_metadata
}
supported_dataset_types = list(EXTRACTORS.keys())
if self.dataset_type not in supported_dataset_types:
raise RuntimeError(f"{self.dataset_type} is not yet supported!")
generic_metadata = self.extract_generic_metadata()
type_specific_metadata = EXTRACTORS[self.dataset_type]()
self.metadata = {**generic_metadata, **type_specific_metadata}
return self.metadata
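# Illustrative usage sketch (hypothetical data; the schema strings must use the
# datatype labels handled in extract_tabular_metadata):
# df = pd.DataFrame({'age': [20, 35], 'gender': pd.Categorical(['M', 'F'])})
# extractor = MetaExtractor(
#     df=df,
#     schema={'age': 'integer', 'gender': 'category'},
#     dataset_type='tabular'
# )
# extractor.extract()
# -> {'src_count': 2, '_type': 'tabular', 'features': {'cat_variables': {...},
#     'num_variables': {...}, 'misc_variables': {}}}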
####################################
# Benchmarking Class - Benchmarker #
####################################
class Benchmarker:
""" Automates the calculation of all supported descriptive statistics
Attributes:
y_true (np.ndarray): Truth labels loaded into WSSW
y_pred (np.ndarray): Predictions obtained from TTP, cast into classes
y_score (np.ndarray): Raw scores/probabilities obtained from TTP
"""
def __init__(
self,
y_true: np.ndarray,
y_pred: np.ndarray,
y_score: np.ndarray
):
self.y_true = y_true
self.y_pred = y_pred
self.y_score = y_score
############
# Checkers #
############
def is_multiclass(self):
""" Checks if the current experiment to be evaluated is from a binary or
multiclass setup
Returns:
True if setup is multiclass
False otherwise
"""
try:
return self.y_score.shape[1] > 1
except IndexError:
return False
###########
# Helpers #
###########
@staticmethod
def _calculate_summary_stats(
y_true: np.ndarray,
y_pred: np.ndarray,
y_score: np.ndarray,
) -> Dict[str, List[Union[int, float]]]:
""" Given y_true, y_pred & y_score from a classification machine
learning operation, calculate the corresponding summary statistics.
Args:
y_true (np.ndarray)
y_pred (np.ndarray)
y_score (np.ndarray)
Returns:
Summary Statistics (dict(str, list(int)))
"""
# Calculate accuracy of predictions
accuracy = accuracy_score(y_true, y_pred)
# Calculate ROC-AUC for each label
try:
roc = roc_auc_score(y_true, y_score)
fpr, tpr, _ = roc_curve(y_true, y_score)
except ValueError:
roc = 0.0
fpr, tpr = (None, None)
# Calculate Area under PR curve
pc_vals, rc_vals, _ = precision_recall_curve(y_true, y_score)
auc_pr_score = auc(rc_vals, pc_vals)
# Calculate F-score
f_score = f1_score(y_true, y_pred)
# Calculate contingency matrix
ct_matrix = contingency_matrix(y_true, y_pred)
statistics = {
'accuracy': float(accuracy),
'roc_auc_score': float(roc),
'pr_auc_score': float(auc_pr_score),
'f_score': float(f_score)
}
plots = {'roc_curve': [fpr, tpr], 'pr_curve': [pc_vals, rc_vals]}
return statistics
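# Worked example for the binary case (illustrative, hypothetical arrays):
# y_true = np.array([0, 1, 1, 0])
# y_pred = np.array([0, 1, 0, 0])
# y_score = np.array([0.1, 0.9, 0.4, 0.2])
# Benchmarker._calculate_summary_stats(y_true, y_pred, y_score)
# -> {'accuracy': 0.75, 'roc_auc_score': 1.0, 'pr_auc_score': 1.0,
#     'f_score': 0.666...}
# (the `plots` dict of ROC/PR curve points is assembled above but not returned)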
@staticmethod
def _calculate_descriptive_rates(
TNs: List[int],
FPs: List[int],
FNs: List[int],
TPs: List[int]
) -> Dict[str, List[Union[int, float]]]:
""" Calculates the descriptive rates for each class in a multiclass
setup. Supported rates are as follows:
1. TPRs: True positive rate
2. TNRs: True negative rate
3. PPVs: Positive predictive value
4. NPVs: Negative predictive value
5. FPRs: False positive rate
6. FNRs: False negative rate
7. FDRs: False discovery rate
Args:
TNs (list(float)): No. of true negatives for all classes
FPs (list(float)): No. of false positives for all classes
FNs (list(float)): No. of false negatives for all classes
TPs (list(float)): No. of true positives for all classes
Returns:
Descriptive Rates (dict(str, list(float)))
"""
rates = {}
def add_rate(r_type, value):
target_rates = rates.get(r_type, [])
target_rates.append(value)
rates[r_type] = [float(value) for value in target_rates]
for TN, FP, FN, TP in zip(TNs, FPs, FNs, TPs):
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN) if (TP+FN) != 0 else 0
add_rate('TPRs', TPR)
# Specificity or true negative rate
TNR = TN/(TN+FP) if (TN+FP) != 0 else 0
add_rate('TNRs', TNR)
# Precision or positive predictive value
PPV = TP/(TP+FP) if (TP+FP) != 0 else 0
add_rate('PPVs', PPV)
# Negative predictive value
NPV = TN/(TN+FN) if (TN+FN) != 0 else 0
add_rate('NPVs', NPV)
# Fall out or false positive rate
FPR = FP/(FP+TN) if (FP+TN) != 0 else 0
add_rate('FPRs', FPR)
# False negative rate
FNR = FN/(TP+FN) if (TP+FN) != 0 else 0
add_rate('FNRs', FNR)
# False discovery rate
FDR = FP/(TP+FP) if (TP+FP) != 0 else 0
add_rate('FDRs', FDR)
return rates
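# Worked example (illustrative) for a single class with TN=50, FP=10, FN=5, TP=35:
# TPR = 35/40 = 0.875, TNR = 50/60 ~ 0.833, PPV = 35/45 ~ 0.778,
# NPV = 50/55 ~ 0.909, FPR = 10/60 ~ 0.167, FNR = 5/40 = 0.125, FDR = 10/45 ~ 0.222
# Benchmarker._calculate_descriptive_rates(TNs=[50], FPs=[10], FNs=[5], TPs=[35])
# -> {'TPRs': [0.875], 'TNRs': [0.833...], 'PPVs': [0.777...], 'NPVs': [0.909...],
#     'FPRs': [0.166...], 'FNRs': [0.125], 'FDRs': [0.222...]}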
def _find_stratified_descriptors(self) -> Dict[str, List[Union[int, float]]]:
""" Finds the values of descriptors for all classes in a multiclass
setup. Descriptors are True Negatives (TNs), False Positives (FPs),
False Negatives (FNs) and True Positives (TPs).
Returns:
Stratified Descriptors (dict(str, list(int)))
"""
# Calculate confusion matrix
cf_matrix = confusion_matrix(self.y_true, self.y_pred)
logging.debug(
"Confusion matrix of calculated benchmarks tracked.",
cf_matrix=cf_matrix,
ID_path=SOURCE_FILE,
ID_class=Benchmarker.__name__,
ID_function=Benchmarker._find_stratified_descriptors.__name__
)
FPs = cf_matrix.sum(axis=0) - np.diag(cf_matrix)
FNs = cf_matrix.sum(axis=1) - np.diag(cf_matrix)
#<NAME> <EMAIL>
#See the README.md for more information: https://github.com/alexholcombe/attentional-blink/blob/master/README.md
#git remote add origin https://github.com/alexholcombe/attentional-blink.git
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui, prefs
import psychopy.info
import numpy as np
from math import atan, log, ceil
from copy import deepcopy
import time, sys, os, random
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import imageLineupResponse
except ImportError:
print('Could not import imageLineupResponse.py (you need that file to be in the same directory)')
imageSz = (320,240)
lineupImagesNotInStream = False
lineupComprisedOfTargetFlankers = True
descendingPsycho = True
tasks=['T1','T1T2','T2']; task = tasks[2]
beepNotColoredFrame = True #if True, play a tone at the time of the critical distractor rather than drawing a colored rectangle
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
if demo:
refreshRate = 60.; #100
threshCriterion = 0.58
bgColor = [-1,-1,-1] # [-1,-1,-1]
targetCueColor = [0,0,1.25] # [0,0,1]
distractorCuePossibleColors = [ [.5,.5,0], #yellow [1,1,0]
[0,.5,0], #green [0,1,0]
[.5,.25,0] #orange [1,.5,0]
]
letterColor = [1.,1.,1.]
cueRadius = 6 #6 deg, as in Martini E2. Letters should have height of 2.5 deg
widthPix= 1600 #monitor width in pixels of Agosta
heightPix= 900 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=1 #0 to use main screen, 1 to use external screen connected to computer
fullscr=False #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = True
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 400; heightPix = 400
monitorwidth = 13.0
fullscr=False; scrn=0
if demo:
scrn=0; fullscr=False
widthPix = 800; heightPix = 600
monitorname='testMonitor'
allowGUI = True
viewdist = 57. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
infoFirst = { 'Check refresh etc':True, 'Fullscreen (timing errors if not)': fullscr, 'Screen refresh rate': 60 }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='RSVP experiment',
order=['Check refresh etc', 'Fullscreen (timing errors if not)','Screen refresh rate'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
#letter size 2.5 deg
numImagesInStream = 10
numRespOptions = 4
numImagesToPresent = 10
SOAms = 500
imageDurMs = 500
ISIms = SOAms - imageDurMs
imageDurFrames = int( np.floor(imageDurMs / (1000./refreshRate)) )
cueDurFrames = imageDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + imageDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + imageDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and imageDurFrames ='+str(imageDurFrames)+' or '+str(round( imageDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numImagesToPresent*(ISIframes+imageDurFrames) ) #trial duration in frames
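#Worked example (illustrative): at refreshRate = 60 Hz one frame lasts 1000/60 ~ 16.7 ms,
#so imageDurFrames = floor(500/16.7) = 30, ISIframes = floor(0/16.7) = 0,
#and trialDurFrames = 10*(0+30) = 300 frames, i.e. ~5 s per trial with a realised SOA of ~500 ms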
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibration
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
defaultNoiseLevel = 0.0 #
trialsPerCondition = 1 #default value
dlgLabelsOrdered = list()
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
dlgLabelsOrdered.append('subject')
myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
dlgLabelsOrdered.append('defaultNoiseLevel')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText("Note: to abort press ESC at a trial's response screen", color='DimGrey')
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition = ' + str(trialsPerCondition))
defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
dataFile = open(fileName+'.txt', 'w')
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
dataFile = sys.stdout; logF = sys.stdout
logging.console.setLevel(logging.ERROR) #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messages, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=True, ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
## None -> default
## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
)
logging.info(runInfo)
logging.flush()
#Outdated below
#if prefs.general['audioLib'][0] == 'pyo': #In Psychopy2->Preferences->General can try putting pyo first, maybe then it will use coreaudio
# if pyo is the first lib in the list of preferred libs then we could use small buffer
# pygame sound is very bad with a small buffer though
# sound.init(48000, buffer=128)
print('Using ',sound.audioLib,' (with ',sound.audioDriver,' for sounds' )
#create click sound for keyboard
try:
click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015)
beep=sound.Sound('D',octave=5, sampleRate=22050, secs=0.100)
if showRefreshMisses:
fixSizePix = 32 #2.6 #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
fixatnNoiseTexture = np.round( np.random.rand(int(fixSizePix/4),int(fixSizePix/4)) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=10,units='pix',autoLog=autoLogging)
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=.16,units='norm',autoLog=autoLogging)
clickSound, badKeySound = imageLineupResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS
possibleCue1positions = np.array([2,3,4])
possibleCue2lags = np.array([2])
for cue1pos in possibleCue1positions:
for cue2lag in possibleCue2lags:
for critDistractorArousing in [True,True]: #[True, False]:
for otherItemsArousing in [False, False]:
stimList.append( {'cue1pos':cue1pos, 'cue2lag':cue2lag, 'critDistractorArousing':critDistractorArousing, 'otherItemsArousing':otherItemsArousing } )
#Martini E2 and also AB experiments used 400 trials total, with breaks between every 100 trials
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
numRightWrongEachCuepos = np.zeros([ len(possibleCue1positions), 1 ]); #summary results to print out at end
numRightWrongEachCue2lag = np.zeros([ len(possibleCue2lags), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
#if it's not really a letter, return @
#if type(number) != type(5) and type(number) != type(np.array([3])[0]): #not an integer or numpy.int32
# return ('@')
if number < 0 or number > 25:
return ('@')
else: #it's probably a letter
try:
return chr( ord('A')+number )
except:
return('@')
def letterToNumber(letter): #A = 0, Z = 25
#if it's not really a letter, return -999
#HOW CAN I GENERICALLY TEST FOR LENGTH, EVEN IN THE CASE OF A NUMBER THAT'S NOT PART OF AN ARRAY?
try:
#if len(letter) > 1:
# return (-999)
if letter < 'A' or letter > 'Z':
return (-999)
else: #it's a letter
return ord(letter)-ord('A')
except:
return (-999)
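#Illustrative mapping for the two helpers above: numberToLetter(0) -> 'A',
#numberToLetter(25) -> 'Z', numberToLetter(26) -> '@';
#letterToNumber('A') -> 0, letterToNumber('Z') -> 25, letterToNumber('@') -> -999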
#print header for data file
print('critDistFname\ttargetFname\t',file=dataFile,end='')
for i in range(numImagesInStream-2):
print('fillerImage',i,sep='',file=dataFile,end='\t')
for i in range(3):
print('lineupImage',i,sep='',file=dataFile,end='\t')
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
if task=='T1' or task=='T2':
numRespsWanted = 1
print('critDistractorArousing\totherItemsArousing\tcue1pos\tcue2lag\t',file=dataFile,end='')
elif task=='T1T2':
numRespsWanted = 2
print('noisePercent\t',end='',file=dataFile)
for i in range(numRespsWanted):
dataFile.write('answerPos'+str(i)+'\t') #have to use write to avoid ' ' between successive text, at least until Python 3
dataFile.write('answer'+str(i)+'\t')
dataFile.write('response'+str(i)+'\t')
dataFile.write('correct'+str(i)+'\t')
dataFile.write('respImageIdx\t')
dataFile.write('respStimSeqIdx\t') #of the X items in the stimulus sequence, which one, i.e. the ith one was picked from the lineup
dataFile.write('respStimRelToTarget\t') #the position in the stream, relative to the target position, of the image picked from the lineup
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,task,distractorCueColor,beepPlayedYet,cue1pos,cue2lag,cue,cueDurFrames,imageDurFrames,ISIframes,targetsPos,
noise,proportnNoise,allFieldCoords,numNoiseDots,
fillerAndLineupImages, targetImage, critDistImage):
#defining a function to draw each frame of stim. So can call second time for tracking task response phase
SOAframes = imageDurFrames+ISIframes
cueFrames =list( targetsPos*SOAframes )#targetsPos is variable
if task=='T2': #Katherine's 2nd experiment, the oddball distractor is highlighted with a colored rectangle
distractorFrame = cue1pos*SOAframes
cueFrames.append(distractorFrame) #oddball in Katherine's E2
imageN = int( np.floor(n/SOAframes) )
if imageN > numImagesInStream:
print('ERROR asking for ',imageN, ' but only ',numImagesInStream,' desired in stream')
frameOfThisImage = n % SOAframes #every SOAframes, new letter
showImage = frameOfThisImage < imageDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
thisImageIdx = imageN
#print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
#so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
cue.setFillColor( bgColor )
#print('distractorFrame=',distractorFrame,'cueFrames=',cueFrames, 'distractorCueColor=',distractorCueColor)
for cueFrame in cueFrames: #check whether it's time for any cue
if n>=cueFrame and n<cueFrame+cueDurFrames: #time for the target
if cueFrame == distractorFrame: #pick a random one of the distractorCuePossibleColors (Katherine E2) or alternatively use a tone (Katherine E3)
if beepNotColoredFrame: #Katherine E3
cueColor = bgColor
if not beepPlayedYet:
beep.play()
beepPlayedYet= True
else:
cueColor = distractorCueColor
else:
cueColor = targetCueColor
cue.setFillColor( cueColor )
cue.draw()
if showImage:
if imageN == cue1pos:
critDistImage.draw()
elif imageN == cue1pos + cue2lag:
targetImage.draw()
else:
if imageN > cue1pos:
thisImageIdx -= 1 #critical distractor was drawn separately, doesn't count toward nth item to take out of the fillerandLineup
if imageN> cue1pos+cue2lag:
thisImageIdx -= 1 #target was drawn separately, doesn't count toward nth item to take out of the fillerandLineup
fillerAndLineupImages[thisImageIdx].draw()
#if/then statements for what item to draw
else:
pass
refreshNoise = False #Not recommended because it takes longer than a frame, even just to shuffle apparently. Or it may be the setXYs step
if proportnNoise>0 and refreshNoise:
if frameOfThisImage ==0:
np.random.shuffle(allFieldCoords)
dotCoords = allFieldCoords[0:numNoiseDots]
noise.setXYs(dotCoords)
if proportnNoise>0:
noise.draw()
return beepPlayedYet
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
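#Worked example of the frame bookkeeping in oneFrameOfStim (illustrative): with
#imageDurFrames = 30 and ISIframes = 0, SOAframes = 30, so frame n = 95 gives
#imageN = floor(95/30) = 3 (the 4th image) and frameOfThisImage = 95 % 30 = 5,
#which is < imageDurFrames, so that image is drawn rather than a blank ISI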
cueLineWidth = 20
cue = visual.Rect(myWin,
width=320+cueLineWidth*2,
height=240+cueLineWidth*2,
units = 'pix',
lineColor = bgColor,
fillColorSpace='rgb',
fillColor=[1,0,0], #this will be changed to cue the distractor and the target
pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
interpolate=True,
autoLog=False)#this stim changes too much for autologging to be useful
#predraw all images needed for this trial
imageHeight = 240; imageWidth = 320
#populated with 0s when the drawImages... function is called the first time.
#Represents the number of times an image has been used. Position in the list represents image identity, which is numeric
calmCritDistUsage = np.array([])
calmTargetUsage = np.array([])
calmFillerUsage = np.array([])
#!/usr/bin/env python3
################################################################################
# parse arguments first
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--min_2d_power', type=int, default=3)
parser.add_argument('--max_2d_power', type=int, default=15)
parser.add_argument('--min_3d_power', type=int, default=3)
parser.add_argument('--max_3d_power', type=int, default=10)
parser.add_argument('--build_type', type=str, default='Release')
args = parser.parse_args()
################################################################################
# preliminaries
import sys;
sys.path.insert(0, '../build/%s' % args.build_type)
sys.path.insert(0, '../misc/py')
import common
import common3d
import matplotlib.pyplot as plt
import numpy as np
import pyolim as olim
from matplotlib.colors import LogNorm
from numpy.linalg import norm
plt.rc('text', usetex=True)
plt.rc('font', **{
'family': 'serif',
'serif': ['Computer Modern'],
'size': 8
})
plt.style.use('bmh')
################################################################################
# parameters
R_fac = 0.1
N = 2**np.arange(args.min_2d_power, args.max_2d_power + 1) + 1
N3D = 2**np.arange(args.min_3d_power, args.max_3d_power + 1) + 1
vx, vy, vz = 5, 13, 20
x_fac_1, y_fac_1, z_fac_1 = 0.0, 0.0, 0.0
x_fac_2, y_fac_2, z_fac_2 = 0.8, 0.0, 0.0
marchers_2d = [olim.Olim8Mid0, olim.Olim8Mid1, olim.Olim8Rect]
marchers_3d = [olim.Olim26Mid0, olim.Olim26Mid1, olim.Olim26Rect,
olim.Olim3dHuMid0, olim.Olim3dHuMid1, olim.Olim3dHuRect]
################################################################################
# 2D
s = lambda x, y: 1/(2 + vx*x + vy*y)
s_1, s_2 = s(x_fac_1, y_fac_1), s(x_fac_2, y_fac_2)
def make_u(x_fac, y_fac, vx, vy, s):
return lambda x, y: \
(1/np.sqrt(vx**2 + vy**2)) * \
np.arccosh(
1 +
s(x_fac, y_fac)*s(x, y)*(vx**2 + vy**2)*(
(x - x_fac)**2 + (y - y_fac)**2)/2)
u_1 = make_u(x_fac_1, y_fac_1, vx, vy, s)
u_2 = make_u(x_fac_2, y_fac_2, vx, vy, s)
u = lambda x, y: np.minimum(u_1(x, y), u_2(x, y))
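# Sanity-check sketch (illustrative): u_1 appears to be the classical analytic
# traveltime for a medium with linear velocity 1/s(x,y) = 2 + vx*x + vy*y; at the
# point source the distance term vanishes, so arccosh(1) = 0 and
# u_1(x_fac_1, y_fac_1) should evaluate to 0:
# assert np.isclose(u_1(x_fac_1, y_fac_1), 0.0)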
E2 = dict()
E2_fac = dict()
for Olim in marchers_2d:
print(common.get_marcher_name(Olim))
E2[Olim] = np.zeros(len(N))
E2_fac[Olim] = np.zeros(len(N))
for k, n in enumerate(N):
print('- n = %d (%d/%d)' % (n, k + 1, len(N)))
L = np.linspace(0, 1, n)
X, Y = np.meshgrid(L, L)
u_ = u(X, Y)
S = s(X, Y)
h = 1/(n - 1)
i_1, j_1 = y_fac_1/h, x_fac_1/h
i_2, j_2 = y_fac_2/h, x_fac_2/h
m_fac = Olim(S, h)
R_1 = np.sqrt((x_fac_1 - X)**2 + (y_fac_1 - Y)**2)
fc_1 = olim.FacCenter(i_1, j_1, s_1)
for i, j in zip(*np.where(R_1 <= R_fac)):
m_fac.set_node_fac_center(i, j, fc_1)
m_fac.add_boundary_node(x_fac_1, y_fac_1, s_1)
R_2 = np.sqrt((x_fac_2 - X)**2 + (y_fac_2 - Y)**2)
import hoki.age_utils as au
import hoki.load as load
import pkg_resources
import numpy as np
import pandas as pd
import pytest
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError
# Loading Data
data_path = pkg_resources.resource_filename('hoki', 'data')
hr_file = data_path + '/hrs-sin-imf_chab100.zem4.dat'
cmd_file = data_path + '/cmd_bv_z002_bin_imf135_300'
myhrd = load.model_output(hr_file, hr_type='TL')
mycmd = load.unpickle(cmd_file)
# Creating Test Inputs
fake_hrd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'star3'],
'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input = pd.DataFrame.from_dict({'logT': np.array(['bla']),
'logL': np.array([4.83])})
no_name_input = pd.DataFrame.from_dict({'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input2 = pd.DataFrame.from_dict({'logT': np.array([4.58, 'bla']),
'logL': np.array([4.83, 2.0])})
fake_cmd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'STAR3'],
'col': np.array([-0.3, 0.5, -0.25]),
'mag': np.array([-5, -10, -1])})
bad_cmd_input = pd.DataFrame.from_dict({'col': np.array(['bla']),
'mag': np.array([-5])})
# Testing Suite
class TestAgeWizard(object):
def test_init_basic(self):
assert au.AgeWizard(obs_df=fake_hrd_input, model=hr_file), "Loading HRD file path failed"
assert au.AgeWizard(obs_df=fake_hrd_input, model=myhrd), "Loading with hoki.hrdiagrams.HRDiagram failed"
assert au.AgeWizard(obs_df=fake_cmd_input, model=mycmd), 'Loading with hoki.cmd.CMD'
assert au.AgeWizard(obs_df=fake_cmd_input, model=cmd_file), 'Loading CMD from frile failed'
def test_bad_init(self):
with pytest.raises(HokiFatalError):
__, __ = au.AgeWizard(obs_df=fake_cmd_input, model='sdfghj'), 'HokiFatalError should be raised'
with pytest.raises(HokiFormatError):
__, __ = au.AgeWizard(obs_df='edrftgyhu', model=cmd_file), 'HokiFormatError should be raised'
def test_combine_pdfs_not_you(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf(not_you=['star1'])
cpdf = wiz.sample_pdf.pdf
assert np.sum(np.isclose([cpdf[0], cpdf[9]], [0.0, 0.7231526323765232])) == 2, "combined pdf is not right"
def test_most_likely_age(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
assert np.isclose(wiz.most_likely_age[0], 6.9), "Most likely age wrong"
def test_most_likely_ages(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
a = wiz.most_likely_ages
assert np.sum(np.isclose([a[0], a[1], a[2]], [6.9, 6.9, 6.9])) == 3, "Most likely ages are wrong"
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
# ----------------------
# Main class
# ----------------------
class solveCovid:
def __init__(self,iso2: str): # eg 'US'
self.iso2 = iso2
# Policy strategies for forecast
self.policy = 'optim' # ['optim', 'linear']
self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
# Infection rate model for forecast
self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
self.gamma_shock_length = 10 # Shock gamma_tilde for x days
self.gamma_shock_depth = 0.5 # Daily increment of gamma
self.default_init_single = default_init_single
self.default_bounds_single = default_bounds_single
# Vaccine assumptions
self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
self.effi_one = 0.5 # Efficacy after one dose in %
self.effi_two = 0.95 # Efficacy after two doses in %
self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
self.vac_better_cover = 1.3
self.vac_better_delayedstart = '2021-06-30'
self.vac_better_delayedcover = 1
# Reinfection and loss of immunity
self.reinfect = 'immune' # ['immune','reinfect']
self.r_re1_R = np.log(2)/10000 # Baseline: R loses immunity after 3 years
self.r_re1_V = np.log(2)/10000 # Baseline: V loses immunity after 3 years
self.r_re2_R = np.log(2)/60 # Downside risk: R loses immunity after 60 days, approx 1% of R lose immunity each day
self.r_re2_V = np.log(2)/60 # Downside risk: V loses immunity after 60 days, approx 1% of V lose immunity each day
# Death probabilities
self.pdth_assump = 'martingale' # ['martingale','treatment']
self.pdth_min = 0.005 # Lowerbound on death probability - countries with very few cases still think there is death probability
self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
# --------------- 1. Preliminary: Get the data ------------------------
def prelim(self):
iso2 = self.iso2
self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
'total_cases','total_deaths','new_cases','new_deaths',
'google_smooth','icu_patients','hosp_patients','reproduction_rate',
'new_tests','tests_per_case','aged_70_older',
'vac_total','vac_people',
'vac_fully']][df1['total_cases'][iso2] > virus_thres]
df2 = df2.droplevel('iso2',axis=1)
df2['vac_total'] = df2['vac_total'].interpolate()
df2['vac_people'] = df2['vac_people'].interpolate()
if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
df2['vac_partial'] = 0.8 * df2['vac_total']
df2['vac_fully'] = 0.2 * df2['vac_total']
else : # For most countries,
date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
df2['vac_fully'] = df2['vac_fully'].interpolate()
df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
PopulationI = df2['total_cases'][0]
PopulationD = df2['total_deaths'][0]
if PopulationD==0:
PopulationD = 0
PopulationR = 5
else:
PopulationR = PopulationD * 5
PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
self.cases_data_fit = df2['total_cases'].tolist()
self.deaths_data_fit = df2['total_deaths'].tolist()
self.newcases_data_fit = df2['new_cases'].tolist()
self.newdeaths_data_fit = df2['new_deaths'].tolist()
self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
date_day_since100 = pd.to_datetime(df2.index[0])
self.maxT = (default_maxT - date_day_since100).days + 1
self.mobility_vec = df2['google_smooth'].values
self.T = len(df2)
self.t_cases = np.arange(0,self.T)
self.mobility_interp = interp1d(self.t_cases,self.mobility_vec,bounds_error=False,fill_value=0.,kind='cubic')
self.GLOBAL_PARAMS = (self.N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v)
self.gamma_0_days = 1 # average of gamma_t during first n days becomes the target
# Compute vaccination parameters
self.vac_partial = df2['vac_partial'].values
self.vac_fully = df2['vac_fully'].values
#self.vac_contracted = 1000*df_vac.loc[iso2]['No. of people covered (thousands)']/self.N
df2['V_'] = self.N * (self.effi_one*df2['vac_partial']
+ self.effi_two*df2['vac_fully'])/100 # V = expected number of effectively vaccinated persons
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df_v = df2.reindex(ix)
# Vaccination assumptions
if self.iso2 in ['GB','US']:
vac_scale = 1
elif self.iso2 in ['BE','FR','DE','IT','NL','PL','SG','ES','CH','RO','CL','CA']:
vac_scale = 0.8
elif self.iso2 in ['AU','SA','SE','TR']:
vac_scale = 0.65
elif self.iso2 in ['AR','BR','MX','RU']:
vac_scale = 0.50
elif self.iso2 in ['ID','IN','JP','KR','MY','TH']:
vac_scale = 0.25
elif self.iso2 in ['ZA']:
vac_scale = 0.10
else:
vac_scale = 0.50
print('Missing vaccine assumption for selected country')
if self.vac_assump == 'vac_base':
if df2['V_'][-1] > 0: # already started
df_v['V_'].loc['2021-12-31'] = self.vac_base_cover * vac_scale * self.N
elif df2['V_'][-1] == 0: # If has not started, assume starting by xxx and cover xxx at year end
df_v['V_'].loc[self.vac_base_delayedstart] = 100 # 100 = assumed number of effectively vaccinated on first day
df_v['V_'].loc['2021-12-31'] = self.vac_base_delayedcover* vac_scale*self.N # partial orders filled by year end
elif self.vac_assump == 'vac_worse':
if df2['V_'][-1] > 0:
df_v['V_'].loc['2021-12-31'] = self.vac_worse_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_worse_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_worse_delayedcover* vac_scale*self.N
elif self.vac_assump == 'vac_better':
if df2['V_'][-1]>0:
df_v['V_'].loc['2021-12-31'] = self.vac_better_cover * vac_scale * self.N
elif df2['V_'][-1] == 0:
df_v['V_'].loc[self.vac_better_delayedstart] = 100
df_v['V_'].loc['2021-12-31'] = self.vac_better_delayedcover* vac_scale*self.N
df_v['V_'] = df_v['V_'].interpolate()
df_v['V_'] = df_v['V_'].clip(0,self.N)
self.df2 = df2
self.df_v = df_v
print(f'Data preparation for {iso2} done')
# --------------------------3 . SEIR model ------------------
def step_seir(self, t, x, gamma_t, p_dth) -> list:
"""
SEIR model building on DELPHI v.3
Features 16 distinct states, taking into account undetected, deaths, hospitalized and
recovered
[0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D,
11 TH, 12 DVR,13 DVD, 14 DD, 15 DT, 16 V]
"""
S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT, V = x
r_v = self.df_v['V_'].iloc[t+1] - self.df_v['V_'].iloc[t]
# Reinfection parameters
if self.reinfect == 'immune':
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
elif self.reinfect == 'reinfect':
if t <= self.T:
r_re_R = self.r_re1_R
r_re_V = self.r_re1_V
else:
r_re_R = self.r_re2_R
r_re_V = self.r_re2_V
# Vaccination recipients (S, or S+R)
if self.vac_receiver == 'S only':
zeta = 1
elif self.vac_receiver == 'S+R':
zeta = S/(S+R)
else:
print('Re-specify vaccine recipient choice')
# Main equations
S1 = S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V - r_v * zeta
if S1 < 0: # Vaccination reaches saturating point
S1 = 0
r_v = (S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V) /zeta
E1 = E + gamma_t * S * I / self.N - r_i * E
I1 = I + r_i * E - r_d * I
AR1 = AR + r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
DHR1 = DHR + r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
DQR1 = DQR + r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
AD1 = AD + r_d * p_dth * (1 - p_d) * I - r_dth * AD
DHD1 = DHD + r_d * p_dth * p_d * p_h * I - r_dth * DHD
DQD1 = DQD + r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
R1 = R + r_ri * (AR + DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
D1 = D + r_dth * (AD + DQD + DHD)
# Helper states
TH1 = TH + r_d * p_d * p_h * I
DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
DD1 = DD + r_dth * (DHD + DQD)
DT1 = DT + r_d * p_d * I
V1 = V + r_v -r_re_V*V
x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
return x1
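# Illustrative single-step sketch (hypothetical numbers; assumes `solver = solveCovid('US')`
# and `solver.prelim()` have already been run so that df_v and GLOBAL_PARAMS exist):
# x0 = solver.initial_states_func(k=5) # 17-element state vector
# x1 = solver.step_seir(t=0, x=x0, gamma_t=2.08, p_dth=0.02)
# Iterating this call over t yields the simulated paths of detected cases
# (index 15, DT) and cumulative deaths (index 10, D).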
# ------------------ X. Construct initial conditions
def initial_states_func(self,k):
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
E_0 = PopulationCI / p_d * k
I_0 = PopulationCI / p_d * k
UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
DHD_0 = PopulationCI * p_h * p_dth0
DQD_0 = PopulationCI * (1 - p_h) * p_dth0
R_0 = PopulationR / p_d
D_0 = PopulationD / p_d
S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
TH_0 = PopulationCI * p_h
DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
DD_0 = PopulationD
DT_0 = PopulationI
V_0 = 0
x_init = [
S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
]
return x_init
# Find k=k1,k2 that matches gamma_0 to 2.08 (R0=6 equivalent)
def loss_gamma0(self,k):
newcases = np.array(self.newcases_data_fit)
# Class to do parallelized clustering
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.decomposition import PCA
from scipy import signal
from scipy import stats
from scipy.signal import argrelmax
from scipy.spatial import cKDTree
from copy import deepcopy
from sklearn.mixture import GaussianMixture
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from yass.cluster.diptest.diptest import diptest as dp
from sklearn.cluster import AgglomerativeClustering
import networkx as nx
from yass.explore.explorers import RecordingExplorer
from yass.geometry import n_steps_neigh_channels
from yass import mfm
from yass.util import absolute_path_to_asset
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
colors = np.array([
'black','blue','red','green','cyan','magenta','brown','pink',
'orange','firebrick','lawngreen','dodgerblue','crimson','orchid','slateblue',
'darkgreen','darkorange','indianred','darkviolet','deepskyblue','greenyellow',
'peru','cadetblue','forestgreen','slategrey','lightsteelblue','rebeccapurple',
'darkmagenta','yellow','hotpink',
'black','blue','red','green','cyan','magenta','brown','pink',
'orange','firebrick','lawngreen','dodgerblue','crimson','orchid','slateblue',
'darkgreen','darkorange','indianred','darkviolet','deepskyblue','greenyellow',
'peru','cadetblue','forestgreen','slategrey','lightsteelblue','rebeccapurple',
'darkmagenta','yellow','hotpink',
'black','blue','red','green','cyan','magenta','brown','pink',
'orange','firebrick','lawngreen','dodgerblue','crimson','orchid','slateblue',
'darkgreen','darkorange','indianred','darkviolet','deepskyblue','greenyellow',
'peru','cadetblue','forestgreen','slategrey','lightsteelblue','rebeccapurple',
'darkmagenta','yellow','hotpink',
'black','blue','red','green','cyan','magenta','brown','pink',
'orange','firebrick','lawngreen','dodgerblue','crimson','orchid','slateblue',
'darkgreen','darkorange','indianred','darkviolet','deepskyblue','greenyellow',
'peru','cadetblue','forestgreen','slategrey','lightsteelblue','rebeccapurple',
'darkmagenta','yellow','hotpink'])
sorted_colors=colors
class Cluster(object):
"""Class for doing clustering."""
def __init__(self, data_in):
"""Sets up the cluster class for each core
Parameters: ...
"""
# load data and check if prev completed
if self.load_data(data_in): return
# local clustering
print("\nchan "+str(self.channel)+", START LOCAL CLUSTERING")
self.initialize(initial_spt=self.spike_indexes_chunk[:, 0])
self.cluster(current_indices=self.starting_indices, gen=0, local=True)
self.finish(fname='channel_{}'.format(self.channel))
#
spike_train_local = np.copy(self.spike_train)
spike_train_final = []
templates_final = []
for ii, spike_train_k in enumerate(spike_train_local):
print("\nchan {}, START CLUSTERING UNIT {}/{}".format(self.channel, ii, len(spike_train_local)))
self.initialize(initial_spt=spike_train_k)
self.cluster(current_indices=self.starting_indices, gen=0, local=False)
self.finish(fname='channel_{}_local_unit_{}'.format(self.channel, ii))
spike_train_final += self.spike_train
templates_final += self.templates
# save clusters
self.save_result(spike_train_final, templates_final)
def cluster(self, current_indices, gen, local):
''' Recursive clustering function
channel: current channel being clustered
wf = wf_PCA: denoised waveforms (# spikes, # time points, # chans)
sic = spike_indices of spikes on current channel
gen = generation of cluster; increases with each clustering step
'''
# Exit if cluster too small
if current_indices.shape[0] <= self.CONFIG.cluster.min_spikes: return
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', # spikes: '+ str(current_indices.shape[0]))
# generation 0 steps
if gen==0:
# load waveforms
self.load_waveforms(local)
# align waveforms
self.align_step(local)
# denoise waveforms on active channels
self.denoise_step(local)
# featurize it
pca_wf = self.featurize_step(gen, current_indices)
# knn triage
idx_keep = self.knn_triage_step(gen, pca_wf)
if idx_keep.shape[0] <= self.CONFIG.cluster.min_spikes: return
# if anything is triaged, re-featurize and re-cluster
if idx_keep.shape[0] < pca_wf.shape[0]:
current_indices = current_indices[idx_keep]
pca_wf = self.featurize_step(gen, current_indices)
# run initial cluster step
vbParam = self.run_mfm(gen, self.subsample_step(gen, pca_wf))
##### TRIAGE 1 #####
# adaptive knn triage
#idx_keep = self.knn_triage_dynamic(gen, vbParam, pca_wf)
#if idx_keep.shape[0] <= self.CONFIG.cluster.min_spikes: return
# if anything is triaged, re-featurize and re-cluster
#if idx_keep.shape[0] < pca_wf.shape[0]:
# current_indices = current_indices[idx_keep]
# pca_wf = self.featurize_step(gen, current_indices)
# vbParam = self.run_mfm(gen, self.subsample_step(gen, pca_wf))
##### TRIAGE 2 #####
# if we subsampled then recover soft-assignments using above:
idx_recovered, vbParam = self.recover_step(gen, vbParam, pca_wf)
if idx_recovered.shape[0] <= self.CONFIG.cluster.min_spikes: return
# if anything is triaged further, update the info
if idx_recovered.shape[0] < pca_wf.shape[0]:
current_indices = current_indices[idx_recovered]
pca_wf = pca_wf[idx_recovered]
##### TRIAGE 3 #####
# kill any units with less than min_spikes
idx_survived, vbParam = self.kill_small_units(gen, vbParam)
if idx_survived.shape[0] <= self.CONFIG.cluster.min_spikes: return
# if anything is triaged further, update the info
if idx_survived.shape[0] < pca_wf.shape[0]:
current_indices = current_indices[idx_survived]
pca_wf = pca_wf[idx_survived]
'''*************************************************
*********** REVIEW AND SAVE RESULTS *************
*************************************************
'''
# Case #1: single mfm cluster found
if vbParam.rhat.shape[1] == 1:
self.single_cluster_step(gen, current_indices, pca_wf)
# Case #2: multiple clusters
else:
# this is done outside of multi_cluster_step so that pca_wf
# can be set to None (freed) before recursing into the next generation
cc_assignment, stability = self.cluster_annealing(vbParam)
if self.plotting and gen<20:
self.plot_clustering_scatter(gen, pca_wf, cc_assignment,
stability, 'mfm multi split')
pca_wf = None
self.multi_cluster_step(gen, current_indices, cc_assignment, local)
def load_data(self, data_in):
''''''
''' *******************************************
************ LOADED PARAMETERS ************
*******************************************
'''
# this indicates channel-wise clustering - NOT postdeconv recluster
self.deconv_flag = data_in[0]
self.channel = data_in[1]
self.CONFIG = data_in[2]
# spikes in the current chunk
self.spike_indexes_chunk = data_in[3]
self.chunk_dir = data_in[4]
# Check if channel already clustered
self.filename_postclustering = (self.chunk_dir + "/channel_"+
str(self.channel).zfill(6)+".npz")
# additional parameters if doing deconv:
if self.deconv_flag:
self.spike_train_cluster_original = data_in[5]
self.template_original = data_in[6]
self.deconv_max_spikes = 3000
self.unit = self.channel.copy()
self.filename_postclustering = (self.chunk_dir + "/recluster/unit_"+
str(self.unit).zfill(6)+".npz")
# check to see if 'recluster/' folder exists otherwise make it
recluster_dir = self.chunk_dir+'/recluster'
if not os.path.isdir(recluster_dir):
os.makedirs(recluster_dir)
if os.path.exists(self.filename_postclustering):
return True
# check to see if 'figures/' folder exists otherwise make it
self.figures_dir = self.chunk_dir+'/figures/'
if not os.path.isdir(self.figures_dir):
os.makedirs(self.figures_dir)
''' ********************************************
*********** DEFAULT PARAMETERS *************
********************************************
'''
# default parameters
self.n_channels = self.CONFIG.recordings.n_channels
self.min_spikes_local = self.CONFIG.cluster.min_spikes
self.standardized_filename = os.path.join(self.CONFIG.path_to_output_directory, 'preprocess', 'standarized.bin')
self.geometry_file = os.path.join(self.CONFIG.data.root_folder,
self.CONFIG.data.geometry)
# CAT: todo read params below from file:
self.plotting = False
self.verbose = False
self.starting_gen = 0
self.knn_triage_threshold = 0.95 * 100
self.knn_triage_flag = True
self.selected_PCA_rank = 5
self.yscale = 10.
self.xscale = 2.
self.triageflag = True
self.n_feat_chans = 5
self.mfm_threshold = 0.90
self.upsample_factor = 5
self.nshifts = 15
self.n_dim_pca = 3
self.n_dim_pca_compression = 5
# limit on featurization window;
# Cat: TODO this needs to be further set using window based on spike_size and sampling rate
self.spike_size = int(self.CONFIG.recordings.spike_size_ms*2
*self.CONFIG.recordings.sampling_rate/1000)+1
# load raw data array
if self.deconv_flag==False:
self.load_data_channels()
else:
self.load_data_units()
# return flag that clustering not yet complete
return False
def load_data_channels(self):
#if self.verbose:
# print("chan " + str(self.channel) + " loading data")
# Cat: TO DO: Is this index search expensive for hundreds of chans and many
# millions of spikes? Might want to do once rather than repeat
indexes = np.where(self.spike_indexes_chunk[:,1]==self.channel)[0]
# limit clustering to at most 50,000 spikes
if True:
if indexes.shape[0]>50000:
idx_50k = np.random.choice(np.arange(indexes.shape[0]),
size=50000,
replace=False)
indexes = indexes[idx_50k]
# check that spike times are not too close to the edges:
# first determine the length of the processing chunk based on the length of the recording
fp_len = int(os.path.getsize(self.standardized_filename)/4/49)
# limit indexes away from edge of recording
idx_inbounds = np.where(np.logical_and(
self.spike_indexes_chunk[indexes,0]>=self.spike_size//2,
self.spike_indexes_chunk[indexes,0]<(fp_len-self.spike_size//2)))[0]
indexes = indexes[idx_inbounds]
# check to see if any duplicate spike times occur
if np.unique(indexes).shape[0] != indexes.shape[0]:
print (" >>>>>>>>>>>>>>>>>>>>>>>> DUPLICATE SPIKE TIMES <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
indexes = np.unique(indexes)
# set spikeindexes from all spikes
self.spike_indexes_chunk = self.spike_indexes_chunk[indexes]
# load raw data from disk
#self.load_align_save_waveforms(fp)
# make sure no artifacts in data, clip to 1000
# Cat: TODO: is this necessary?
#self.wf_global = self.wf_global.clip(min=-1000, max=1000)
def load_data_units(self):
if self.verbose:
print("unit " + str(self.unit) + " loading data")
# select deconv spikes and read waveforms
self.indexes = np.where(self.spike_indexes_chunk[:, 1] == self.unit)[0]
# If there are no spikes assigned to unit, exit
if self.indexes.shape[0] == 0:
print(" unit: ", str(self.unit), " has no spikes...")
np.savez(self.filename_postclustering, spike_index=[],
templates=[],
templates_std=[],
weights=[])
return
if self.indexes.shape[0] != np.unique(self.indexes).shape[0]:
print(" unit: ", self.unit, " non unique spikes found...")
idx_unique = np.unique(self.indexes, return_index=True)[1]
self.indexes = self.indexes[idx_unique]
# Cat: TODO read this from disk
if self.indexes.shape[0] > self.deconv_max_spikes:
idx_subsampled = np.random.choice(np.arange(self.indexes.shape[0]),
size=self.deconv_max_spikes,
replace=False)
self.indexes = self.indexes[idx_subsampled]
# check that all spike indexes are inbounds
# Cat: TODO: this should be solved inside the waveform reader!
fp = np.memmap(self.standardized_filename, dtype='float32', mode='r')
fp_len = fp.shape[0] / self.n_channels
# limit indexes away from edge of recording
idx_inbounds = np.where(np.logical_and(
self.spike_indexes_chunk[self.indexes,0]>=self.spike_size//2,
self.spike_indexes_chunk[self.indexes,0]<(fp_len-self.spike_size//2)))[0]
self.indexes = self.indexes[idx_inbounds]
# set global spike indexes for all downstream analysis:
self.sic_global = self.spike_indexes_chunk[self.indexes]
# sets up initial array of indexes
self.starting_indexes = np.arange(self.indexes.shape[0])
# Cat: TODO: here we add additional offset for buffer inside residual matrix
# read waveforms by adding templates to residual
self.wf_global = self.load_waveforms_from_residual()
# make sure no artifacts in data, clip to 1000
# Cat: TODO: this should not be required; to test
self.wf_global = self.wf_global.clip(min=-1000, max=1000)
def initialize(self, initial_spt):
self.spike_train = []
self.templates = []
self.spt_global = initial_spt.astype('float32')
self.starting_indices = np.arange(len(self.spt_global))
if self.plotting:
self.x = np.zeros(100, dtype = int)
self.fig1 = plt.figure(figsize =(60,60))
self.grid1 = plt.GridSpec(20,20,wspace = 0.0,hspace = 0.2)
self.ax1 = self.fig1.add_subplot(self.grid1[:,:])
# setup template plot; scale based on electrode array layout
xlim = self.CONFIG.geom[:,0].ptp(0)
ylim = self.CONFIG.geom[:,1].ptp(0)#/float(xlim)
self.fig2 = plt.figure(figsize =(100,max(ylim/float(xlim)*100,10)))
self.ax2 = self.fig2.add_subplot(111)
def finish(self, fname=None):
if self.plotting:
if self.deconv_flag:
spikes_original = np.where(self.spike_train_cluster_original == self.unit)[0]
####### finish cluster plots #######
if self.deconv_flag:
max_chan = self.template_original.ptp(0).argmax(0)
else:
max_chan = self.channel
self.fig1.suptitle(fname, fontsize=100)
if self.deconv_flag:
self.fig1.savefig(self.chunk_dir + "/recluster/unit_{}_scatter.png".format(self.unit))
else:
#self.fig1.savefig(self.chunk_dir + "/channel_{}_scatter.png".format(self.channel))
self.fig1.savefig(os.path.join(self.figures_dir,fname+'_scatter.png'))
#plt.close(self.fig1)
####### finish template plots #######
# plot channel numbers and shading
for i in self.loaded_channels:
self.ax2.text(self.CONFIG.geom[i,0], self.CONFIG.geom[i,1],
str(i), alpha=0.4, fontsize=10)
# fill between 2 SUs on each channel
self.ax2.fill_between(self.CONFIG.geom[i,0] +
np.arange(-self.spike_size,0,1)/self.xscale, -self.yscale +
self.CONFIG.geom[i,1], self.yscale + self.CONFIG.geom[i,1],
color='black', alpha=0.05)
# plot max chan with big red dot
self.ax2.scatter(self.CONFIG.geom[max_chan,0],
self.CONFIG.geom[max_chan,1], s = 2000,
color = 'red')
# plot original templates for post-deconv reclustering
if self.deconv_flag:
self.ax2.plot(self.CONFIG.geom[:, 0] +
np.arange(-self.template_original.shape[0] // 2,
self.template_original.shape[0] // 2, 1)[:, np.newaxis] / self.xscale,
self.CONFIG.geom[:, 1] + self.template_original * self.yscale,
'r--', c='red')
labels = []
if self.deconv_flag:
patch_j = mpatches.Patch(color='red', label="size = {}".format(spikes_original.shape[0]))
labels.append(patch_j)
# if at least 1 cluster is found, plot the template
if len(self.spike_train)>0:
for clust in range(len(self.spike_train)):
patch_j = mpatches.Patch(color = sorted_colors[clust%100],
label = "size = {}".format(len(self.spike_train[clust])))
labels.append(patch_j)
self.ax2.legend(handles=labels, fontsize=100)
# plot title
self.fig2.suptitle(fname, fontsize=100)
if self.deconv_flag:
self.fig2.savefig(self.chunk_dir + "/recluster/unit_{}_template.png".format(self.unit))
else:
#self.fig2.savefig(self.chunk_dir + "/channel_{}_template.png".format(self.channel))
self.fig2.savefig(os.path.join(self.figures_dir,fname+'_template.png'))
#plt.close(self.fig2)
plt.close('all')
def save_result(self, spike_train=None, templates=None):
if self.deconv_flag:
spikes_original = np.where(self.spike_train_cluster_original == self.unit)[0]
# Cat: TODO: note clustering is done on PCA denoised waveforms but
# templates are computed on original raw signal
# recompute templates to contain full width information...
if self.deconv_flag:
np.savez(self.filename_postclustering,
spike_index_postrecluster=self.spike_train,
templates_postrecluster=self.templates,
spike_index_cluster= spikes_original,
templates_cluster=self.template_original)
else:
np.savez(self.filename_postclustering,
spiketime=spike_train,
templates=templates)
print ("**** Channel/Unit ", str(self.channel), " starting spikes: ",
len(self.spike_indexes_chunk), ", found # clusters: ",
len(spike_train))
self.wf_global = None
self.denoised_wf = None
self.spike_train = None
self.templates = None
def load_waveforms(self, local):
if self.verbose:
print ("chan "+str(self.channel)+", gen 0, loading waveforms")
neighbors = n_steps_neigh_channels(self.CONFIG.neigh_channels, 1)
self.neighbor_chans = np.where(neighbors[self.channel])[0]
if local:
self.loaded_channels = self.neighbor_chans
else:
self.loaded_channels = np.arange(self.CONFIG.recordings.n_channels)
self.wf_global = binary_reader_waveforms(self.standardized_filename,
self.CONFIG.recordings.n_channels,
self.spike_size,
self.spt_global.astype('int32')-(self.spike_size//2),
self.loaded_channels)
#x = np.arange(-self.spike_size // 2, self.spike_size // 2)
#spt = self.spt_global.astype('int32')
#self.wf_global = np.copy(recording[x + spt[:, np.newaxis]][:, :, self.loaded_channels]).astype('float32')
# clip waveforms
self.wf_global = self.wf_global.clip(min=-1000, max=1000)
def align_step(self, local):
if self.verbose:
print ("chan "+str(self.channel)+", gen 0, aligning")
if local:
ref_template = np.load(absolute_path_to_asset(os.path.join('template_space', 'ref_template.npy')))
mc = np.where(self.loaded_channels==self.channel)[0][0]
best_shifts = align_get_shifts_with_ref(
self.wf_global[:, :, mc], ref_template)
self.spt_global -= best_shifts
else:
best_shifts = self.spt_global.astype('int32') - self.spt_global
self.wf_global = shift_chans(self.wf_global, best_shifts)
def denoise_step(self, local):
if self.verbose:
print ("chan "+str(self.channel)+", gen 0, denoising waveorms")
if local:
self.denoise_step_local()
else:
self.denoise_step_distant()
def denoise_step_local(self):
# align, note: aligning all channels to max chan which is appended to the end
# note: max chan is first from feat_chans above, ensure order is preserved
# note: don't want for wf array to be used beyond this function
# Alignment: upsample max chan only; linear shift other chans
pc_mc = np.load(absolute_path_to_asset(os.path.join('template_space', 'pc_mc.npy')))
pc_sec = np.load(absolute_path_to_asset(os.path.join('template_space', 'pc_sec.npy')))
pc_mc_std = np.load(absolute_path_to_asset(os.path.join('template_space', 'pc_mc_std.npy')))
pc_sec_std = np.load(absolute_path_to_asset(os.path.join('template_space', 'pc_sec_std.npy')))
n_data, _, n_chans = self.wf_global.shape
self.denoised_wf = np.zeros((n_data, pc_mc.shape[1], n_chans),
dtype='float32')
for ii in range(n_chans):
if self.loaded_channels[ii] == self.channel:
self.denoised_wf[:, :, ii] = np.matmul(self.wf_global[:, :, ii], pc_mc)/pc_mc_std[np.newaxis]
else:
self.denoised_wf[:, :, ii] = np.matmul(self.wf_global[:, :, ii], pc_sec)/pc_sec_std[np.newaxis]
self.denoised_wf = np.reshape(self.denoised_wf, [n_data, -1])
good_features = np.median(np.square(self.denoised_wf), axis=0) > 0.5
self.denoised_wf = self.denoised_wf[:, good_features]
def denoise_step_distant(self):
energy = np.median(self.wf_global, axis=0)
max_energy = np.min(energy, axis=0)
th = np.max((-0.5, max_energy[self.channel]))
max_energy_loc_c = np.where(max_energy <= th)[0]
max_energy_loc_t = energy.argmin(axis=0)
max_energy_loc = np.hstack((max_energy_loc_t[max_energy_loc_c][:, np.newaxis],
max_energy_loc_c[:, np.newaxis]))
neighbors = n_steps_neigh_channels(self.CONFIG.neigh_channels, 1)
t_diff = 3
index = np.where(max_energy_loc[:,1]==self.channel)[0][0]
keep = self.connecting_points(max_energy_loc, index, neighbors, t_diff)
max_energy_loc = max_energy_loc[keep]
# exclude main and secondary channels
max_energy_loc = max_energy_loc[~np.in1d(max_energy_loc[:,1], self.neighbor_chans)]
self.denoised_wf = np.zeros((self.wf_global.shape[0], len(max_energy_loc)), dtype='float32')
for ii in range(len(max_energy_loc)):
self.denoised_wf[:, ii] = self.wf_global[:, max_energy_loc[ii,0], max_energy_loc[ii,1]]
def connecting_points(self, points, index, neighbors, t_diff, keep=None):
if keep is None:
keep = np.zeros(len(points), 'bool')
if keep[index] == 1:
return keep
else:
keep[index] = 1
spatially_close = np.where(neighbors[points[index, 1]][points[:, 1]])[0]
close_index = spatially_close[np.abs(points[spatially_close, 0] - points[index, 0]) <= t_diff]
for j in close_index:
keep = self.connecting_points(points, j, neighbors, t_diff, keep)
return keep
def active_chans_step(self, local):
if self.verbose:
print ("chan "+str(self.channel)+", gen 0, getting active channels")
energy = np.max(np.median(np.square(self.wf_global), axis=0), axis=0)
active_chans = np.where(energy > 0.5)[0]
if not local:
active_chans = active_chans[~np.in1d(active_chans, self.neighbor_chans)]
if len(active_chans) == 0:
active_chans = np.where(self.loaded_channels==self.channel)[0]
self.active_chans = active_chans
if local:
self.denoised_wf
def featurize_step(self, gen, indices):
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', featurizing')
if self.denoised_wf.shape[1] > self.selected_PCA_rank:
stds = np.std(self.denoised_wf[indices], axis=0)
good_d = np.where(stds > 1.05)[0]
if len(good_d) < self.selected_PCA_rank:
good_d = np.argsort(stds)[::-1][:self.selected_PCA_rank]
data_to_fit = self.denoised_wf[indices][:, good_d]
n_samples, n_features = data_to_fit.shape
pca = PCA(n_components=min(self.selected_PCA_rank, n_features))
pca_wf = pca.fit_transform(data_to_fit)
else:
pca_wf = self.denoised_wf[indices].copy()
return pca_wf.astype('float32')
def subsample_step(self, gen, pca_wf):
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', random subsample')
if not self.deconv_flag and (pca_wf.shape[0]> self.CONFIG.cluster.max_n_spikes):
idx_subsampled = np.random.choice(np.arange(pca_wf.shape[0]),
size=self.CONFIG.cluster.max_n_spikes,
replace=False)
pca_wf = pca_wf[idx_subsampled]
return pca_wf
def run_mfm(self, gen, pca_wf):
mask = np.ones((pca_wf.shape[0], 1))
group = np.arange(pca_wf.shape[0])
vbParam = mfm.spikesort(pca_wf[:,:,np.newaxis],
mask,
group,
self.CONFIG)
if self.verbose:
print("chan "+ str(self.channel)+', gen '\
+str(gen)+", "+str(vbParam.rhat.shape[1])+" clusters from ",pca_wf.shape)
return vbParam
def knn_triage_dynamic(self, gen, vbParam, pca_wf):
muhat = vbParam.muhat[:,:,0].T
cov = vbParam.invVhat[:,:,:,0].T / vbParam.nuhat[:,np.newaxis, np.newaxis]
if cov.shape[0] == 1:
self.triage_value = 0
idx_keep = np.arange(pca_wf.shape[0])
else:
min_spikes = 1200
pca_wf_temp = np.zeros([min_spikes*cov.shape[0], cov.shape[1]])
assignment_temp = np.zeros(min_spikes*cov.shape[0], dtype = int)
for i in range(cov.shape[0]):
pca_wf_temp[i*min_spikes:(i+1)*min_spikes]= np.random.multivariate_normal(muhat[i], cov[i], min_spikes)
assignment_temp[i*min_spikes:(i+1)*min_spikes] = i
kdist_temp = knn_dist(pca_wf_temp)
kdist_temp = kdist_temp[:,1:]
median_distances = np.zeros([cov.shape[0]])
for i in range(median_distances.shape[0]):
#median_distances[i] = np.median(np.median(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 0), axis = 0)
median_distances[i] = np.percentile(np.median(kdist_temp[i*min_spikes:(i+1)*min_spikes], axis = 1), 90)
kdist = knn_dist(pca_wf)
idx_keep = np.median(kdist[:,1:], axis = 1) < 1 * np.median(median_distances)
self.triage_value = 1.0 - idx_keep.sum()/idx_keep.size
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', '+str(np.round(self.triage_value*100))+'% triaged from adaptive knn triage')
return np.where(idx_keep)[0]
def knn_triage_step(self, gen, pca_wf):
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+', knn triage')
idx_keep = self.knn_triage(self.knn_triage_threshold, pca_wf)
idx_keep = np.where(idx_keep==1)[0]
self.triage_value = self.knn_triage_threshold
return idx_keep
def knn_triage(self, th, pca_wf):
tree = cKDTree(pca_wf)
dist, ind = tree.query(pca_wf, k=11)
dist = np.sum(dist, 1)
idx_keep1 = dist < np.percentile(dist, th)
return idx_keep1
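    @staticmethod
    def knn_triage_sketch():
        # Hedged, standalone sketch (not called by the pipeline): illustrates the
        # same kNN-distance triage as knn_triage() above on synthetic data. The
        # blob sizes and locations are invented; numpy and cKDTree come from the
        # module-level imports.
        rng = np.random.RandomState(0)
        dense = rng.normal(0.0, 1.0, (500, 5))      # well-populated cluster
        sparse = rng.normal(8.0, 1.0, (10, 5))      # isolated outliers
        pca_wf = np.vstack([dense, sparse])
        tree = cKDTree(pca_wf)
        dist, _ = tree.query(pca_wf, k=11)          # self + 10 nearest neighbours
        summed = np.sum(dist, 1)
        keep = summed < np.percentile(summed, 95)   # drop the 5% most isolated points
        return np.where(keep)[0]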
def recover_step(self, gen, vbParam, pca_wf_all):
# for post-deconv reclustering, we can safely cluster only 10k spikes or less
if not self.deconv_flag:
idx_recovered, vbParam = self.recover_spikes(vbParam, pca_wf_all)
else:
idx_recovered = np.arange(pca_wf_all.shape[0])
if self.verbose:
print ("chan "+ str(self.channel)+', gen '+str(gen)+", recovered ",
str(idx_recovered.shape[0])+ " spikes")
return idx_recovered, vbParam
def recover_spikes(self, vbParam, pca, maha_dist = 1):
N, D = pca.shape
threshold = D*maha_dist
# update rhat on full data
maskedData = mfm.maskData(pca[:,:,np.newaxis], np.ones([N, 1]), np.arange(N))
vbParam.update_local(maskedData)
# calculate mahalanobis distance
maha = mfm.calc_mahalonobis(vbParam, pca[:,:,np.newaxis])
idx_recovered = np.where(~np.all(maha >= threshold, axis=1))[0]
vbParam.rhat = vbParam.rhat[idx_recovered]
# zero out low assignment vals
self.recover_threshold = 0.001
if True:
vbParam.rhat[vbParam.rhat < self.recover_threshold] = 0
vbParam.rhat = vbParam.rhat/np.sum(vbParam.rhat,
1, keepdims=True)
return idx_recovered, vbParam
def kill_small_units(self, gen, vbParam):
# for post-deconv reclustering, we can safely cluster only 10k spikes or less
assignment = vbParam.rhat.argmax(1)
unique_units, n_data = np.unique(assignment, return_counts=True)
big_units = unique_units[n_data > self.CONFIG.cluster.min_spikes]
n_unit_killed = vbParam.rhat.shape[1] - len(big_units)
if len(big_units) > 0:
idx_survived = np.where(np.in1d(assignment, big_units))[0]
vbParam.rhat = vbParam.rhat[idx_survived][:, big_units]
vbParam.rhat = vbParam.rhat/vbParam.rhat.sum(axis=1, keepdims=True)
vbParam.ahat = vbParam.ahat[big_units]
vbParam.lambdahat = vbParam.lambdahat[big_units]
vbParam.nuhat = vbParam.nuhat[big_units]
vbParam.muhat = vbParam.muhat[:,big_units]
vbParam.Vhat = vbParam.Vhat[:,:,big_units]
vbParam.invVhat = vbParam.invVhat[:,:,big_units]
else:
idx_survived = np.zeros(0)
vbParam.rhat = np.zeros((0,0))
if self.verbose:
print ("chan "+ str(self.channel)+', gen '+str(gen)+", killed ",
str(n_unit_killed)+' small units')
if vbParam.rhat.shape[1] != len(big_units):
raise ValueError('number of units in rhat is wrong!')
return idx_survived, vbParam
def calculate_stability(self, rhat):
K = rhat.shape[1]
mask = rhat > 0.0
stability = np.zeros(K)
for clust in range(stability.size):
if mask[:,clust].sum() == 0.0:
continue
stability[clust] = np.average(mask[:,clust] * rhat[:,clust], axis = 0, weights = mask[:,clust])
return stability
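    @staticmethod
    def calculate_stability_sketch():
        # Hedged toy example (not used by the pipeline): for each cluster the
        # stability above is the mean soft-assignment among spikes that carry any
        # responsibility for that cluster. The rhat matrix below is invented and
        # gives roughly [0.8, 0.53].
        rhat = np.array([[1.0, 0.0],
                         [0.9, 0.1],
                         [0.5, 0.5],
                         [0.0, 1.0]])
        mask = rhat > 0.0
        return np.array([np.average(mask[:, k]*rhat[:, k], weights=mask[:, k])
                         for k in range(rhat.shape[1])])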
def get_k_cc(self, maha, maha_thresh_min, k_target):
# it assumes that maha_thresh_min gives
# at least k+1 number of connected components
k_now = k_target + 1
if len(self.get_cc(maha, maha_thresh_min)) != k_now:
raise ValueError("something is not right")
maha_thresh = maha_thresh_min
while k_now > k_target:
maha_thresh += 1
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
else:
maha_thresh_max = maha_thresh
maha_thresh_min = maha_thresh - 1
if len(self.get_cc(maha, maha_thresh_min)) <= k_target:
raise ValueError("something is not right")
ctr = 0
maha_thresh_max_init = maha_thresh_max
while True:
ctr += 1
maha_thresh = (maha_thresh_max + maha_thresh_min)/2.0
cc = self.get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
elif k_now > k_target:
maha_thresh_min = maha_thresh
elif k_now < k_target:
maha_thresh_max = maha_thresh
if ctr > 1000:
print(k_now, k_target, maha_thresh, maha_thresh_max_init)
print(cc)
print(len(self.get_cc(maha, maha_thresh+0.001)))
print(len(self.get_cc(maha, maha_thresh-0.001)))
raise ValueError("something is not right")
def get_cc(self, maha, maha_thresh):
row, column = np.where(maha<maha_thresh)
G = nx.DiGraph()
for i in range(maha.shape[0]):
G.add_node(i)
for i, j in zip(row,column):
G.add_edge(i, j)
cc = [list(units) for units in nx.strongly_connected_components(G)]
return cc
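    @staticmethod
    def get_cc_sketch():
        # Hedged, standalone sketch of the graph step used by get_cc() and
        # cluster_annealing(): threshold a symmetrised Mahalanobis-distance matrix
        # and merge units that fall in the same strongly connected component.
        # The 3x3 matrix and the threshold are invented.
        maha = np.array([[0.0, 1.0, 9.0],
                         [1.0, 0.0, 9.0],
                         [9.0, 9.0, 0.0]])
        maha_thresh = 2.0
        row, column = np.where(maha < maha_thresh)
        G = nx.DiGraph()
        G.add_nodes_from(range(maha.shape[0]))
        G.add_edges_from(zip(row, column))
        # units 0 and 1 merge, unit 2 stays alone
        return [list(units) for units in nx.strongly_connected_components(G)]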
def cluster_annealing(self, vbParam):
N, K = vbParam.rhat.shape
stability = self.calculate_stability(vbParam.rhat)
if (K <= 2) or np.all(stability > 0.9):
return vbParam.rhat.argmax(1), stability
maha = mfm.calc_mahalonobis(vbParam, vbParam.muhat.transpose((1,0,2)))
maha = np.maximum(maha, maha.T)
#N, K = vbParam.rhat.shape
#mu = np.copy(vbParam.muhat[:,:,0].T)
#mudiff = mu[:,np.newaxis] - mu
#prec = vbParam.Vhat[:,:,:,0].T * vbParam.nuhat[:,np.newaxis, np.newaxis]
#maha = np.matmul(np.matmul(mudiff[:, :, np.newaxis], prec[:, np.newaxis]), mudiff[:, :, :, np.newaxis])[:, :, 0, 0]
# decrease number of connected components one at a time.
# at any step, if all components are stable, stop and return;
# otherwise, keep going until only two connected components remain and return that
maha_thresh_min = 0
for k_target in range(K-1, 1, -1):
# get connected components with k_target number of them
cc, maha_thresh_min = self.get_k_cc(maha, maha_thresh_min, k_target)
# calculate soft assignment for each cc
rhat_cc = np.zeros([N,len(cc)])
for i, units in enumerate(cc):
rhat_cc[:, i] = np.sum(vbParam.rhat[:, units], axis=1)
rhat_cc[rhat_cc<0.001] = 0.0
rhat_cc = rhat_cc/np.sum(rhat_cc,axis =1 ,keepdims = True)
# calculate stability for each component
# and make decision
stability = self.calculate_stability(rhat_cc)
if np.all(stability>0.90) or k_target == 2:
return rhat_cc.argmax(1), stability
def single_cluster_step(self, gen, current_indices, pca_wf):
# exclude units whose maximum channel is not on the current
# clustered channel; but only during clustering, not during deconv
template = np.median(self.wf_global[current_indices], axis=0)
assignment = np.zeros(len(current_indices))
mc = self.loaded_channels[np.argmax(template.ptp(0))]
if mc != self.channel and (self.deconv_flag==False):
if self.verbose:
print (" chan "+str(self.channel)+", template has maxchan "+str(mc),
" skipping ...")
# always plot scatter distributions
if self.plotting and gen<20:
split_type = 'mfm non_max-chan'
end_flag = 'cyan'
self.plot_clustering_scatter(gen,
pca_wf, assignment, [1], split_type, end_flag)
else:
N = len(self.spike_train)
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+", >>> cluster "+
str(N)+" saved, size: "+str(len(assignment))+"<<<")
print ("")
self.spike_train.append(self.spt_global[current_indices])
self.templates.append(template)
# plot template if done
if self.plotting:
self.plot_clustering_template(gen, template,
len(current_indices), N)
# always plot scatter distributions
if gen<20:
split_type = 'mfm single unit'
end_flag = 'red'
self.plot_clustering_scatter(gen,
pca_wf, assignment, [1], split_type, end_flag)
def multi_cluster_step(self, gen, current_indices, cc_assignment, local):
for clust in np.unique(cc_assignment):
idx = np.where(cc_assignment==clust)[0]
if self.verbose:
print("chan "+str(self.channel)+', gen '+str(gen)+
", reclustering cluster with "+ str(idx.shape[0]) +' spikes')
self.cluster(current_indices[idx], gen+1, local)
def diptest_step(self, EM_split, assignment2, idx_recovered, vbParam2, pca_wf_all):
if EM_split:
gmm = GaussianMixture(n_components=2)
ctr=0
dp_val = 1.0
idx_temp_keep = np.arange(idx_recovered.shape[0])
cluster_idx_keep = np.arange(vbParam2.muhat.shape[1])
# loop over cluster until at least 3 loops and take lowest dp value
while True:
# use EM algorithm to get binary split
if EM_split:
gmm.fit(pca_wf_all[idx_recovered])
labels = gmm.predict_proba(pca_wf_all[idx_recovered])
temp_rhat = labels
temp_assignment = np.zeros(labels.shape[0], 'int32')
idx = np.where(labels[:,1]>0.5)[0]
temp_assignment[idx]=1
# use mfm algorithm to find temp-assignment
else:
temp_assignment = self.mfm_binary_split2(
vbParam2.muhat[:, cluster_idx_keep],
assignment2[idx_recovered],
cluster_idx_keep)
# check if any clusters smaller than min spikes
counts = np.unique(temp_assignment, return_counts=True)[1]
# update indexes if some clusters too small
if min(counts)<self.CONFIG.cluster.min_spikes:
print (" REMOVING SMALL CLUSTER DURING diptest")
bigger_cluster_id = np.argmax(counts)
# -*- coding: utf-8 -*- #
"""
Created on Tue Mar 3 11:18:30 2015
@author: wcgrizolli
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('/home/wcgrizolli/pythonWorkspace/wgTools')
import wgTools as wgt
from myFourierLib import *
from memory_profiler import profile
##=========================================================#
# %% auxiliar functions
##=========================================================#
def circ(X, Y, wx, wy, Xo=0.0, Yo=0.0): # circular
out = X*0.0
out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) < 0.5**2] = 1.0
out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) == 0.5**2] = .50
return out
def tFuncLens(X, Y, wavelength, fx=1e23, fy=1e23):
return np.exp(-1j*2*np.pi/wavelength/2/fx*(X**2+Y**2))
def tFuncZP(X, Y, wavelength, fx=1e23, fy=1e23):
return .5*(1.0 + np.sign(np.cos(np.pi/wavelength/fx*(X**2 + Y**2))))
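# A small, self-contained sketch (not called by main()) showing how the aperture
# and thin-lens transmission functions above are typically combined on a grid;
# the number of points, wavelength, focal length and aperture width below are
# illustrative assumptions only.
def exampleApertureLens(npoints=201, Lx=2e-3, wavelength=1.2398e-9, fx=5.0, w=200e-6):
    Y, X = np.mgrid[-Lx/2:Lx/2:1j*npoints, -Lx/2:Lx/2:1j*npoints]
    u = circ(X, Y, w, w)*tFuncLens(X, Y, wavelength, fx=fx)
    return X, Y, u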
##=========================================================#
# %% sampling and base definition
##=========================================================#
#@profile
def main():
wavelength = 1.2398e-9 # 1KeV
Lx = 2e-3
#zz = 1.0 # XXX: dist to propag
zz = .01000 # XXX: dist to propag
zoomFactor = 1/500.0
Lx2 = Lx*zoomFactor
##=========================================================#
# %% 2D analytical function.
##=========================================================#
#npoints = 1001
#
#Y, X = np.mgrid[-Lx/2:Lx/2:1j*npoints, -Lx/2:Lx/2:1j*npoints]
#
#wx = 200e-6
#wy = 200e-6
#
#print('WG: Creating Source Wave u1...')
#
##u1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=(1/5.0+1/zz)**-1)
#
## %% gaussian beam
#u1_xy = (tFuncLens(X, Y, wavelength, fx=(1/5.0+1/zz)**-1) * circ(X, Y, wx, wy) *
# gaussianBeam(10e-6, wavelength, 5.000, Lx, X.shape[0]))
#
## %% double slit
##u1_xy = circ(X, Y, wx, wy, 0, 80e-6) + circ(X, Y, wx, wy, 0,-80e-6)
#
#print('WG: Creating Source Wave u1: DONE!')
##=========================================================#
# %% 2D load data
##=========================================================#
u1_xy = np.load('emWave.npz')['emWave']
X = np.load('emWave.npz')['x']
Y = np.load('emWave.npz')['y']
[Mx,My] = u1_xy.shape
print('WG: u1_xy.shape: %d, %d' % (Mx, My))
Lx = X[0, -1] - X[0, 0]
Ly = Y[-1, 0] - Y[0, 0]
print('WG: Lx = %.3f mm' % (Lx*1e3))
print('WG: Ly = %.3f mm' % (Ly*1e3))
valueToMaskX = 2e-3
interpolateFlag = 1
# %% Crop and increase number of points
if valueToMaskX > 0.0000:
print('WG: Crop data...')
# mask2
idx_1 = np.argmin(np.abs(X[0, :] + valueToMaskX/2))
idx_2 = np.argmin(np.abs(X[0, :] - valueToMaskX/2))
idx_3 = np.argmin(np.abs(Y[:, 0] + valueToMaskX/2))
idx_4 = np.argmin(np.abs(Y[:, 0] - valueToMaskX/2))
u1_xy = u1_xy[idx_3:idx_4, idx_1:idx_2]
X = X[idx_3:idx_4, idx_1:idx_2]
Y = Y[idx_3:idx_4, idx_1:idx_2]
Lx = X[0,-1] - X[0,0]
Ly = Y[-1,0] - Y[0,0]
[Mx,My] = u1_xy.shape
print('WG: new Lx = %.3f mm' % (Lx*1e3))
print('WG: new Ly = %.3f mm' % (Ly*1e3))
print('WG: new shape after crop: %d, %d' % (Mx,My))
print('WG: Crop data: done!')
# %% increase resolution using interpolation
if interpolateFlag:
# from scipy import interpolate
from scipy.interpolate import griddata
print('WG: Interpolation to increase resolution...')
nPointsInterp = 1001j
grid_y, grid_x = np.mgrid[X[0, 0]:X[0, -1]:nPointsInterp,
X[0, 0]:X[0, -1]:nPointsInterp]
grid_z0_real = griddata(np.concatenate((X.reshape(-1, 1),
Y.reshape(-1, 1)), axis=1),
np.real(u1_xy).flat[:],
(grid_x, grid_y),
method='cubic',
fill_value=0)
grid_z0_im = griddata(np.concatenate((X.reshape(-1, 1),
Y.reshape(-1, 1)), axis=1),
np.imag(u1_xy).flat[:],
(grid_x, grid_y),
method='cubic',
fill_value=0)
u1_xy = grid_z0_real + 1j*grid_z0_im
X = grid_x
Y = grid_y
Lx = X[0,-1] - X[0,0]
Ly = Y[-1,0] - Y[0,0]
[Mx,My] = u1_xy.shape
print('WG: Lx = %.3f mm' % (Lx*1e3))
print('WG: Ly = %.3f mm' % (Ly*1e3))
print('WG: done!')
print('WG: new shape resize: %d, %d' % (Mx, My))
print('WG: new Lx = %.3f mm' % (Lx*1e3))
print('WG: new Ly = %.3f mm' % (Ly*1e3))
# %% add lens, etc to wave from data
wx = 200e-6
wy = 200e-6
#u1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=(1/5.0+1/zz)**-1)*u1_xy
u1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=(1/5.0+1/zz)**-1)*u1_xy
##=========================================================#
# %% Plot u1
##=========================================================#
saveFigure = 0
## U1
if saveFigure:
xo, yo = 0.0, 0.0
else:
xo, yo = None, None
print('WG: Plot u1...')
factorX, unitStrX = wgt.chooseUnit(X)
factorY, unitStrY = wgt.chooseUnit(Y)
unitStrX = unitStrX + ' m'
unitStrY = unitStrY + ' m'
# %% U1
#phase = np.angle(u1_xy)*circ(X, Y, wx, wy)
#phase = -(np.unwrap(np.unwrap(np.unwrap(np.unwrap(phase), axis=0)), axis=0)/np.pi*
# circ(X, Y, wx, wy))
wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy)**2,
r'$x [' + unitStrX + ']$',
r'$y [' + unitStrY + ']$',
r'Intensity [a.u.]',
r'u1_xy',
xo=xo, yo=yo,
unitX=unitStrX, unitY=unitStrY)
if saveFigure:
outputFigureName = wgt.datetimeNowStr() + '_u1.png'
plt.savefig(outputFigureName)
print('WG: Figure saved at %s!\n' % (outputFigureName))
plt.close()
else:
plt.show(block=True)
plt.close()
print('WG: Plot u1: DONE!')
#phase = None
##=========================================================#
# %% Propagation
##=========================================================#
print('WG: Propagation...')
# u2_xy = propTForIR(u1_xy,Lx,Ly,wavelength,zz)
# titleStr = str(r'propTForIR, zz=%.3fmm, Intensity [a.u.]'
# % (zz*1e3))
# u2_xy = propIR_RayleighSommerfeld(u1_xy,Lx,Ly,wavelength,zz)
# titleStr = str(r'propIR_RayleighSommerfeld, zz=%.3fmm, Intensity [a.u.]'
# % (zz*1e3))
# u2_xy = propTF_RayleighSommerfeld(u1_xy,Lx,Ly,wavelength,zz)
# titleStr = str(r'propTF_RayleighSommerfeld, zz=%.3fmm, Intensity [a.u.]'
# % (zz*1e3))
# u2_xy, L2 = propFF(u1_xy, Lx, wavelength, zz)
# titleStr = str(r'propFF, zz=%.3fmm, Intensity [a.u.]'
# % (zz*1e3))
# X,Y = np.meshgrid(np.linspace(-L2/2,L2/2,Mx,endpoint=False),
# np.linspace(-L2/2,L2/2,My),endpoint=False)
# print('WG: L2: %.5gmm' % (L2*1e3))
# print('WG: X.shape: ', X.shape)
#
# Lx2 = Lx/1.00
u2_xy = prop2step(u1_xy, Lx, Lx2, wavelength, zz)
X, Y = np.meshgrid(np.linspace(-Lx/2, Lx/2, Mx, endpoint=False),
np.linspace(-Ly/2, Ly/2, My, endpoint=False))
titleStr = str(r'prop2step, zz=%.3fmm, Intensity [a.u.]'
% (zz*1e3))
print('WG: Power 1: %.5g' % np.sum(np.abs(u1_xy)**2))
import numpy as np
import scipy.spatial as spacial
from math import sqrt
class TspEnv:
"""
A Travelling Salesman Environment.
Any environment needs:
* An initialise (reset) method that returns the initial observations,
reward, whether state is terminal, additional information.
* A reset
* A state space
* A way to denote possible actions
* A way to make sure the move is legal
* A way to affect environment
* A step function that returns the new observations, reward,
whether state is terminal, additional information
* A way to render the environment.
Methods:
--------
__init__:
Constructor method.
is_terminal_state:
Check whether all cities visited
render:
Display state (grid showing which cities are visited or unvisited)
reset:
Initialise environment (including TspState object),
and return obs, reward, terminal, info
step:
Take an action. Update state. Return obs, reward, terminal, info
Attributes:
----------
action_space:
Number of cities (integer)
number_of_cities:
Number of cities to be visited (integer)
observation_space:
Cities visited (NumPy 0/1 array)
render_game:
Show game grid
"""
def __init__(self, number_of_cities = 6, grid_dimensions = (100,100),
render_game = False):
"""
Constructor class for TSP environment
"""
self.action_space = np.zeros(number_of_cities)
self.grid_dimensions = grid_dimensions
self.max_possible_distance = sqrt(grid_dimensions[0]**2 + grid_dimensions[1]**2)
self.number_of_cities = number_of_cities
self.observation_space = np.zeros(number_of_cities)
self.render_game = render_game
self.info = dict()
def is_terminal_state(self, action):
"""Check if current state is terminal. All cities complete and agent
returns to city 0"""
is_terminal = False
if (self.state.visited_status.sum() == self.number_of_cities) and (
action ==0):
is_terminal = True
return is_terminal
def render(self):
"""Show which cities visited and current city"""
# TODO: REPLACE THIS WITH MATPLOTLIB OUTPUT
grid = np.zeros(self.grid_dimensions)
# Add unvisited cities as 1, visited cities as 2
for city in range(self.number_of_cities):
city_grid_ref = self.state.city_locations[city]
if self.state.visited_status[city] == 0:
grid[city_grid_ref] = 1
else:
grid[city_grid_ref] = 2
# Print
print (grid)
def reset(self, reverse=False):
"""
Initialise model and return observations.
reverse=True is used for autodidactic iteration learning.
"""
self.state = TspState(self.number_of_cities, self.grid_dimensions)
# Obs = array of visited cities and one-hot array of current city
if reverse:
# Start with all cities visited
obs = np.ones(self.number_of_cities)
else:
obs = np.zeros(self.number_of_cities)
obs[0] = 1
obs = np.concatenate((self.state.visited_status, obs))
reward = 0
is_terminal = self.is_terminal_state(0)
if self.render_game:
self.render()
# return city order chosen as info
self.info['route_taken'] = self.state.visited_order
return obs, reward, is_terminal, self.info
def step(self, action):
"""Make agent take a step"""
# ToDo check action is legal (in action space)
self.state.visited_order.append(action)
# Get reward if new city visited (max reward = max possible distance):
if self.state.visited_status[action] == 0:
reward = self.max_possible_distance
else:
reward = 0 - self.max_possible_distance
# Subtract distance travelled from reward
distance = self.state.distances[self.state.agent_location, action]
reward -= distance
# Update agent location in state
self.state.agent_location = action
# Update visited_status
self.state.visited_status[action] = 1
# Check whether all cities visited and returned home (extra reward)
terminal = self.is_terminal_state(action)
if terminal:
reward += self.max_possible_distance
# Obs = array of visited cities and one-hot array of current city
obs = np.zeros(self.number_of_cities)
obs[action]= 1
obs = np.concatenate((self.state.visited_status, obs))
# return city order chosen as info
self.info['route_taken'] = self.state.visited_order
if self.render_game:
self.render()
return obs, reward, terminal, self.info
class TspState:
"""TSP state object.
Methods:
--------
__init__
Constructor method.
Attributes:
-----------
city_locations:
List of city x,y, locations
distances:
Dictionary of distance between two cities (index = (from, to))
Distance (cost) of staying in the same city = 100
visited_order:
List of actions (cities visited) by agent. Can contain duplicates
if the agent returned to a city.
visited_status:
Array showing if cities unvisited (0) or visited (1)
The state is set up with the agent at city 0 (which is marked as
visited)"""
def __init__(self, number_of_cities, grid_dimensions):
"""
Constructor method for TSP state.
"""
self.agent_location = 0
self.city_locations = []
self.distances = dict()
self.visited_order = [0]
self.visited_status = np.zeros(number_of_cities)
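# Hedged, standalone sketch (not part of the classes above): shows how city
# coordinates can be turned into the pairwise distance lookup described in the
# TspState docstring, and how the step reward (max possible distance minus the
# distance travelled) works out. The three city locations are invented, and a
# dense matrix stands in for the (from, to)-keyed dict used by TspState.
def example_tsp_reward():
    from scipy.spatial.distance import cdist
    grid_dimensions = (100, 100)
    city_locations = [(0, 0), (30, 40), (90, 10)]
    max_possible_distance = sqrt(grid_dimensions[0]**2 + grid_dimensions[1]**2)
    distance_matrix = cdist(city_locations, city_locations)
    # reward for moving from city 0 to the unvisited city 1: ~141.4 - 50.0
    return max_possible_distance - distance_matrix[0, 1]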
#!/usr/bin/env python
"""
A script that generates report files based on Measurements JSON output.
It requires providing the report type and JSON file to extract data from.
"""
import sys
import argparse
from pathlib import Path
from typing import Dict, List, Optional
import json
import numpy as np
if sys.version_info.minor < 9:
from importlib_resources import path
else:
from importlib.resources import path
from kenning.resources import reports
from kenning.core.drawing import time_series_plot
from kenning.core.drawing import draw_confusion_matrix
from kenning.core.drawing import recall_precision_curves
from kenning.core.drawing import recall_precision_gradients
from kenning.core.drawing import true_positive_iou_histogram
from kenning.core.drawing import true_positives_per_iou_range_histogram
from kenning.core.drawing import draw_plot
from kenning.utils import logger
from kenning.core.report import create_report_from_measurements
from kenning.utils.class_loader import get_command
log = logger.get_logger()
def performance_report(
reportname: str,
measurementsdata: Dict[str, List],
imgdir: Path,
reportpath: Path,
rootdir: Optional[Path] = None) -> str:
"""
Creates performance section of the report.
Parameters
----------
reportname : str
Name of the report
measurementsdata : Dict[str, List]
Statistics from the Measurements class
imgdir : Path
Path to the directory for images
reportpath : Path
Path to the output report
rootdir : Optional[Path]
Path to the root of the RST project involving this report
Returns
-------
str : content of the report in RST format
"""
log.info('Running performance_report')
if rootdir is None:
rootdir = reportpath.parent
if 'target_inference_step' in measurementsdata:
log.info('Using target measurements for inference time')
usepath = imgdir / f'{reportpath.stem}_inference_time.png'
time_series_plot(
str(usepath),
f'Inference time for {reportname}',
'Time', 's',
'Inference time', 's',
measurementsdata['target_inference_step_timestamp'],
measurementsdata['target_inference_step'],
skipfirst=True)
measurementsdata['inferencetimepath'] = str(
usepath.relative_to(rootdir)
)
measurementsdata['inferencetime'] = \
measurementsdata['target_inference_step']
elif 'protocol_inference_step' in measurementsdata:
log.info('Using protocol measurements for inference time')
usepath = imgdir / f'{reportpath.stem}_inference_time.png'
time_series_plot(
str(usepath),
f'Inference time for {reportname}',
'Time', 's',
'Inference time', 's',
measurementsdata['protocol_inference_step_timestamp'],
measurementsdata['protocol_inference_step'],
skipfirst=True)
measurementsdata['inferencetimepath'] = str(
usepath.relative_to(rootdir)
)
measurementsdata['inferencetime'] = \
measurementsdata['protocol_inference_step']
else:
log.warning('No inference time measurements in the report')
if 'session_utilization_mem_percent' in measurementsdata:
log.info('Using target measurements memory usage percentage')
usepath = imgdir / f'{reportpath.stem}_cpu_memory_usage.png'
time_series_plot(
str(usepath),
f'Memory usage for {reportname}',
'Time', 's',
'Memory usage', '%',
measurementsdata['session_utilization_timestamp'],
measurementsdata['session_utilization_mem_percent'])
measurementsdata['memusagepath'] = str(
usepath.relative_to(rootdir)
)
else:
log.warning('No memory usage measurements in the report')
if 'session_utilization_cpus_percent' in measurementsdata:
log.info('Using target measurements CPU usage percentage')
usepath = imgdir / f'{reportpath.stem}_cpu_usage.png'
measurementsdata['session_utilization_cpus_percent_avg'] = [
np.mean(cpus) for cpus in
measurementsdata['session_utilization_cpus_percent']
]
time_series_plot(
str(usepath),
f'Mean CPU usage for {reportname}',
'Time', 's',
'Mean CPU usage', '%',
measurementsdata['session_utilization_timestamp'],
measurementsdata['session_utilization_cpus_percent_avg'])
measurementsdata['cpuusagepath'] = str(
usepath.relative_to(rootdir)
)
else:
log.warning('No CPU usage measurements in the report')
if 'session_utilization_gpu_mem_utilization' in measurementsdata:
log.info('Using target measurements GPU memory usage percentage')
usepath = imgdir / f'{reportpath.stem}_gpu_memory_usage.png'
time_series_plot(
str(usepath),
f'GPU memory usage for {reportname}',
'Time', 's',
'GPU memory usage', 'MB',
measurementsdata['session_utilization_gpu_timestamp'],
measurementsdata['session_utilization_gpu_mem_utilization'])
measurementsdata['gpumemusagepath'] = str(
usepath.relative_to(rootdir)
)
else:
log.warning('No GPU memory usage measurements in the report')
if 'session_utilization_gpu_utilization' in measurementsdata:
log.info('Using target measurements GPU utilization')
usepath = imgdir / f'{reportpath.stem}_gpu_usage.png'
time_series_plot(
str(usepath),
f'GPU Utilization for {reportname}',
'Time', 's',
'Utilization', '%',
measurementsdata['session_utilization_gpu_timestamp'],
measurementsdata['session_utilization_gpu_utilization'])
measurementsdata['gpuusagepath'] = str(
usepath.relative_to(rootdir)
)
else:
log.warning('No GPU utilization measurements in the report')
with path(reports, 'performance.rst') as reporttemplate:
return create_report_from_measurements(
reporttemplate,
measurementsdata
)
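# Illustrative only: a minimal `measurementsdata` dictionary (with invented
# numbers) that would exercise the inference-time, memory-usage and CPU-usage
# branches of performance_report above.
def example_measurementsdata() -> Dict[str, List]:
    return {
        'target_inference_step': [0.012, 0.011, 0.013],
        'target_inference_step_timestamp': [0.0, 1.0, 2.0],
        'session_utilization_timestamp': [0.0, 1.0, 2.0],
        'session_utilization_mem_percent': [35.0, 36.5, 36.1],
        'session_utilization_cpus_percent': [[10.0, 12.0], [11.0, 13.0], [9.0, 12.5]],
    }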
def classification_report(
reportname: str,
measurementsdata: Dict[str, List],
imgdir: Path,
reportpath: Path,
rootdir: Optional[Path] = None):
"""
Creates classification quality section of the report.
Parameters
----------
reportname : str
Name of the report
measurementsdata : Dict[str, List]
Statistics from the Measurements class
imgdir : Path
Path to the directory for images
reportpath : Path
Path to the output report
rootdir : Optional[Path]
Path to the root of the RST project involving this report
Returns
-------
str : content of the report in RST format
"""
log.info('Running classification report')
if rootdir is None:
rootdir = reportpath.parent
if 'eval_confusion_matrix' not in measurementsdata:
log.error('Confusion matrix not present for classification report')
return ''
log.info('Using confusion matrix')
confusionpath = imgdir / f'{reportpath.stem}_confusion_matrix.png'
draw_confusion_matrix(
measurementsdata['eval_confusion_matrix'],
str(confusionpath),
'Confusion matrix',
measurementsdata['class_names']
)
measurementsdata['confusionpath'] = str(
confusionpath.relative_to(rootdir)
)
with path(reports, 'classification.rst') as reporttemplate:
return create_report_from_measurements(
reporttemplate,
measurementsdata
)
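# Hedged sketch (not part of Kenning): one common way to accumulate a confusion
# matrix like `eval_confusion_matrix` from integer label vectors with plain NumPy;
# `y_true` and `y_pred` are assumed inputs, and the row/column orientation is an
# assumption, not Kenning's documented convention.
def example_confusion_matrix(y_true: np.ndarray, y_pred: np.ndarray, num_classes: int) -> np.ndarray:
    cm = np.zeros((num_classes, num_classes), dtype=np.int64)
    np.add.at(cm, (y_true, y_pred), 1)  # rows = true class, columns = predicted class
    return cm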
def detection_report(
reportname: str,
measurementsdata: Dict[str, List],
imgdir: Path,
reportpath: Path,
rootdir: Optional[Path] = None) -> str:
"""
Creates detection quality section of the report.
Parameters
----------
reportname : str
Name of the report
measurementsdata : Dict[str, List]
Statistics from the Measurements class
imgdir : Path
Path to the directory for images
reportpath : Path
Path to the output report
rootdir : Optional[Path]
Path to the root of the RST project involving this report
Returns
-------
str : content of the report in RST format
"""
from kenning.datasets.open_images_dataset import compute_ap11
from kenning.datasets.open_images_dataset import get_recall_precision
from kenning.datasets.open_images_dataset import compute_map_per_threshold
log.info('Running detection report')
if rootdir is None:
rootdir = reportpath.parent
lines = get_recall_precision(measurementsdata, 0.5)
aps = []
for line in lines:
aps.append(compute_ap11(line[0], line[1]))
measurementsdata['mAP'] = np.mean(aps)
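# Rough, standalone sketch of the standard 11-point interpolated AP, assumed to
# mirror what compute_ap11 above computes: the average, over recall thresholds
# 0.0, 0.1, ..., 1.0, of the best precision achieved at or above each threshold.
def example_ap11(recall: np.ndarray, precision: np.ndarray) -> float:
    ap = 0.0
    for threshold in np.linspace(0.0, 1.0, 11):
        above = precision[recall >= threshold]
        ap += (np.max(above) if above.size else 0.0) / 11.0
    return ap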