import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class WizardsEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, suits=4, max_card=13, players=4, seed=None):
self.suits = suits
self.max_card = max_card
self.players = players
self.cards_per_player = (suits * max_card) // players
# self.action_space = spaces.Discrete(self.cards_per_player)
self.action_space = spaces.Box(low=0.0, high=1.0, shape=[self.cards_per_player], dtype=np.float32)
self.unflattened_observation_space = spaces.Tuple((
spaces.MultiDiscrete([max_card + 1, suits + 1] * self.cards_per_player), # cards in hand
spaces.MultiDiscrete([max_card + 1, suits + 1] * players), # cards played this turn
spaces.MultiDiscrete([self.cards_per_player + 1] * players) # scores
))
self.observation_space = spaces.Box(low=-float('inf'), high=float('inf'), shape=(spaces.flatdim(self.unflattened_observation_space), ), dtype=np.float32)
self.seed(seed)
self.reset()
def seed(self, seed):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
# play the card the agent has chosen
card_id = action.argmax()
chosen_card = np.array(self.hands[0][card_id])
if chosen_card[0] == 0:
return self._lost("played card that was played before")
self.hands[0][card_id] = [0, 0]
self.played_cards[0] = chosen_card
# check whether the correct suit was played
played_suits = [card[1] for card in self.played_cards[1:] if not card[1] == 0]
if len(played_suits) > 0:
called_suite = played_suits[0]
if not chosen_card[1] == called_suite and len([card for card in self.hands[0] if card[1] == called_suite]) > 0:
return self._lost("did not play suite called for")
else:
called_suite = chosen_card[1]
# play cards by other agents until the round is finished
for i in range(1, self.players):
if not self.played_cards[i][0] == 0:
continue
self._play_card(i, called_suite)
# determine the winner of the round and distribute scores
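# ranking: a card's effective value is its face value plus a max_card bonus if it follows
# the called suit, so any card of the called suit beats any off-suit card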
winner = max(range(self.players), key=lambda player: self.played_cards[player][0] + (1 if self.played_cards[player][1] == called_suite else 0) * self.max_card)
self.scores[winner] += 1
self.played_cards = [[0, 0]] * self.players
called_suite = -1
# set up return values
reward = self.scores[0]
done = max([card[0] for card in self.hands[0]]) == 0
info = {}
# simulate beginning of next round until the agent has to take an action
if not done and not winner == 0:
for i in range(max(1, winner), self.players):
called_suite = self._play_card(i, called_suite)
return self._get_observations(), reward, done, info
def _lost(self, reason):
return self._get_observations(), -1000 + 10*sum(self.scores) + self.scores[0], True, {"reason": reason}
def reset(self):
deck = [[i, suit] for i in range(1, self.max_card + 1) for suit in range(1, self.suits + 1)]
shuffled_deck = self.np_random.permutation(deck)
self.hands = shuffled_deck[:self.players * self.cards_per_player].reshape(self.players, -1, 2)
self.scores = [0] * self.players
self.played_cards = [[0, 0]] * self.players
return self._get_observations()
def render(self, mode='human'):
print("scores: {}".format(self.scores))
print("hands: {}".format(self.hands))
print("played cards: {}".format(self.played_cards))
def close(self):
pass
def _play_card(self, player, called_suite):
# play cards left to right for now
legal_cards = [i for i in range(self.cards_per_player) if self.hands[player][i][1] == called_suite]
if len(legal_cards) == 0:
legal_cards = [i for i in range(self.cards_per_player) if not self.hands[player][i][0] == 0]
card_idx = min(legal_cards)
self.played_cards[player] = np.array(self.hands[player][card_idx])
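# NOTE: the original snippet is truncated here. The lines below are a plausible
# completion (an assumption, not the author's code): clear the played card from the
# hand and return the suit now being called for, which step() expects as a return value.
suit = int(self.hands[player][card_idx][1])
self.hands[player][card_idx] = [0, 0]
return called_suite if called_suite != -1 else suit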
"""Spike sorting classes and window"""
from __future__ import division
from __future__ import print_function
__authors__ = ['<NAME>', '<NAME>']
import os
import sys
import time
import datetime
from copy import copy
import operator
import random
import shutil
import hashlib
import multiprocessing as mp
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAction, QIcon, QApplication
import numpy as np
import scipy
import scipy.signal
#from scipy.cluster.hierarchy import fclusterdata
import pylab as pl
import pyximport
pyximport.install(build_in_temp=False, inplace=True)
from . import util # .pyx file
from . import core
from .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,
rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,
USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)
from .detect import DEBUG
from .surf import EPOCH
from .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE
from .__version__ import __version__
#MAXCHANTOLERANCE = 100 # um
NSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs
PANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels
PANELHEIGHTPERROW = 50 # sort panel height per row of channels
VSCROLLBARWIDTH = 14 # hack
SORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically
MINSORTWINDOWWIDTH = 566
MEANWAVEMAXSAMPLES = 2000
NPCSPERCHAN = 7
PCALIB = 'mdp'
ICALIB = 'sklearn'
DEFMINISI = 50 # default minimum ISI to check for on export, us
MAXGROUPISI = 100000 # us (100 ms)
MAXGROUPDT = 100000000 # us (100 s)
class Sort(object):
"""A spike sorting session, in which you can detect spikes and sort them into Neurons.
A .sort file is a single Python2-pickled Sort object. A .json file is a
jsonpickle-pickled Sort object"""
def __init__(self, detector=None, stream=None, tw=None):
self.__version__ = __version__
self.fname = ''
self.user = ''
self.notes = ''
self.detector = detector # this Sort's current Detector object
self.tw = tw # time window (us) relative to spike time
self.stream = stream
self.probe = stream.probe # only one probe design per sort allowed
self.converter = stream.converter
self.neurons = {}
self.clusters = {} # neurons with multidm params scaled for plotting
self.norder = [] # stores order of neuron ids display in nlist
self.npcsperchan = NPCSPERCHAN
def get_nextnid(self):
"""nextnid is used to retrieve the next unique single unit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return 1 # single unit nids start at 1
else:
return max(max(nids) + 1, 1) # at least 1
nextnid = property(get_nextnid)
def get_nextmuid(self):
"""nextmuid is used to retrieve the next unique multiunit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return -1 # multiunit ids start at -1
else:
return min(min(nids) - 1, -1) # at most -1
nextmuid = property(get_nextmuid)
def get_good(self):
"""Return array of nids marked by user as 'good'"""
good = []
for neuron in self.neurons.values():
try:
if neuron.good:
good.append(neuron.id)
except AttributeError: # neuron is from older sort, no .good attrib
neuron.good = False
return np.asarray(good)
def set_good(self, good):
"""Set good flag to True for nids in good, False otherwise"""
nids = list(self.neurons)
assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist
notgood = np.setdiff1d(nids, good)
for nid in notgood:
neuron = self.neurons[nid]
neuron.good = False
for nid in good:
neuron = self.neurons[nid]
neuron.good = True
good = property(get_good, set_good)
def get_stream(self):
try:
return self._stream
except AttributeError:
# this is likely a brand new sort, has yet to be assigned a Stream
return None
def set_stream(self, stream=None):
"""Check stream type and name and probe type, and restore filtmeth, car, sampfreq and
shcorrect to stream when binding/modifying stream to self"""
oldstream = self.stream
if stream != None and oldstream != None:
# do stream types match?
if type(stream) != type(oldstream):
raise ValueError("Stream types don't match: %s, %s"
% (type(oldstream), type(stream)))
# do stream probe types match?
if type(stream.probe) != type(oldstream.probe):
raise ValueError("Stream probe types don't match: %s, %s"
% (type(oldstream.probe), type(stream.probe)))
# is one stream fname a superset of the other?
if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):
raise ValueError("Stream file names are not supersets of each other: %s, %s"
% (oldstream.fname, stream.fname))
else:
print('Stream file names are similar enough to proceed: %s, %s'
% (stream.fname, oldstream.fname))
try:
stream.filtmeth = self.filtmeth
stream.car = self.car
stream.sampfreq = self.sampfreq
stream.shcorrect = self.shcorrect
except AttributeError:
pass # one of the above attribs isn't bound
self._stream = stream # set it
print('Bound stream %r to sort %r' % (stream.fname, self.fname))
# now that tres is known, calculate window timepoints wrt spike time:
self.calc_twts_twi()
stream = property(get_stream, set_stream)
def calc_twts_twi(self):
"""Calculate temporal window timepoints wrt spike time, and the indices of these
timepoints wrt spike time"""
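# Worked example (hypothetical values): with tw = (-400, 600) us and tres = 20 us,
# twts spans -400..580 us in 20 us steps and twi = (-20, 29), i.e. 20 samples before
# and 29 samples after the spike time.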
tres = self.tres
tw = self.tw
twts = np.arange(tw[0], tw[1], tres)
twts += twts[0] % tres # get rid of mod, so twts go through zero
self.twts = twts
self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)
#info('twi = %s' % (self.twi,))
def update_tw(self, tw):
"""Update tw and everything that depends on it. Note that this shouldn't
be called directly by the user. Call SpykeWindow.update_spiketw() instead"""
oldtw = self.tw
self.tw = tw
self.calc_twts_twi()
dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old
self.spikes['t0'] += dtw[0]
self.spikes['t1'] += dtw[1]
self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)
# recalculate any existing templates:
for neuron in self.neurons.values():
if neuron.wave.data is not None:
neuron.update_wave()
print('WARNING: all spike waveforms need to be reloaded!')
def get_tres(self):
return self.stream.tres
tres = property(get_tres)
def __getstate__(self):
"""Get object state for pickling"""
# copy it cuz we'll be making changes, this is fast because it's just a shallow copy
d = self.__dict__.copy()
# Spikes and wavedata arrays are (potentially) saved separately.
# usids and PCs/ICs can be regenerated from the spikes array.
for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:
# keep _stream during normal pickling for multiprocessing, but remove it
# manually when pickling to sort file
try: del d[attr]
except KeyError: pass
return d
def get_nspikes(self):
try: return len(self.spikes)
except AttributeError: return 0
nspikes = property(get_nspikes)
def update_usids(self):
"""Update usids, which is an array of indices of unsorted spikes"""
nids = self.spikes['nid']
self.usids, = np.where(nids == 0) # 0 means unclustered
def get_spikes_sortedby(self, attr='id'):
"""Return array of all spikes, sorted by attribute 'attr'"""
vals = self.spikes[attr]
spikes = self.spikes[vals.argsort()]
return spikes
def get_wave(self, sid):
"""Return WaveForm corresponding to spike sid"""
spikes = self.spikes
nchans = spikes['nchans'][sid]
chans = spikes['chans'][sid, :nchans]
t0 = spikes['t0'][sid]
t1 = spikes['t1'][sid]
wavedata = self.wavedata[sid, 0:nchans]
ts = np.arange(t0, t1, self.tres) # sample timestamps, us
return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)
def get_maxchan_wavedata(self, sid=None, nid=None):
"""Return wavedata of maxchan of spike sid or neuron nid"""
if sid != None:
assert nid == None
chani = self.spikes['chani'][sid]
return self.wavedata[sid, chani]
elif nid != None:
assert sid == None
neuron = self.neurons[nid]
chani, = np.where(neuron.chans == neuron.chan)
assert len(chani) == 1
chani = chani[0] # pull out of length 1 array
return neuron.wave.data[chani]
def get_mean_wave(self, sids, nid=None):
"""Return the mean and std waveform of spike waveforms in sids"""
spikes = self.spikes
nsids = len(sids)
if nsids > MEANWAVEMAXSAMPLES:
step = nsids // MEANWAVEMAXSAMPLES + 1
s = ("get_mean_wave() sampling every %d spikes instead of all %d"
% (step, nsids))
if nid != None:
s = "neuron %d: " % nid + s
print(s)
sids = sids[::step]
nsids = len(sids) # update
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ chans[:nchans] for chans, nchans in zip(chanss, nchanss) ] # list of arrays
chanpopulation = np.concatenate(chanslist)
groupchans = np.unique(chanpopulation) # comes out sorted
wavedata = self.wavedata[sids]
if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1
wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim
nt = wavedata.shape[-1]
maxnchans = len(groupchans)
data = np.zeros((maxnchans, nt))
# all spikes have same nt, but not necessarily same nchans, keep track of
# how many spikes contributed to each of the group's chans
nspikes = np.zeros((maxnchans, 1), dtype=int)
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
data[chanis] += wd[:len(chans)] # accumulate
nspikes[chanis] += 1 # inc spike count for this spike's chans
#t0 = time.time()
data /= nspikes # normalize all data points appropriately, this is now the mean
var = np.zeros((maxnchans, nt))
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment
var /= nspikes # normalize all data points appropriately, this is now the variance
std = np.sqrt(var)
# keep only those chans that at least 1/2 the spikes contributed to
bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge
hist, bins = np.histogram(chanpopulation, bins=bins)
chans = groupchans[hist >= nsids/2]
chanis = groupchans.searchsorted(chans)
data = data[chanis]
std = std[chanis]
return WaveForm(data=data, std=std, chans=chans)
def check_ISIs(self, nids='good'):
"""Check that interspike intervals of spikes in each nid never fall below DEFMINISI"""
print('Checking inter-spike intervals')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
ndupl = (np.diff(spikets) < DEFMINISI).sum()
if ndupl > 0:
msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\n'
'Remove duplicate spikes with the ISI tool in the Verify tab'
% (nid, ndupl, DEFMINISI))
raise RuntimeError(msg)
def check_wavealign(self, nids='good', maxdti=1):
"""Check that each neurons's primary peak on the max chan is no more than +/- maxdti
timepoints away from the t=0 alignment timepoint"""
print('Checking neuron mean waveform alignment')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata
for nid in nids:
neuron = self.neurons[nid]
wd = self.get_maxchan_wavedata(nid=nid)
assert len(wd) == nt
# find biggest positive and negative peaks, check which comes first, ensure
# the primary peak is within maxdti of t=0 alignment timepoint:
ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices
npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices
pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index
nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index
if nmaxi < pmaxi: # usual case: -ve then +ve peak
peak1i = nmaxi
else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment
pmax, nmax = wd[pmaxi], wd[nmaxi]
if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak
peak1i = pmaxi
else:
peak1i = nmaxi # default to -ve peak
alignti = 0 - self.twi[0] # +ve
dti = peak1i - alignti
#print("n%d: dti=%d" % (nid, dti))
if abs(dti) > maxdti:
peak1uV = self.converter.AD2uV(wd[peak1i])
peak1us = intround(self.tres*(peak1i-alignti))
msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '
'the t=0 us alignment point. Shift it closer and try again'
% (peak1uV, peak1us, nid, dti))
raise RuntimeError(msg)
def check_wavepadding(self, nids='good', npad=2):
"""Check if any spikes are edge padded, presumably due to being shifted but not
reloaded. For robustness, check for consistent signs of padding across all channels.
An edge is considered padded if it does not change over npad datapoints"""
print('Checking spike waveform padding')
assert npad >= 2 # need at least 2 points to do a diff
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
for sid in neuron.sids:
wd = self.wavedata[sid] # multichannel waveform data
# are left and right edges of wavedata identical for npad number of points?
l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)
leftpadded = (np.diff(l, axis=1) == 0).all()
rightpadded = (np.diff(r, axis=1) == 0).all()
# handle case where spike is right after or right before a 0-padded
# region of data due to gaps between experiments:
if leftpadded:
if (wd[:, 0] == 0).all():
leftpadded = False
if rightpadded:
if (wd[:, -1] == 0).all():
rightpadded = False
if leftpadded or rightpadded:
msg = ('n%d has s%d that looks like it has been padded.\n'
'leftpadded, rightpadded = %r, %r\n'
'Reload s%d or n%d or all spikes and try again'
% (nid, sid, leftpadded, rightpadded, sid, nid))
raise RuntimeError(msg)
def check_contiguous_nids(self):
"""Check that neuron IDs are contiguous (no gaps)"""
print('Checking that neuron IDs are contiguous')
nids = np.array(list(self.neurons))
nids = nids[nids > 0] # only consider +ve nids
nids.sort()
if (np.diff(nids) != 1).any():
raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')
def exportptcsfiles(self, basepath, sortpath, user='', notes=''):
"""Export spike data to binary .ptcs files under basepath, one file per recording"""
# First check to make sure various things are OK before exporting:
self.check_ISIs()
self.check_wavealign()
self.check_wavepadding()
self.check_contiguous_nids()
spikes = self.spikes
exportdt = str(datetime.datetime.now()) # get an export datetime stamp
exportdt = exportdt.split('.')[0] # ditch the us
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting "good" clusters to:')
# do a separate export for each recording:
# absolute start and stop times of all streams, rounded to nearest raw timepoint:
tranges = self.stream.tranges
t0 = tranges[0, 0] # absolute start time of first stream
for stream, trange in zip(streams, tranges):
abst0 = trange[0] # absolute start time of this stream
# time delta between this stream and first stream, to nearest raw timepoint, us:
dt = abst0 - t0
dt = intround(dt) # to nearest int us
self.exportptcsfile(stream, basepath, dt, exportdt, sortpath,
user=user, notes=notes)
def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):
"""Export spike data of all "good" spikes to binary .ptcs file in basepath.
Constrain to spikes in stream, and undo any time delta in spike times.
dt is the integer time difference between start of stream and start of first stream in
the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)"""
# build up list of PTCSNeuronRecords that have spikes in this stream,
# and tally their spikes
nsamplebytes = 4 # float32
nrecs = []
nspikes = 0
# only export neurons marked as "good", could be single or multi unit:
for nid in sorted(self.good):
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
spikets -= dt # export spike times relative to t=0 of this recording
# only include spikes that occurred during this recording
lo, hi = spikets.searchsorted([stream.t0, stream.t1])
spikets = spikets[lo:hi]
if len(spikets) == 0:
continue # don't save empty neurons
nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')
nrecs.append(nrec)
nspikes += len(spikets)
nneurons = len(nrecs)
# create the header and write everything to file:
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
fname = stream.srcfnameroot + '.ptcs'
fullfname = os.path.join(path, fname)
header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user=user, notes=notes)
with open(fullfname, 'wb') as f:
header.write(f)
for nrec in nrecs:
nrec.write(f)
print(fullfname)
def exportcsv(self, fname):
"""Export all "good" spikes to a .csv file with time (s), nid, and maxchan as the
columns"""
sids = []
#chans = []
for nid in sorted(self.good):
neuron = self.neurons[nid]
sids.append(neuron.sids)
# the alternative is to export each spike's unit's channel:
#chans.append(np.tile(neuron.chan, neuron.nspikes))
sids = np.hstack(sids)
spikes = self.spikes[sids]
tsecs = spikes['t'] / 1e6 # convert from us to s
nids = spikes['nid']
chans = spikes['chan']
#chans = np.hstack(chans)
data = np.column_stack([tsecs, nids, chans])
print('Exporting (tsec, nid, chan) of all spikes marked as "good" to %s' % fname)
np.savetxt(fname, data, fmt='%.6f, %d, %d')
def exporttschid(self, basepath):
"""Export int64 (timestamp, channel, neuron id) 3 tuples to binary file"""
raise NotImplementedError('Needs to be redone to work with multiple streams')
spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes
dt = str(datetime.datetime.now()) # get an export timestamp
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
srffnameroot = srffnameroot.replace(' ', '_')
tschidfname = dt + '_' + srffnameroot + '.tschid'
tschid = np.empty((len(spikes), 3), dtype=np.int64)
tschid[:, 0] = spikes['t']
tschid[:, 1] = spikes['chan']
tschid[:, 2] = spikes['nid']
tschid.tofile(os.path.join(path, tschidfname)) # save it
print(tschidfname)
def exportdin(self, basepath):
"""Export stimulus din(s) to binary .din file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s
print('Exporting DIN(s) to:')
for stream in streams:
try: # neither of these attribs should exist for recordings with no stimuli:
svrecs = stream.srff.digitalsvalrecords
dsprecs = stream.srff.displayrecords
except AttributeError:
continue # no din to export for this stream
if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:
raise ValueError("digitalsvalrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# upcast SVal field from uint16 to int64, creates a copy,
# but it's not too expensive:
svrecs = svrecs.astype(dinfiledtype)
# convert to normal n x 2 int64 array
svrecs = svrecs.view(np.int64).reshape(-1, 2)
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one .din
# per displayrecord. Append experiment ID to each .din filename, if necessary.
svrects = svrecs[:, 0]
dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]
svalrecis = svrects.searchsorted(dsprects)
assert svalrecis[0] == 0
svalrecis = svalrecis[1:] # exclude the trivial 0 index
# split sval records according to displayrecord timestamps:
dins = np.split(svrecs, svalrecis)
assert len(dins) == len(dsprecs)
for eid, din in enumerate(dins):
if eid == 0 and len(dins) == 1:
eidstr = ''
elif len(dins) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
dinfname = stream.srcfnameroot + eidstr + '.din'
fullfname = os.path.join(path, dinfname)
din.tofile(fullfname) # save it
print(fullfname)
def exporttextheader(self, basepath):
"""Export stimulus text header(s) to .textheader file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting text header(s) to:')
for stream in streams:
try:
dsprecs = stream.srff.displayrecords
except AttributeError: # no textheader to export for this stream
continue
if len(dsprecs) == 0:
raise ValueError("displayrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one
# .textheader per displayrecord. Append experiment ID to each .textheader
# filename, if necessary.
for eid, dsprec in enumerate(dsprecs):
textheader = dsprec.Header.python_tbl
if eid == 0 and len(dsprecs) == 1:
eidstr = ''
elif len(dsprecs) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
textheaderfname = stream.srcfnameroot + eidstr + '.textheader'
fullfname = os.path.join(path, textheaderfname)
with open(fullfname, 'w') as f:
f.write(textheader) # save it
print(fullfname)
def exportall(self, basepath, sortpath):
"""Export spike data, stimulus din and textheader to basepath"""
self.exportptcsfiles(basepath, sortpath)
self.exportdin(basepath)
self.exporttextheader(basepath)
def exportspikewaves(self, sids, selchans, tis, fname, format):
"""Export spike waveform data of selected sids, selchans and tis to binary
.spikes.zip file or text .spikes.csv file"""
nspikes = len(sids)
chans, chanslist = self.get_common_chans(sids, selchans)
nchans = len(chans)
ti0, ti1 = tis
nt = ti1 - ti0
# fill in 3D data array:
dtype = self.wavedata.dtype
data = np.zeros((nspikes, nchans, nt), dtype=dtype)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
if format == 'text': # flatten timepoints of all chans into columns
data.shape = nspikes, nchans*nt
stream = self.stream
assert stream.kind == 'highpass' # should be the only type ever saved to self
if format == 'binary':
nids = self.spikes['nid'][sids]
spiketimes = self.spikes['t'][sids]
chanpos = stream.probe.siteloc_arr()
uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV
with open(fname, 'wb') as f:
np.savez_compressed(f, data=data, sids=sids, nids=nids,
spiketimes=spiketimes, chans=chans, tis=tis,
chanpos=chanpos, uVperAD=uVperAD)
elif format == 'text':
np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int
else:
raise ValueError('Unknown format: %r' % format)
print('Exported %d spikes on chans=%r and tis=%r to %s'
% (nspikes, list(chans), list(tis), fname))
def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,
dims=None, scale=True):
"""Organize dims parameters from sids into a data matrix, each column
corresponding to a dim. To do PCA/ICA clustering on all spikes one maxchan at
a time, the caller needs to call this multiple times, once for each set of
spikes sharing a maxchan"""
spikes = self.spikes
dtypefields = list(spikes.dtype.fields)
if sids is None:
sids = spikes['id'] # default to all spikes
comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]
rmserror = np.any([ dim == 'RMSerror' for dim in dims ])
ncomp = len(comps)
hascomps = ncomp > 0
if hascomps:
X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,
minncomp=ncomp, norm=norm)
if rmserror:
rms = self.get_rms_error(sids, tis=tis, chans=selchans)
data = []
for dim in dims:
if dim in dtypefields:
data.append( np.float32(spikes[dim][sids]) )
elif dim.startswith('c') and dim[-1].isdigit():
compid = int(lstrip(dim, 'c'))
data.append( np.float32(X[:, compid]) )
elif dim == 'RMSerror':
data.append( np.float32(rms) )
else:
raise RuntimeError('Unknown dim %r' % dim)
# np.column_stack returns a copy, not modifying the original array
data = np.column_stack(data)
if scale:
# ensure 0 mean, and unit variance/stdev
for dim, d in zip(dims, data.T): # d iterates over columns
d -= d.mean()
if dim in ['x0', 'y0'] and self.probe.ncols > 1:
try: x0std # normalize spatial params by x0 std
except NameError: x0std = spikes['x0'].std()
if x0std != 0.0:
d /= x0std
#elif dim == 't': # the longer the recording in hours, the greater the
# # scaling in time
# trange = d.max() - d.min()
# tscale = trange / (60*60*1e6)
# d *= tscale / d.std()
else: # normalize all other dims by their std
dstd = d.std()
if dstd != 0.0:
d /= dstd
return data
def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,
norm=False):
"""Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,
if chans are specified, limit PCA/ICA to them. Return component matrix with at
least minncomp dimensions"""
spikes = self.spikes
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nspikes < 2:
raise RuntimeError("Need at least 2 spikes for %s" % kind)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for %s" % kind)
# check if desired components have already been calculated (cache hit):
Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)
self.Xhash = Xhash # save as key to most recent component matrix in self.X
try: self.X
except AttributeError: self.X = {} # init the dimension reduction cache attrib
if Xhash in self.X:
print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %
(kind[:-1], list(tis), list(chans), nspikes))
return self.X[Xhash] # no need to recalculate
print('Cache miss, (re)calculating %ss' % kind[:-1])
# collect data between tis from chans from all spikes:
print('Doing %s on tis=%r, chans=%r of %d spikes' %
(kind, list(tis), list(chans), nspikes))
# MDP complains of roundoff errors with float32 for large covariance matrices
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
spikedata = self.wavedata[sid][spikechanis, ti0:ti1]
if norm:
# normalize by Vpp of chan with max Vpp:
maxptp = spikedata.ptp(axis=1).max()
if maxptp != 0: # prevent div by 0
spikedata = spikedata / maxptp
data[sii] = spikedata
print('Input shape for %s: %r' % (kind, data.shape))
t0 = time.time()
data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns
print('Reshaped input for %s: %r' % (kind, data.shape))
if kind == 'PCA': # principal components analysis
if PCALIB == 'mdp':
import mdp # delay as late as possible
X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default
elif PCALIB == 'sklearn':
# sklearn's PCA is about 8x slower than mdp.pca, I think because it
# doesn't tap into scipy.linalg.eig compiled code. RandomizedPCA is faster
# than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
X = pca.fit_transform(data) # do both the fit and the transform
else:
raise ValueError('Invalid PCALIB %r' % PCALIB)
if X.shape[1] < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
elif kind == 'sPCA': # sparse principal components analysis
from sklearn.decomposition import SparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = spca.fit_transform(data) # do both the fit and the transform
elif kind == 'mbsPCA': # mini batch sparse principal components analysis
from sklearn.decomposition import MiniBatchSparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = mbspca.fit_transform(data) # do both the fit and the transform
elif kind == 'NMF': # non-negative matrix factorization
from sklearn.decomposition import NMF
n_components = 5
init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'
nmf = NMF(n_components=n_components, init=init)
X = nmf.fit_transform(data) # do both the fit and the transform
elif kind == 'tSNE': # t-distributed stochastic neighbor embedding
# limit number of PCs to feed into t-SNE, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, data.shape[1]))
print('ncomp: %d' % ncomp)
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up t-SNE:
data = mdp.pca(data, output_dim=ncomp)
from sklearn.manifold import TSNE
n_components = 3 # not suited for any more than 3, according to the paper
#init = 'random', 'pca'
tsne = TSNE(n_components=n_components)
X = tsne.fit_transform(data) # do both the fit and the transform
elif kind == 'ICA': # independent components analysis
# ensure nspikes >= ndims**2 for good ICA convergence
maxncomp = intround(np.sqrt(nspikes))
if maxncomp < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
if data.shape[0] <= data.shape[1]:
raise RuntimeError('Need more observations than dimensions for ICA')
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))
if ICALIB == 'mdp':
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
print('ncomp: %d' % ncomp)
data = mdp.pca(data, output_dim=ncomp)
# nonlinearity g='pow3', ie x**3. tanh seems to separate better,
# but is a bit slower. gaus seems to be slower still, and no better
# than tanh, but these are just vague impressions.
# defaults to whitened=False, ie assumes data isn't whitened
node = mdp.nodes.FastICANode(g='pow3')
X = node(data)
pm = node.get_projmatrix()
X = X[:, np.any(pm, axis=0)] # keep only the non zero columns
elif ICALIB == 'sklearn':
from sklearn.decomposition import FastICA
# when whiten=True (default), FastICA preprocesses the data using PCA, and
# n_components is the number of PCs that are kept before doing ICA.
alg = 'parallel' # parallel or deflation, default is parallel
fun = 'logcosh' # logcosh, exp, or cube, default is logcosh
maxiter = 100 # default is 200
tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster
## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,
## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options
print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'
% (ncomp, alg, fun, maxiter, tol))
fastica = FastICA(n_components=ncomp, algorithm=alg,
whiten=True, fun=fun, fun_args=None,
max_iter=maxiter, tol=tol, w_init=None,
random_state=None)
X = fastica.fit_transform(data) # do both the fit and the transform
#pm = fastica.components_
print('fastica niters: %d' % (fastica.n_iter_))
else:
raise ValueError('Invalid ICALIB %r' % ICALIB)
if X.shape[1] < 3:
raise RuntimeError('Need at least 3 columns')
# Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or
# rather, opposite to their approach, which picked ICs with most negative
# kurtosis). For methods of estimating negentropy, see Hyvarinen1997.
'''
# sort by abs(kurtosis) of each IC (column)
k = scipy.stats.kurtosis(X, axis=0)
ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)
print('Sort by abs(kurtosis):')
print(k[ki])
X = X[:, ki] # sort the ICs
'''
# sort by negentropy of each IC (column), this seems to work better than kurtosis
# at separating clusters of similar size:
ne = core.negentropy(X, axis=0)
assert (ne > 0).all()
nei = ne.argsort()[::-1] # decreasing order of negentropy
print('Sort by negentropy:')
print(ne[nei])
X = X[:, nei] # sort the ICs
'''
import pylab as pl
pl.figure()
pl.imshow(pm)
pl.colorbar()
pl.title('original projmatrix')
pl.figure()
pl.imshow(pm[:, ki])
pl.colorbar()
pl.title('decreasing abs(kurtosis) projmatrix')
pl.figure()
pl.imshow(pm[:, nei])
pl.colorbar()
pl.title('decreasing negentropy projmatrix')
'''
else:
raise ValueError('Unknown kind %r' % kind)
print('Output shape for %s: %r' % (kind, X.shape))
self.X[Xhash] = X # cache for fast future retrieval
print('%s took %.3f sec' % (kind, time.time()-t0))
unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span
for nid in unids:
# don't update pos of junk cluster, if any, since it might not have any chans
# common to all its spikes, and therefore can't have PCA/ICA done on it
if nid != 0:
self.clusters[nid].update_comppos(X, sids)
return X
def get_rms_error(self, sids, tis=None, chans=None):
"""Calculate RMS error of spike waveforms (all from the same cluster) relative to
their cluster's mean waveform. Consider only selected tis and chans"""
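# For each spike s, RMSerror_s = sqrt(mean over (chan, timepoint) of (V_s - V_mean)**2),
# evaluated on the channels common to all sids and the selected timepoint window.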
spikes = self.spikes
nids = np.unique(spikes['nid'][sids])
nid = nids[0]
if len(nids) > 1 or nid == 0:
raise RuntimeError("Spikes must all belong to the same (non-junk) cluster for "
"RMS error calculation")
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for RMS error")
# collect data between tis from chans from all spikes:
print('Getting RMS error on tis=%r, chans=%r of %d spikes' %
(list(tis), list(chans), nspikes))
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
# get cluster mean waveform between tis on chans:
wave = self.neurons[nid].get_wave()
chanis = wave.chans.searchsorted(chans)
meandata = np.float64(wave.data[chanis, ti0:ti1])
# calculate RMS error between each spike and the cluster mean waveform:
se = (data - meandata) ** 2 # squared error
# take mean across timepoints and chans, but not across spikes:
mse = se.mean(axis=2).mean(axis=1) # mean squared error
return np.sqrt(mse)
def get_common_chans(self, sids, chans=None):
"""Find channels common to all sids, and optionally to chans as well. Also,
return chanslist, ie list of arrays of chans of sids"""
spikes = self.spikes
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
#t0 = time.time()
chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ] # list of arrays
#print('Building chanslist took %.3f sec' % (time.time()-t0))
commonchans = util.intersect1d_uint8(chanslist) # find intersection
if chans is not None and len(chans) > 0:
# values in chans but not in commonchans:
diffchans = np.setdiff1d(chans, commonchans)
commonchans = np.intersect1d(chans, commonchans) # values in both
if len(diffchans) > 0:
print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))
return commonchans, chanslist
def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):
"""Return MD5 hex digest of args, for uniquely identifying the matrix resulting
from dimension reduction of spike data"""
h = hashlib.md5()
h.update(kind.encode())
h.update(sids)
h.update(tis)
h.update(chans)
if kind == 'ICA': # consider npcsperchan only if doing ICA
h.update(str(npcsperchan).encode())
h.update(str(norm).encode())
return h.hexdigest()
def create_neuron(self, id=None, inserti=None):
"""Create and return a new Neuron with a unique ID"""
if id == None:
id = self.nextnid
if id in self.neurons:
raise RuntimeError('Neuron %d already exists' % id)
id = int(id) # get rid of numpy ints
neuron = Neuron(self, id)
# add neuron to self
self.neurons[neuron.id] = neuron
if inserti == None:
self.norder.append(neuron.id)
else:
self.norder.insert(inserti, neuron.id)
return neuron
def remove_neuron(self, id):
try:
del self.neurons[id] # may already be removed due to recursive call
del self.clusters[id]
self.norder.remove(id)
except (KeyError, ValueError):
pass
def shift(self, sids, nt):
"""Shift sid waveforms by nt timepoints: -ve shifts waveforms left, +ve shifts right.
For speed, pad waveforms with edge values at the appropriate end"""
spikes = self.spikes
wd = self.wavedata
for sid in sids: # maybe there's a more efficient way than iterating over sids
core.shiftpad(wd[sid], nt) # modifies wd in-place
# update spike parameters:
dt = intround(nt * self.tres) # amount of time to shift by, signed, in us
# so we can later reload the wavedata accurately, shifting the waveform right and
# padding it on its left requires decrementing the associated timepoints
# (and vice versa)
spikes['t'][sids] -= dt
spikes['t0'][sids] -= dt
spikes['t1'][sids] -= dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign wrt timepoints above, referencing within
# wavedata:
spikes['tis'][sids] = spikes['tis'][sids] + nt
# this in-place operation raises a TypeError in numpy 1.11.2, something related to
# subtracting an int from an unsigned int:
#spikes['tis'][sid] += nt
# caller should treat all sids as dirty
'''
# replaced by util.alignbest_cy():
def alignbest(self, sids, tis, chans):
"""Align all sids between tis on chans by best fit according to mean squared error.
chans are assumed to be a subset of channels of sids. Return sids
that were actually moved and therefore need to be marked as dirty"""
spikes = self.spikes
nspikes = len(sids)
nchans = len(chans)
wd = self.wavedata
nt = wd.shape[2] # num timepoints in each waveform
ti0, ti1 = tis
subnt = ti1 - ti0 # num timepoints to slice from each waveform
# TODO: make maxshift a f'n of interpolation factor
maxshift = 2 # shift +/- this many timepoints
subntdiv2 = subnt // 2
#print('subntdiv2 on either side of t=0: %d' % subntdiv2)
if subntdiv2 < maxshift:
raise ValueError("Selected waveform duration too short")
#maxshiftus = maxshift * self.stream.tres
# NOTE: in this case, it may be faster to keep shifts and sti0s and sti1s as lists
# of ints instead of np int arrays, maybe because their values are faster to iterate
# over or index with in python loops and lists:
shifts = range(-maxshift, maxshift+1) # from -maxshift to maxshift, inclusive
nshifts = len(shifts)
sti0s = [ ti0+shifti for shifti in range(nshifts) ] # shifted ti0 values
sti1s = [ ti1+shifti for shifti in range(nshifts) ] # shifted ti1 values
sti0ssti1s = zip(sti0s, sti1s)
print("Padding waveforms with up to +/- %d points of fake data" % maxshift)
# not worth subsampling here while calculating meandata, since all this
# stuff in this loop is needed in the shift loop below
subsd = np.zeros((nspikes, nchans, subnt), dtype=wd.dtype) # subset of spike data
spikechanis = np.zeros((nspikes, nchans), dtype=np.int64)
t0 = time.time()
for sidi, sid in enumerate(sids):
spike = spikes[sid]
nspikechans = spike['nchans']
spikechans = spike['chans'][:nspikechans]
spikechanis[sidi] = spikechans.searchsorted(chans)
subsd[sidi] = wd[sid, spikechanis[sidi], ti0:ti1]
print('Mean prep loop for best shift took %.3f sec' % (time.time()-t0))
t0 = time.time()
meandata = subsd.mean(axis=0) # float64
print('Mean for best shift took %.3f sec' % (time.time()-t0))
# choose best shifted waveform for each spike
# widesd holds current spike data plus padding on either side
# to allow for full width slicing for all time shifts:
maxnchans = spikes['nchans'].max() # of all spikes in sort
widesd = np.zeros((maxnchans, maxshift+nt+maxshift), dtype=wd.dtype)
shiftedsubsd = subsd.copy() # init
tempsubshifts = np.zeros((nshifts, nchans, subnt), dtype=wd.dtype)
dirtysids = []
t0 = time.time()
for sidi, sid in enumerate(sids):
# for speed, instead of adding real data, pad start and end with fake values
chanis = spikechanis[sidi]
sd = wd[sid] # sid's spike data
widesd[:, maxshift:-maxshift] = sd # 2D
widesd[:, :maxshift] = sd[:, 0, None] # pad start with first point per chan
widesd[:, -maxshift:] = sd[:, -1, None] # pad end with last point per chan
wideshortsd = widesd[chanis] # sid's padded spike data on chanis, 2D
# keep this inner loop as fast as possible:
for shifti, (sti0, sti1) in enumerate(sti0ssti1s):
tempsubshifts[shifti] = wideshortsd[:, sti0:sti1] # len: subnt
errors = tempsubshifts - meandata # (nshifts, nchans, subnt) - (nchans, subnt)
# get sum squared errors by taking sum across highest two dims - for purpose
# of error comparison, don't need to take mean or square root. Also, order
# of summation along axes doesn't matter, as long as it's done on the highest two:
sserrors = (errors**2).sum(axis=2).sum(axis=1) # nshifts long
bestshifti = sserrors.argmin()
bestshift = shifts[bestshifti]
if bestshift != 0: # no need to update sort.wavedata[sid] if there's no shift
# update time values:
dt = bestshift * self.tres # time to shift by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign, referencing within wavedata:
spikes['tis'][sid] -= bestshift
# update sort.wavedata
wd[sid] = widesd[:, bestshifti:bestshifti+nt]
shiftedsubsd[sidi] = tempsubshifts[bestshifti]
dirtysids.append(sid) # mark sid as dirty
print('Shifting loop took %.3f sec' % (time.time()-t0))
AD2uV = self.converter.AD2uV
stdevbefore = AD2uV(subsd.std(axis=0).mean())
stdevafter = AD2uV(shiftedsubsd.std(axis=0).mean())
print('stdev went from %.3f to %.3f uV' % (stdevbefore, stdevafter))
return dirtysids
'''
def alignminmax(self, sids, to):
"""Align sids by their min or max. Return those that were actually moved
and therefore need to be marked as dirty"""
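# Spikes whose currently aligned peak has the wrong sign for the requested alignment
# (e.g. aligned to a positive peak when aligning to 'min') are selected below and
# shifted over to their other peak.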
if not self.stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
V0s = spikes['V0'][sids]
V1s = spikes['V1'][sids]
Vss = np.column_stack((V0s, V1s))
alignis = spikes['aligni'][sids]
b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array
if to == 'min':
i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak
elif to == 'max':
i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak
else:
raise ValueError('Unknown to %r' % to)
sids = sids[i] # sids that need realigning
nspikes = len(sids)
print("Realigning %d spikes" % nspikes)
if nspikes == 0: # nothing to do
return [] # no sids to mark as dirty
multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr
chanis = spikes['chani'][sids] # nspikes arr of max chanis
# peak tis on max chan of each spike, convert from uint8 to int32 for safe math
tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr
# NOTE: tis aren't always in temporal order!
dpeaktis = tis[:, 1] - tis[:, 0] # could be +ve or -ve
dpeaks = spikes['dt'][sids] # stored as +ve
# for each spike, decide whether to add or subtract dpeak to/from its temporal values
ordered = dpeaktis > 0 # in temporal order
reversed = dpeaktis < 0 # in reversed temporal order
alignis = spikes['aligni'][sids]
alignis0 = alignis == 0
alignis1 = alignis == 1
dpeaki = np.zeros(nspikes, dtype=int)
# add dpeak to temporal values to align to later peak
dpeaki[ordered & alignis0 | reversed & alignis1] = 1
# subtract dpeak from temporal values to align to earlier peak
dpeaki[ordered & alignis1 | reversed & alignis0] = -1
# upcast aligni from 1 byte to an int before doing arithmetic on it:
#dalignis = -np.int32(alignis)*2 + 1
dts = dpeaki * dpeaks
dtis = -dpeaki * abs(dpeaktis)
# shift values
spikes['t'][sids] += dts
spikes['t0'][sids] += dts
spikes['t1'][sids] += dts
spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None] # update wrt new t0i
spikes['aligni'][sids[alignis0]] = 1
spikes['aligni'][sids[alignis1]] = 0
# update wavedata for each shifted spike
self.reload_spikes(sids)
return sids # mark all sids as dirty
def choose_new_meanchans(self, sids):
"""Get mean waveform of all sids, then find the mean's chan with max Vpp, then
choose det.maxnchansperspike channels around that maxchan.
Return meanchans, furthestchan, and furthestchani"""
print('Choosing new channel set for all selected spikes')
det = self.detector
meanwave = self.get_mean_wave(sids)
# mean chan with max Vpp:
maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]
maxchani = det.chans.searchsorted(maxchan)
distances = det.dm.data[maxchani]
# keep the maxnchansperspike closest chans to maxchan, including maxchan:
chanis = distances.argsort()[:det.maxnchansperspike]
meanchans = det.chans[chanis]
meanchans.sort() # keep them sorted
print('meanchans: %r' % list(meanchans))
furthestchan = det.chans[chanis[-1]]
print('furthestchan: %d' % furthestchan)
furthestchani = meanchans.searchsorted(furthestchan)
# sanity checks:
assert len(meanchans) == det.maxnchansperspike
assert maxchan in meanchans
return meanchans, furthestchan, furthestchani
def reload_spikes(self, sids, usemeanchans=False):
"""Update wavedata of designated spikes from stream. Optionally fix incorrect
time values from .sort 0.3 files. Optionally choose new set of channels for all
sids based on the chans closest to the mean of the sids. It's the caller's
responsibility to mark sids as dirty and trigger resaving of .wave file"""
## TODO: add findmaxchan=False and recenteronmaxchan=False kwargs
nsids = len(sids)
print('(Re)loading %d spikes' % nsids)
stream = self.stream
if not stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
det = self.detector
ver_lte_03 = float(self.__version__) <= 0.3
if ver_lte_03:
print('Fixing potentially incorrect time values during spike reloading')
nfixed = 0
treload = time.time()
if usemeanchans:
if ver_lte_03:
raise RuntimeError("Best not to choose new chans from mean until after "
"converting to .sort >= 0.4")
meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)
nmeanchans = len(meanchans)
# split up sids into groups efficient for loading from stream:
ts = spikes[sids]['t'] # noncontig, not a copy
# ensure they're in temporal order:
if not (np.diff(ts) >= 0).all():
print("Selected sids aren't in temporal order, sorting by time...")
tsis = ts.argsort()
sids = sids[tsis]
print("Done sorting sids by time")
# break up spikes by ISIs >= MAXGROUPISI:
splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1
groups = np.split(sids, splitis)
# limit each group of sids to no more than MAXGROUPDT:
groupi = 0
while groupi < len(groups):
group = groups[groupi] # group of sids all with ISIs < MAXGROUPISI
## TODO: not a copy: is this the optimal way to get the times in this case?
relts = spikes[group]['t'] - spikes[group[0]]['t']
splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1
nsubgroups = len(splitis) + 1
if nsubgroups > 1:
# del original group, replace with subgroups
del groups[groupi]
subgroups = np.split(group, splitis)
groups[groupi:groupi] = subgroups
groupi += len(subgroups)
else:
groupi += 1
print('ngroups: %d' % len(groups))
# process each group:
sidi = 0 # init sid index across all groups, used as status counter
for groupi, group in enumerate(groups):
printflush('<%d>' % groupi, end='')
assert len(group) > 0 # otherwise something went wrong above
t0 = spikes[group[0]]['t0']
t1 = spikes[group[-1]]['t1']
if ver_lte_03:
# load a little extra, in case we need to reload misaligned first and/or
# last spike in this group
t0 -= 5000 # -5 ms
t1 += 5000 # +5 ms
"""
Find union of chans of sids in this group, ask Stream for only those such that no
unnecessary resampling takes place on unneeded chans. Note that this doesn't make
a difference when CAR is enabled in the stream, because the full set of enabled
chans have to be maintained in Stream.__call__ until the very end. Don't bother
cutting out the correct nchans for each sid. At worst, chan 0 (the "empty" chans
array value) will be unnecessarily added to unionchans, and we'll retrieve one
extra chan when creating tempwave, which will then later be discarded:
"""
unionchans = np.unique(spikes['chans'][group])
if usemeanchans:
# now that we have the original unionchans of this group,
# update this group's spikes array entries with meanchans:
spikes['nchans'][group] = nmeanchans
# we're using the max num chans, so assign the full array:
spikes['chans'][group] = meanchans
# now update unionchans as well:
unionchans = np.unique(np.hstack((unionchans, meanchans)))
if 0 not in stream.chans: # if chan 0 is disabled in stream
# remove 0 from unionchans, otherwise an error would be raised when
# calling stream()
unionchans = unionchans[unionchans != 0]
# load and resample only what's needed for this group:
tempwave = stream(t0, t1, unionchans)
# slice out each spike's reloaded data from tempwave:
for sid in group:
# print status:
if sidi % 10000 == 0:
printflush(sidi, end='')
elif sidi % 1000 == 0:
printflush('.', end='')
if usemeanchans: # already checked above that ver_lte_03 == False
# this spike's chans have been set to meanchans, now
# check that each spike's maxchan is in meanchans:
chan = spikes[sid]['chan']
if chan not in meanchans:
# replace furthest chan with spike's maxchan:
print("spike %d: replacing furthestchan %d with spike's maxchan %d"
% (sid, furthestchan, chan))
nchans = spikes[sid]['nchans']
chans = spikes[sid]['chans'][:nchans]
# replace furthest chan with max chan, modifies spikes array in-place:
chans[furthestchani] = chan
# make sure chans remain sorted:
chans.sort()
# this isn't necessary, because all the above was in-place:
#spikes['chans'][sid][:nchans] = chans
spike = spikes[sid]
nchans = spike['nchans']
chans = spike['chans'][:nchans]
rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data
if ver_lte_03: # fix potentially incorrect spike tis
result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)
if result == None:
sidi += 1 # inc status counter
continue # rollwin2D won't work, skip to next sid
else:
rd, fixed = result
if fixed:
nfixed += 1
nt = rd.shape[1]
self.wavedata[sid, :nchans, :nt] = rd # update wavedata
sidi += 1 # inc status counter
print()
if ver_lte_03:
print('Fixed time values of %d spikes' % nfixed)
print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):
"""In sort.__version__ <= 0.3, t, t0, t1, and tis were not updated
during alignbest() calls. To fix this, load new data with old potentially
incorrect t0 and t1 values, and compare this new data to existing old data
in wavedata array. Find where the non-repeating parts of the old data fits
into the new, and calculate the correction needed to fix the time values.
Finally, reload new data according to these corrected time values."""
#print('Reloading sid from ver_lte_03: %d' % sid)
spikes = self.spikes
od = self.wavedata[sid, :nchans] # old data
# indices that strip const values from left and right ends:
lefti, righti = lrrep2Darrstripis(od)
od = od[:, lefti:righti] # stripped old data
# reloaded data rd uses old incorrect t0 and t1, but they should be
# wide enough to encompass the non-repeating parts of the old data
width = od.shape[1] # rolling window width
if not width <= rd.shape[1]:
print('') # newline
print("WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d" %
(od.shape[1], rd.shape[1], sid))
#import pdb; pdb.set_trace()
return
odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]
if len(odinndis) == 0: # no hits of old data in new
dnt = 0 # reload data based on current timepoints
elif len(odinndis) == 1: # exactly 1 hit of old data in new
odinndi = odinndis[0] # pull it out
dnt = odinndi - lefti # num timepoints to correct by, signed
else:
raise RuntimeError("Multiple hits of old data in new, don't know "
"how to reload spike %d" % sid)
newrd, fixed = rd, False
if dnt != 0:
dt = intround(dnt * self.tres) # time to correct by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Use opposite sign because we're
# referencing within wavedata:
# in versions <= 0.3, 'tis' were named 'phasetis':
spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt
spike = spikes[sid]
chans = spike['chans'][:nchans]
# reslice tempwave again now that t0 and t1 have changed
newrd = tempwave[spike['t0']:spike['t1']][chans].data
fixed = True
#printflush('F', end='')
return newrd, fixed
def reload_spikes_and_templates(self, sids, usemeanchans=False):
self.reload_spikes(sids, usemeanchans=usemeanchans)
# update neuron templates:
unids = np.unique(self.spikes['nid'][sids])
unids = unids[unids != 0] # exclude junk cluster, which doesn't have a neuron
neurons = [ self.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
def init_spike_alignment(self):
"""Set initial spike alignment points according to alignment points of each
spike's neuron"""
print('Setting initial spike alignment points')
ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform
for neuron in self.neurons.values():
nwave = neuron.get_wave() # update and return mean waveform
mintis = nwave.data.argmin(axis=1)
maxtis = nwave.data.argmax(axis=1)
ntis[neuron.id] = np.column_stack([mintis, maxtis])
# choose aligni with least variance:
nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])
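# Illustrative sketch (not part of the original module): for a toy 3-chan mean
# waveform, the per-chan extrema and the lower-variance alignment point might be:
#   mintis = np.array([10, 11, 10]); maxtis = np.array([18, 25, 14])
#   np.column_stack([mintis, maxtis])        # tis, one (min, max) pair per chan
#   np.argmin([mintis.std(), maxtis.std()])  # -> 0, i.e. align to the min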
AD2uV = self.converter.AD2uV
for s, wd in zip(self.spikes, self.wavedata):
sid = s['id']
# print out progress on a regular basis:
if sid % 100000 == 0:
printflush(sid, end='')
elif sid % 10000 == 0:
printflush('.', end='')
nid = s['nid']
#chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
neuronchans = self.neurons[nid].wave.chans
assert (chans == neuronchans).all()
s['tis'][:nchans] = ntis[nid] # set according to its neuron, wrt t0i=0
s['aligni'] = nalignis[nid] # set according to its neuron
maxchani = s['chani']
t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])
s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6 # us
# note that V0 and V1 might not be of opposite sign, because tis are derived
# from mean neuron waveform, not from each individual spike:
s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i]) # uV
s['Vpp'] = abs(s['V1'] - s['V0']) # uV
print()
def spatially_localize_spikes(self, sortwin, method='fit'):
"""Assuming that wavedata have been extracted and neuron mean waveforms calculated,
find tis and perform spatial localization of every spike in self"""
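# Illustrative sketch (not part of the original module): the 'mean' method below
# amounts to a Vpp-weighted centre of mass of the spike's channel positions,
# assuming weights2spatialmean() is equivalent to something like:
#   def weights2spatialmean(w, x, y):
#       w = np.abs(np.float64(w))
#       return (w * x).sum() / w.sum(), (w * y).sum() / w.sum()
# The 'fit' method instead fits the extractor's model function f to the same
# per-chan weights and positions.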
det = self.detector
weights2f = self.extractor.weights2spatial
weights2spatialmean = self.extractor.weights2spatialmean
f = self.extractor.f
nreject = 0 # number spikes rejected during spatial localization
print('Running spatial localization on all %d spikes' % self.nspikes)
tstart = time.time()
## TODO: can this be multithreaded/processed?
for s, wd in zip(self.spikes, self.wavedata):
# Get Vpp at each inclchan's tis, use as spatial weights:
# see core.rowtake() or util.rowtake_cy() for indexing explanation:
sid = s['id']
# print out progress on a regular basis:
if sid % 10000 == 0:
printflush(sid, end='')
elif sid % 1000 == 0:
printflush('.', end='')
chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
maxchani = s['chani']
chanis = det.chans.searchsorted(chans)
w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2
w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i
x = det.siteloc[chanis, 0] # 1D array (row)
y = det.siteloc[chanis, 1]
if method == 'fit':
# localize by fitting extractor.f function to wavedata
params = weights2f(f, w, x, y, maxchani)
elif method == 'mean':
# set localization to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# a very ad-hoc guess for spatial sigma:
sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])
params = x0, y0, sx, sx
else:
raise ValueError('Unknown method %r' % method)
if params is None: # presumably a non-localizable many-channel noise event
#printflush('X', end='') # to indicate a rejected spike
if DEBUG:
spiket = intround(s['t']) # nearest us
det.log("Reject spike %d at t=%d based on fit params" % (sid, spiket))
neuron = self.neurons[s['nid']]
# remove from its neuron, add to unsorted list of spikes:
sortwin.MoveSpikes2List(neuron, [sid], update=False)
# manually set localization params to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# set sigma to 0 um, and then later round lockr up to 1 um so that only one
# raster tick shows up for each rejected spike, reducing clutter
params = x0, y0, 0, 0
nreject += 1
# Save spatial fit params, and "lockout" only the channels within lockrx*sx
# of the fit spatial location of the spike, up to a max of inclr. "Lockout"
# in this case only refers to which channels are highlighted with a raster tick
# for each spike:
s['x0'], s['y0'], s['sx'], s['sy'] = params
x0, y0 = s['x0'], s['y0']
# lockout radius for this spike:
lockr = min(det.lockrx*s['sx'], det.inclr) # in um
lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick
# test y coords of chans in y array, ylockchaniis can be used to index
# into x, y and chans:
ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
# test Euclid distance from x0, y0 for each ylockchani:
lockchaniis = ylockchaniis.copy()
for ylockchanii in ylockchaniis:
if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:
# Euclidean distance is too great, remove ylockchanii from lockchaniis:
lockchaniis = lockchaniis[lockchaniis != ylockchanii]
lockchans = chans[lockchaniis]
nlockchans = len(lockchans)
s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
print('Spatial localization of spikes took %.3f s' % (time.time() - tstart))
return nreject
'''
def get_component_matrix(self, dims=None, weighting=None):
"""Convert spike param matrix into pca/ica data for clustering"""
import mdp # can't delay this any longer
X = self.get_param_matrix(dims=dims)
if weighting is None:
return X
if weighting.lower() == 'ica':
node = mdp.nodes.FastICANode()
elif weighting.lower() == 'pca':
node = mdp.nodes.PCANode()
else:
raise ValueError('unknown weighting %r' % weighting)
node.train(X)
features = node.execute(X) # returns all available components
#self.node = node
#self.weighting = weighting
#self.features = features
return features
def get_ids(self, cids, spikes):
"""Convert a list of cluster ids into 2 dicts: n2sids maps neuron IDs to
spike IDs; s2nids maps spike IDs to neuron IDs"""
cids = np.asarray(cids)
cids = cids - cids.min() # make sure cluster IDs are 0-based
uniquecids = set(cids)
nclusters = len(uniquecids)
# neuron ID to spike IDs (plural) mapping
n2sids = dict(zip(uniquecids, [ [] for i in range(nclusters) ]))
s2nids = {} # spike ID to neuron ID mapping
for spike, nid in zip(spikes, cids):
s2nids[spike['id']] = nid
n2sids[nid].append(spike['id'])
return n2sids, s2nids
def write_spc_input(self):
"""Generate input data file to SPC"""
X = self.get_component_matrix()
# write to space-delimited .dat file. Each row is a spike, each column a param
spykedir = os.path.dirname(__file__)
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
self.spcdatfname = os.path.join(spykedir, 'spc', dt+'.dat')
# not sure why spc adds the dg_01 part:
self.spclabfname = os.path.join(spykedir, 'spc', dt+'.dg_01.lab')
f = open(self.spcdatfname, 'w')
for params in X: # write text data to file, one row at a time
params.tofile(f, sep=' ', format='%.6f')
f.write('\n')
f.close()
def parse_spc_lab_file(self, fname=None):
"""Parse output .lab file from SPC. Each row in the file is the assignment of each
spin (datapoint) to a cluster, one row per temperature. First column is
temperature run number (0-based). 2nd column is the temperature. All remaining
columns correspond to the datapoints in the order presented in the input .dat file.
Returns (Ts, cids)"""
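# Illustrative sketch (not part of the original module): a .lab file with 2
# temperature steps over 4 spikes might look like
#   0 0.00  0 0 0 0
#   1 0.01  0 1 1 0
# so after np.loadtxt(), Ts == [0.00, 0.01] and cids is the 2x4 int array of
# per-temperature cluster assignments, one column per datapoint.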
#spikes = self.get_spikes_sortedby('id')
if fname is None:
defaultDir = r"C:\Documents and Settings\Administrator\Desktop\Charlie\From"
dlg = wx.FileDialog(None, message="Open SPC .lab file",
defaultDir=defaultDir, defaultFile='',
wildcard="All files (*.*)|*.*|.lab files (*.lab)|*.lab|",
style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
dlg.Destroy()
data = np.loadtxt(fname, dtype=np.float32)
Ts = data[:, 1] # 2nd column
cids = np.int32(data[:, 2:]) # 3rd column on
print('Parsed %r' % fname)
return Ts, cids
def parse_charlies_output(self, fname=None):
if fname is None:
fname = (r'C:\Documents and Settings\Administrator\Desktop\Charlie\From'
r'\2009-07-20\clustered_events_coiflet_T0.125.txt')
nids = np.loadtxt(fname, dtype=int) # one neuron id per spike
return nids
def write_spc_app_input(self):
"""Generate input data file to spc_app"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
# write to tab-delimited data file. Each row is a param, each column a spike
# (this is the transpose of X)
# first row has labels "AFFX", "NAME", and then spike ids
# first col has labels "AFFX", and then param names
f = open(r'C:\home\mspacek\Desktop\Work\SPC\Weizmann\spc_app\spc_app_input.txt', 'w')
f.write('AFFX\tNAME\t')
for spike in spikes:
f.write('s%d\t' % spike['id'])
f.write('\n')
for parami, param in enumerate(['Vpp', 'dt', 'x0', 'y0', 'sx', 'sy', 'theta']):
f.write(param+'\t'+param+'\t')
for val in X[:, parami]:
f.write('%f\t' % val)
f.write('\n')
f.close()
def hcluster(self, t=1.0):
"""Hierarchically cluster self.spikes
TODO: consider doing multiple cluster runs. First, cluster by spatial location (x0,
y0). Then split those clusters up by Vpp. Then those by spatial distrib (sy/sx,
theta), then by temporal distrib (dt, s1, s2). This will ensure that the lousier
params will only be considered after the best ones already have, and therefore that
you start off with pretty good clusters that are then only slightly refined using
the lousy params
"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
print(X)
# try 'weighted' or 'average' with 'mahalanobis'
cids = fclusterdata(X, t=t, method='single', metric='euclidean')
n2sids, s2nids = self.get_ids(cids, spikes)
return n2sids
def export2Charlie(self, fname='spike_data', onlymaxchan=False, nchans=3, npoints=32):
"""Export spike data to a text file, one spike per row.
Columns are x0, y0, followed by most prominent npoints datapoints
(1/4, 3/4 wrt spike time) of each nearest nchans. This is to
give to Charlie to do WPD and SPC on"""
if onlymaxchan:
nchans = 1
assert np.log2(npoints) % 1 == 0, 'npoints is not a power of 2'
# get ti - time index each spike is assumed to be centered on
self.spikes[0].update_wave(self.stream) # make sure it has a wave
ti = intround(self.spikes[0].wave.data.shape[-1] / 4) # 13 for 50 kHz, 6 for 25 kHz
dims = self.nspikes, 2+nchans*npoints
output = np.empty(dims, dtype=np.float32)
dm = self.detector.dm
chanis = np.arange(len(dm.data))
coords = np.asarray(dm.coords)
xcoords = coords[:, 0]
ycoords = coords[:, 1]
sids = list(self.spikes) # self.spikes is a dict!
sids.sort()
for sid in sids:
spike = self.spikes[sid]
chani = spike.chani # max chani
x0, y0 = spike.x0, spike.y0
if onlymaxchan:
nearestchanis = np.asarray([chani])
else:
# find closest chans to x0, y0
d2s = (xcoords - x0)**2 + (ycoords - y0)**2 # squared distances
sortis = d2s.argsort()
nearestchanis = chanis[sortis][0:nchans] # pick the first nchan nearest chans
if chani not in nearestchanis:
print("WARNING: max chani %d is not among the %d chanis nearest "
"(x0, y0) = (%.1f, %.1f) for spike %d at t=%d"
% (chani, nchans, x0, y0, sid, spike.t))
if spike.wave.data is None:
spike.update_wave(self.stream)
row = [x0, y0]
for chani in nearestchanis:
chan = dm.chans[chani] # dereference
try:
data = spike.wave[chan].data[0] # pull out singleton dimension
except IndexError: # empty array
data = np.zeros(data.shape[-1], data.dtype)
row.extend(data[ti-npoints//4:ti+npoints*3//4])
output[sid] = row
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
fname += '.' + dt + '.txt'
np.savetxt(fname, output, fmt='%.1f', delimiter=' ')
def match(self, templates=None, weighting='signal', sort=True):
"""Match templates to all .spikes with nearby maxchans,
save error values to respective templates.
Note: slowest step by far is loading in the wave data from disk.
(First match is slow, subsequent ones are ~ 15X faster.)
Unless something's done about that in advance, don't bother optimizing here much.
Right now, once waves are loaded, performance is roughly 20000 matches/sec
TODO: Nick's alternative to gaussian distance weighting: have two templates: a mean
template, and an stdev template, and weight the error between each matched
spike and the mean on each chan at each timepoint by the corresponding stdev value
(divide the error by the stdev, so that timepoints with low stdev are more sensitive
to error)
TODO: looks like I still need to make things more nonlinear - errors at high signal
values aren't penalized enough, while errors at small signal values are penalized
too much. Try cubing both signals, then taking sum(err**2)
DONE: maybe even better, instead of doing an elaborate cubing of signal, followed by
a rather elaborate gaussian spatiotemporal weighting of errors, just take difference
of signals, and weight the error according to the abs(template_signal) at each point
in time and across chans. That way, error in parts of the signal far from zero are
considered more important than deviance of perhaps similar absolute value for signal
close to zero
"""
# None defaults to matching all templates:
templates = templates or self.templates.values()
sys.stdout.write('matching')
t0 = time.time()
nspikes = len(self.spikes)
dm = self.detector.dm
for template in templates:
template.err = [] # overwrite any existing .err attrib
tw = template.tw
templatewave = template.wave[template.chans] # pull out template's enabled chans
#stdev = template.get_stdev()[template.chans] # pull out template's enabled chans
# replace any 0s with 1s - TODO: what's best way to avoid singularities?:
#stdev[stdev == 0] = 1
# Gaussian weighting in space and/or time:
weights = template.get_weights(weighting=weighting, sstdev=self.detector.slock/2,
tstdev=self.detector.tlock/2)
for spike in self.spikes.values():
# check if spike.maxchan is outside some minimum distance from template.maxchan
if dm[template.maxchan, spike.maxchan] > MAXCHANTOLERANCE: # um
continue # don't even bother
if spike.wave.data is None or template.tw != TW: # make sure their data line up
spike.update_wave(tw) # this slows things down a lot, but is necessary
# slice template's enabled chans out of spike, calculate sum of
# squared weighted error
# first impression is that dividing by stdev makes separation worse, not better
# low stdev means more sensitive to error:
#err = (templatewave.data - spike.wave[template.chans].data) / stdev * weights
# pull out template's enabled chans from spike:
spikewave = spike.wave[template.chans]
if weighting == 'signal':
tsdata = np.asarray([templatewave.data, spikewave.data])
# take elementwise max of abs of template and spike data:
weights = np.abs(tsdata).max(axis=0)
err = (templatewave.data - spikewave.data) * weights # weighted error
err = (err**2).sum(axis=None) # sum of squared weighted error
template.err.append((spike.id, intround(err)))
template.err = np.asarray(template.err, dtype=np.int64)
if sort and len(template.err) != 0:
i = template.err[:, 1].argsort() # row indices that sort by error
template.err = template.err[i]
sys.stdout.write('.')
print('\nmatch took %.3f sec' % (time.time()-t0))
'''
class Neuron(object):
"""A collection of spikes that have been deemed somehow, whether manually
or automatically, to have come from the same cell. A Neuron's waveform
is the mean of its member spikes"""
def __init__(self, sort, id=None):
self.sort = sort
self.id = id # neuron id
self.wave = WaveForm() # init to empty waveform
self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron
# relative reference timestamp, here for symmetry with fellow spike rec
# (obj.t comes up sometimes):
self.t = 0
self.plt = None # Plot currently holding self
self.cluster = None
self.good = False # user can mark this neuron as "good" if so desired
#self.fname # not here, let's allow neurons to have spikes from different files?
def get_chans(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans # self.chans just refers to self.wave.chans
chans = property(get_chans)
def get_chan(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans[self.wave.data.ptp(axis=1).argmax()] # chan with max Vpp
chan = property(get_chan)
def get_nspikes(self):
return len(self.sids)
nspikes = property(get_nspikes)
def __getstate__(self):
"""Get object state for pickling"""
d = self.__dict__.copy()
# don't save any calculated PCs/ICs:
#d.pop('X', None)
#d.pop('Xhash', None)
# don't save plot self is assigned to, since that'll change anyway on unpickle
d['plt'] = None
return d
def get_wave(self):
"""Check for valid mean and std waveform before returning it"""
# many neuron waveforms saved in old .sort files won't have a wave.std field:
try:
self.wave.std
except AttributeError:
return self.update_wave()
if self.wave is None or self.wave.data is None or self.wave.std is None:
return self.update_wave()
else:
return self.wave # return existing waveform
def update_wave(self):
"""Update mean and std of self's waveform"""
sort = self.sort
spikes = sort.spikes
if len(self.sids) == 0: # no member spikes, perhaps I should be deleted?
raise RuntimeError("n%d has no spikes and its waveform can't be updated" % self.id)
meanwave = sort.get_mean_wave(self.sids, nid=self.id)
# update self's Waveform object
self.wave.data = meanwave.data
self.wave.std = meanwave.std
self.wave.ts = sort.twts.copy() # meanwave has no .ts, copy for clean jsonpickle
self.wave.chans = meanwave.chans
self.wave.tres = sort.tres # meanwave has no .tres
return self.wave
def __sub__(self, other):
"""Return difference array between self and other neurons' waveforms
on common channels"""
selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,
other.wave.data)
return selfwavedata - otherwavedata
def getCommonWaveData(self, otherchan, otherchans, otherwavedata):
"""Return waveform data common to self's chans and otherchans, while
requiring that both include the other's maxchan"""
chans = np.intersect1d(self.chans, otherchans, assume_unique=True)
if len(chans) == 0:
raise ValueError('No common chans')
if self.chan not in chans or otherchan not in chans:
raise ValueError("maxchans aren't part of common chans")
selfchanis = self.chans.searchsorted(chans)
otherchanis = otherchans.searchsorted(chans)
return self.wave.data[selfchanis], otherwavedata[otherchanis]
'''
def get_stdev(self):
"""Return 2D array of stddev of each timepoint of each chan of member spikes.
Assumes self.update_wave has already been called"""
data = []
# TODO: speed this up by pre-allocating memory and then filling in the array
for spike in self.spikes:
data.append(spike.wave.data) # collect spike's data
stdev = np.asarray(data).std(axis=0)
return stdev
def get_weights(self, weighting=None, sstdev=None, tstdev=None):
"""Returns unity, spatial, temporal, or spatiotemporal Gaussian weights
for self's enabled chans in self.wave.data, given spatial and temporal
stdevs"""
nchans = len(self.wave.chans)
nt = len(self.wave.data[0]) # assume all chans have the same number of timepoints
if weighting is None:
weights = 1
elif weighting == 'spatial':
weights = self.get_gaussian_spatial_weights(sstdev) # vector
elif weighting == 'temporal':
weights = self.get_gaussian_temporal_weights(tstdev) # vector
elif weighting == 'spatiotemporal':
sweights = self.get_gaussian_spatial_weights(sstdev)
tweights = self.get_gaussian_temporal_weights(tstdev)
weights = np.outer(sweights, tweights) # matrix, outer product of the two
elif weighting == 'signal':
weights = None # this is handled by caller
#print('\nweights:\n%r' % weights)
return weights
def get_gaussian_spatial_weights(self, stdev):
"""Return a vector that weights self.chans according to a 2D gaussian
centered on self.maxchan with standard deviation stdev in um"""
g = Gaussian(mean=0, stdev=stdev)
# distances between maxchan and all enabled chans:
d = self.sort.detector.dm[self.maxchan, self.chans]
weights = g[d]
weights.shape = (-1, 1) # vertical vector with nchans rows, 1 column
return weights
def get_gaussian_temporal_weights(self, stdev):
"""Return a vector that weights timepoints in self's mean waveform
by a gaussian centered on t=0, with standard deviation stdev in us"""
g = Gaussian(mean=0, stdev=stdev)
ts = self.wave.ts # template mean timepoints relative to t=0 spike time
weights = g[ts] # horizontal vector with 1 row, nt timepoints
return weights
'''
class PTCSHeader(object):
"""
Polytrode clustered spikes file header:
formatversion: int64 (currently version 3)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nneurons: uint64 (number of neurons)
nspikes: uint64 (total number of spikes)
nsamplebytes: uint64 (number of bytes per template waveform sample)
samplerate: uint64 (Hz)
npttypebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
pttype: npttypebytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nptchans: uint64 (total num chans in polytrode)
chanpos: nptchans * 2 * float64
(array of (x, y) positions, in um, relative to top of polytrode,
indexed by 0-based channel IDs)
nsrcfnamebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
srcfname: nsrcfnamebytes of ASCII text
(source file name, probably .srf, padded with null bytes if needed for
8 byte alignment)
datetime: float64
(absolute datetime corresponding to t=0 us timestamp, stored as days since
epoch: December 30, 1899 at 00:00)
ndatetimestrbytes: uint64
datetimestr: ndatetimestrbytes of ASCII text
(human readable string representation of datetime, preferably ISO 8601,
padded with null bytes if needed for 8 byte alignment)
"""
FORMATVERSION = 3 # overall .ptcs file format version, not header format version
def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user='', notes=''):
self.sort = sort
self.stream = stream
self.nneurons = nneurons
self.nspikes = nspikes
self.nsamplebytes = nsamplebytes
homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))
sortfname = sort.fname
sortfullfname = os.path.join(sortpath, sortfname)
sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))
sortfmoddt = sortfmoddt.split('.')[0] # ditch the us
sortfsize = os.path.getsize(sortfullfname) # in bytes
d = {'file_type': '.ptcs (polytrode clustered spikes) file',
'original_fname': homelessfullfname, 'export_time': exportdt,
'sort': {'fname': sortfname, 'path': sortpath,
'fmtime': sortfmoddt, 'fsize': sortfsize},
'user': user, 'notes': notes}
descr = str(d)
self.descr = pad(descr, align=8)
self.srcfname = pad(lstrip(stream.fname, '../'), align=8)
self.pttype = pad(stream.probe.name, align=8)
self.dt = stream.datetime
self.dtstr = pad(self.dt.isoformat(), align=8)
def write(self, f):
s = self.sort
np.int64(self.FORMATVERSION).tofile(f) # formatversion
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr
np.uint64(self.nneurons).tofile(f) # nneurons
np.uint64(self.nspikes).tofile(f) # nspikes
np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes
np.uint64(s.sampfreq).tofile(f) # samplerate
np.uint64(len(self.pttype)).tofile(f) # npttypebytes
f.write(self.pttype) # pttype
np.uint64(s.stream.probe.nchans).tofile(f) # nptchans
np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos
np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes
f.write(self.srcfname) # srcfname
np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)
np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes
f.write(self.dtstr)
class PTCSNeuronRecord(object):
"""
Polytrode clustered spikes file neuron record:
nid: int64 (signed neuron id, could be -ve, could be non-contiguous with previous)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment, defaults to 0)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
clusterscore: float64
xpos: float64 (um)
ypos: float64 (um)
sigma: float64 (um) (Gaussian spatial sigma)
nchans: uint64 (num chans in template waveforms)
chanids: nchans * uint64 (0 based IDs of channels in template waveforms)
maxchanid: uint64 (0 based ID of max channel in template waveforms)
nt: uint64 (num timepoints per template waveform channel)
nwavedatabytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavedata: nwavedatabytes of nsamplebytes sized floats
(template waveform data, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nwavestdbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavestd: nwavestdbytes of nsamplebytes sized floats
(template waveform standard deviation, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nspikes: uint64 (number of spikes in this neuron)
spike timestamps: nspikes * uint64 (us, should be sorted)
"""
def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):
n = neuron
AD2uV = n.sort.converter.AD2uV
self.neuron = neuron
self.spikets = spikets # constrained to stream range, may be < neuron.sids
self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]
if n.wave.data is None or n.wave.std is None: # some may have never been displayed
n.update_wave()
# wavedata and wavestd are nchans * nt * nsamplebytes long:
self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)
self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)
self.descr = pad(descr, align=8)
def write(self, f):
n = self.neuron
np.int64(n.id).tofile(f) # nid
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr, bytes
np.float64(np.nan).tofile(f) # clusterscore
np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)
np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)
np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)
np.uint64(len(n.wave.chans)).tofile(f) # nchans
np.uint64(n.wave.chans).tofile(f) # chanids
np.uint64(n.chan).tofile(f) # maxchanid
np.uint64(len(n.wave.ts)).tofile(f) # nt
np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes
self.wavedata.tofile(f) # wavedata
np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes
self.wavestd.tofile(f) # wavestd
np.uint64(len(self.spikets)).tofile(f) # nspikes
np.uint64(self.spikets).tofile(f) # spike timestamps (us)
class PanelScrollArea(QtGui.QScrollArea):
"""A scroll area for the spikesortpanel"""
def keyPressEvent(self, event):
key = event.key()
# seems the ENTER key needs to be handled to directly call plot, unlike in sortwin
# where the event is passed on to be handled by the list widgets
if key in [Qt.Key_Enter, Qt.Key_Return]:
sortwin = self.topLevelWidget()
sortwin.parent().ui.plotButton.click()
else:
QtGui.QScrollArea.keyPressEvent(self, event) # pass it on
class SortWindow(SpykeToolWindow):
"""Sort window"""
def __init__(self, parent, pos=None):
SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)
self.spykewindow = parent
ncols = self.sort.probe.ncols
nrows = self.sort.probe.nrows
# try and allow the same amount of horizontal space per column for 2 and 3 col probes:
if ncols <= 2:
self.MAINSPLITTERPOS = 300
else:
self.MAINSPLITTERPOS = 265 # move it more to the left
# make horizontal sort slider use as little vertical space as possible
self.VSPLITTERPOS = 1
panelwidth = PANELWIDTHPERCOLUMN * ncols
panelheight = PANELHEIGHTPERROW * nrows
width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)
size = (width, SORTWINDOWHEIGHT)
self.setWindowTitle('Sort Window')
self.move(*pos)
self.resize(*size)
self._source = None # source cluster for comparison
self.slider = SpikeSelectionSlider(Qt.Horizontal, self)
self.slider.setInvertedControls(True)
self.slider.setToolTip('Position of sliding spike selection time window')
self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),
self.on_slider_valueChanged)
self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),
self.on_slider_sliderPressed)
self.nlist = NList(self)
self.nlist.setToolTip('Neuron list')
self.nslist = NSList(self)
self.nslist.setToolTip('Sorted spike list')
self.uslist = USList(self) # should really be multicolumn tableview
self.uslist.setToolTip('Unsorted spike list')
tw = self.spykewindow.sort.tw
self.panel = SpikeSortPanel(self, tw=tw)
self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))
self.panelscrollarea = PanelScrollArea(self)
self.panelscrollarea.setWidget(self.panel)
self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)
self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min
self.vsplitter = QtGui.QSplitter(Qt.Vertical)
self.vsplitter.addWidget(self.slider)
self.vsplitter.addWidget(self.nlist)
self.vsplitter.addWidget(self.nslist)
self.vsplitter.addWidget(self.uslist)
self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)
self.mainsplitter.addWidget(self.vsplitter)
self.mainsplitter.addWidget(self.panelscrollarea)
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.mainsplitter)
mainwidget = QtGui.QWidget(self)
mainwidget.setLayout(self.layout)
self.setCentralWidget(mainwidget)
self.toolbar = self.setupToolbar()
self.addToolBar(self.toolbar)
def setupToolbar(self):
toolbar = QtGui.QToolBar(self)
toolbar.setObjectName('toolbar')
toolbar.setFloatable(True)
toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window
actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)
tt = ('<nobr><b>Del</b> Delete selected spikes or clusters</nobr>\n'
'<nobr><b>CTRL+Del</b> Delete selected spikes</nobr>')
actionDelete.setToolTip(tt)
self.connect(actionDelete, QtCore.SIGNAL('triggered()'),
self.on_actionDelete_triggered)
toolbar.addAction(actionDelete)
actionMergeClusters = QAction('M', self)
tt = '<nobr><b>M</b> Merge clusters</nobr>'
actionMergeClusters.setToolTip(tt)
self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),
self.on_actionMergeClusters_triggered)
toolbar.addAction(actionMergeClusters)
#actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)
actionToggleClustersGood = QAction('G', self)
tt = '<nobr><b>G</b> Toggle clusters as "good"</nobr>'
actionToggleClustersGood.setToolTip(tt)
self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),
self.on_actionToggleClustersGood_triggered)
toolbar.addAction(actionToggleClustersGood)
actionSplit = QAction('+', self)
tt = '<nobr><b>+</b> Split off selected spikes</nobr>'
actionSplit.setToolTip(tt)
self.connect(actionSplit, QtCore.SIGNAL('triggered()'),
self.on_actionSplit_triggered)
toolbar.addAction(actionSplit)
actionLabelMultiunit = QAction('-', self)
tt = '<nobr><b>-</b> Label clusters as multiunit</nobr>'
actionLabelMultiunit.setToolTip(tt)
self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),
self.on_actionLabelMultiunit_triggered)
toolbar.addAction(actionLabelMultiunit)
actionChanSplitClusters = QAction('/', self)
tt = '<nobr><b>/</b> Split clusters by channels</nobr>'
actionChanSplitClusters.setToolTip(tt)
self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),
self.on_actionChanSplitClusters_triggered)
toolbar.addAction(actionChanSplitClusters)
actionDensitySplit = QAction('P', self)
tt = ('<nobr><b>P</b> Split cluster pair by density along line between '
'their centers</nobr>')
actionDensitySplit.setToolTip(tt)
self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),
self.on_actionDensitySplit_triggered)
toolbar.addAction(actionDensitySplit)
actionRandomSplit = QAction('\\', self)
tt = ('<nobr><b>\\</b> Randomly split each selected cluster in half</nobr>')
actionRandomSplit.setToolTip(tt)
self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),
self.on_actionRandomSplit_triggered)
toolbar.addAction(actionRandomSplit)
#actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)
actionRenumber = QAction('#', self)
tt = ('<nobr><b>#</b> Renumber all clusters in vertical spatial order</nobr>\n'
'<nobr><b>CTRL+#</b> Renumber selected cluster</nobr>')
actionRenumber.setToolTip(tt)
self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),
self.on_actionRenumber_triggered)
toolbar.addAction(actionRenumber)
actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)
tt = ('<nobr><b>CTRL+F</b> Find spike in cluster plot</nobr>')
actionFind.setToolTip(tt)
self.connect(actionFind, QtCore.SIGNAL('triggered()'),
self.on_actionFind_triggered)
toolbar.addAction(actionFind)
actionSelectRandomSpikes = QAction('R', self)
tt = '<nobr><b>R</b> Select random sample of spikes of current clusters</nobr>'
actionSelectRandomSpikes.setToolTip(tt)
self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionSelectRandomSpikes_triggered)
toolbar.addAction(actionSelectRandomSpikes)
actionToggleErrors = QAction('E', self)
actionToggleErrors.setCheckable(True)
actionToggleErrors.setChecked(self.panel.enable_fills)
tt = '<nobr><b>CTRL+E</b> Toggle visibility of template error limits</nobr>'
actionToggleErrors.setToolTip(tt)
self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),
self.on_actionToggleErrors_toggled)
toolbar.addAction(actionToggleErrors)
self.actionToggleErrors = actionToggleErrors
nsamplesComboBox = QtGui.QComboBox(self)
nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')
nsamplesComboBox.setFocusPolicy(Qt.NoFocus)
nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])
nsamplesComboBox.setCurrentIndex(2)
toolbar.addWidget(nsamplesComboBox)
self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),
self.on_actionSelectRandomSpikes_triggered)
self.nsamplesComboBox = nsamplesComboBox
gainComboBox = QtGui.QComboBox(self)
gainComboBox.setToolTip('Waveform gain (default: 1.5)')
gainComboBox.setFocusPolicy(Qt.NoFocus)
gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',
'1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])
gainComboBox.setCurrentIndex(3)
toolbar.addWidget(gainComboBox)
self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),
self.on_gainComboBox_triggered)
self.gainComboBox = gainComboBox
#actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)
actionAlignMin = QAction('Min', self)
actionAlignMin.setToolTip('Align selected spikes to min')
self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMin_triggered)
toolbar.addAction(actionAlignMin)
#actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)
actionAlignMax = QAction('Max', self)
actionAlignMax.setToolTip('Align selected spikes to max')
self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMax_triggered)
toolbar.addAction(actionAlignMax)
#actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)
actionAlignBest = QAction('B', self)
tt = '<nobr><b>B</b> Align selected spikes by best fit</nobr>'
actionAlignBest.setToolTip(tt)
self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),
self.on_actionAlignBest_triggered)
toolbar.addAction(actionAlignBest)
actionShiftLeft = QAction('[', self)
tt = ('<nobr><b>[</b> Shift selected spikes 2 points left</nobr>\n'
'<nobr><b>CTRL+[</b> Shift selected spikes 1 point left</nobr>')
actionShiftLeft.setToolTip(tt)
self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),
self.on_actionShiftLeft_triggered)
toolbar.addAction(actionShiftLeft)
actionShiftRight = QAction(']', self)
tt = ('<nobr><b>]</b> Shift selected spikes 2 points right</nobr>\n'
'<nobr><b>CTRL+]</b> Shift selected spikes 1 point right</nobr>')
actionShiftRight.setToolTip(tt)
self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),
self.on_actionShiftRight_triggered)
toolbar.addAction(actionShiftRight)
incltComboBox = QtGui.QComboBox(self)
incltComboBox.setToolTip("Waveform duration (us) to include for component "
"analysis,\nasymmetric around spike time")
incltComboBox.setFocusPolicy(Qt.NoFocus)
dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width
incltstep = intround(dtw / 10) # evenly spaced inclt values
incltvals = np.arange(dtw, 0, -incltstep)
incltComboBox.addItems([ str(incltval) for incltval in incltvals ])
incltComboBox.setCurrentIndex(0)
toolbar.addWidget(incltComboBox)
self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),
self.on_incltComboBox_triggered)
self.incltComboBox = incltComboBox
#incltunitsLabel = QtGui.QLabel('us', self)
#toolbar.addWidget(incltunitsLabel)
nPCsPerChanSpinBox = QtGui.QSpinBox(self)
nPCsPerChanSpinBox.setToolTip("Number of PCs to use per channel to feed into ICA")
nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)
toolbar.addWidget(nPCsPerChanSpinBox)
nPCsPerChanSpinBox.setMinimum(1)
self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),
self.on_nPCsPerChanSpinBox_valueChanged)
nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)
self.nPCsPerChanSpinBox = nPCsPerChanSpinBox
#actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)
actionFindPrevMostSimilar = QAction('<', self)
tt = '<nobr><b><</b> Find previous most similar cluster</nobr>'
actionFindPrevMostSimilar.setToolTip(tt)
self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindPrevMostSimilar_triggered)
toolbar.addAction(actionFindPrevMostSimilar)
#actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)
actionFindNextMostSimilar = QAction('>', self)
tt = '<nobr><b>></b> Find next most similar cluster</nobr>'
actionFindNextMostSimilar.setToolTip(tt)
self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindNextMostSimilar_triggered)
toolbar.addAction(actionFindNextMostSimilar)
actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)
tt = ('<nobr><b>F5</b> Reload waveforms of selected spikes. '
'If none selected, reload all</nobr>\n'
'<nobr><b>CTRL+F5</b> Use mean waveform to choose chans to reload</nobr>')
actionReloadSpikes.setToolTip(tt)
self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionReloadSpikes_triggered)
toolbar.addAction(actionReloadSpikes)
actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)
actionSave.setToolTip('Save sort panel to file')
self.connect(actionSave, QtCore.SIGNAL('triggered()'),
self.on_actionSave_triggered)
toolbar.addAction(actionSave)
return toolbar
def get_sort(self):
return self.spykewindow.sort
sort = property(get_sort) # make this a property for proper behaviour after unpickling
def closeEvent(self, event):
self.spykewindow.HideWindow('Sort')
def mousePressEvent(self, event):
"""These are mostly passed on up from spyke list views and sort panel. Left
clicks are (or should be) filtered out"""
buttons = event.buttons()
if buttons == QtCore.Qt.MiddleButton:
#self.on_actionSelectRandomSpikes_triggered()
self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
elif buttons == QtCore.Qt.RightButton:
self.clear()
def keyPressEvent(self, event):
"""Alpha character keypresses are by default caught by the child lists for quickly
scrolling down to and selecting list items. However, the appropriate alpha
keypresses have been set in the child lists to be ignored, so they propagate
up to here"""
key = event.key()
modifiers = event.modifiers()
ctrl = modifiers & Qt.ControlModifier # ctrl is down
spw = self.spykewindow
if key == Qt.Key_A: # ignored in SpykeListViews
spw.ui.plotButton.click() # same as hitting ENTER in nslist
elif key == Qt.Key_X: # ignored in SpykeListViews
spw.ui.plotXcorrsButton.click()
elif key == Qt.Key_N: # ignored in SpykeListViews
spw.ui.normButton.click()
elif key == Qt.Key_Escape: # deselect all spikes and all clusters
self.clear()
elif key == Qt.Key_Delete:
self.on_actionDelete_triggered()
elif key == Qt.Key_M: # ignored in SpykeListViews
self.on_actionMergeClusters_triggered()
elif key == Qt.Key_G: # ignored in SpykeListViews
self.on_actionToggleClustersGood_triggered()
elif key == Qt.Key_Equal: # ignored in SpykeListViews
self.on_actionSplit_triggered()
elif key == Qt.Key_Minus: # ignored in SpykeListViews
self.on_actionLabelMultiunit_triggered()
elif key == Qt.Key_Slash: # ignored in SpykeListViews
self.on_actionChanSplitClusters_triggered()
elif key == Qt.Key_P: # ignored in SpykeListViews
self.on_actionDensitySplit_triggered()
elif key == Qt.Key_Backslash: # ignored in SpykeListViews
self.on_actionRandomSplit_triggered()
elif key == Qt.Key_NumberSign: # ignored in SpykeListViews
self.on_actionRenumber_triggered()
elif key == Qt.Key_F: # ignored in SpykeListViews
if ctrl:
self.FindSpike()
else:
self.FindCluster()
elif key == Qt.Key_R: # ignored in SpykeListViews
self.on_actionSelectRandomSpikes_triggered()
elif key == Qt.Key_Space: # ignored in SpykeListViews
if ctrl:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
else:
spw.on_clusterButton_clicked()
elif key == Qt.Key_B: # ignored in SpykeListViews
self.on_actionAlignBest_triggered()
elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews
self.on_actionShiftLeft_triggered()
elif key == Qt.Key_BracketRight: # ignored in SpykeListViews
self.on_actionShiftRight_triggered()
elif key == Qt.Key_Comma: # ignored in SpykeListViews
self.on_actionFindPrevMostSimilar_triggered()
elif key == Qt.Key_Period: # ignored in SpykeListViews
self.on_actionFindNextMostSimilar_triggered()
elif key == Qt.Key_F5: # ignored in SpykeListViews
self.on_actionReloadSpikes_triggered()
elif key == Qt.Key_E: # ignored in SpykeListViews
if ctrl:
self.actionToggleErrors.toggle()
else:
self.clear() # E is synonymous with ESC
elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews
c = str(spw.ui.componentAnalysisComboBox.currentText())
if c == 'PCA':
index = spw.ui.componentAnalysisComboBox.findText('ICA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
elif c == 'ICA':
index = spw.ui.componentAnalysisComboBox.findText('PCA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
spw.on_plotButton_clicked()
elif key == Qt.Key_T: # toggle plotting against time, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 't':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.on_c0c1tButton_clicked() # plot against time
elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 'RMSerror':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.ui.zDimComboBox.setCurrentIndex(3)
spw.on_plotButton_clicked() # plot against RMSError
elif key in [Qt.Key_Enter, Qt.Key_Return]:
# this is handled at a lower level by on_actionItem_triggered
# in the various listview controls
pass
else:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
def clear(self):
"""Clear selections in this order: unsorted spikes, sorted spikes,
cluster automatically selected for comparison, cluster 0, clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
if len(self.uslist.selectedIndexes()) > 0:
self.uslist.clearSelection()
elif self.nslist.nrowsSelected > 0:
self.nslist.clearSelection()
elif len(clusters) == 2 and self._source in clusters:
clusters.remove(self._source)
spw.SelectClusters(clusters, on=False)
elif 0 in spw.GetClusterIDs():
for cluster in spw.GetClusters():
if cluster.id == 0:
spw.SelectClusters([cluster], on=False)
break
else:
self.nlist.clearSelection()
# reset colours in cluster plot:
gw = spw.windows['Cluster'].glWidget
gw.colour()
gw.updateGL()
def on_actionDelete_triggered(self):
"""Delete explicity selected spikes, or clusters"""
selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes
nselsids = len(selsids)
if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier
or nselsids > 0):
self.delete_spikes()
else:
self.delete_clusters()
def delete_clusters(self):
"""Del button press/click"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'delete clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# deselect and delete clusters
spw.DelClusters(clusters)
if len(s.clusters) > 0:
# select cluster that replaces the first of the deleted clusters in norder
selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]
if len(selrows) > 0:
selrow = selrows[0]
nlist = spw.windows['Sort'].nlist
nlist.selectRows(selrow) # TODO: this sets selection, but not focus
#else: # first of deleted clusters was last in norder, don't select anything
# save more undo/redo stuff
newclusters = []
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def delete_spikes(self):
"""CTRL+Del button press/click"""
self.spykewindow.SplitSpikes(delete=True)
def on_actionSplit_triggered(self):
"""+ button click. Split off selected clusters into their own cluster"""
self.spykewindow.SplitSpikes(delete=False)
def on_actionMergeClusters_triggered(self):
"""Merge button (M) click. Merge selected clusters. Easier to use than
running gac() on selected clusters using a really big sigma to force
them to all merge"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = [] # spikes to merge
for cluster in clusters:
sids.append(cluster.neuron.sids)
# merge any selected usids as well
sids.append(spw.GetUnsortedSpikes())
sids = np.concatenate(sids)
if len(sids) == 0:
return
# save some undo/redo stuff
message = 'merge clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# decide on newnid and where to insert it into norder
newnid = None # merge by default into a new highest numbered nid
inserti = None # order new cluster by default to end of nlist
if len(clusters) == 1:
# keep same position of this one nid in norder, regardless of whether it's
# single-unit, multiunit, or junk
inserti = s.norder.index(clusters[0].id)
elif len(clusters) > 1:
oldunids = np.asarray(cc.oldunids)
suids = oldunids[oldunids > 0] # selected single unit nids
if len(suids) > 0: # merge into largest selected single unit nid:
spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])
newnid = suids[spikecounts.argmax()]
inserti = s.norder.index(newnid)
# correct for shift due to deletion of oldunids that precede newnid in norder:
inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])
# delete selected clusters and deselect selected usids
spw.DelClusters(clusters, update=False)
self.uslist.clearSelection()
# create new cluster
#t0 = time.time()
newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
plotdims = spw.GetClusterPlotDims()
newcluster.update_pos()
# save more undo/redo stuff
cc.save_new([newcluster], s.norder, s.good)
spw.AddClusterChangeToStack(cc)
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(newcluster)
#print('applying clusters to plot took %.3f sec' % (time.time()-t0))
# select newly created cluster
spw.SelectClusters(newcluster)
cc.message += ' into cluster %d' % newcluster.id
print(cc.message)
def on_actionToggleClustersGood_triggered(self):
"""'Good' button (G) click. Toggle 'good' flag of all selected clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
cids = []
for cluster in clusters:
cluster.neuron.good = not cluster.neuron.good
cids.append(cluster.id)
self.nlist.updateAll() # nlist item colouring will change as a result
print("Toggled 'good' flag of clusters %r" % cids)
def on_actionLabelMultiunit_triggered(self):
"""- button click. Label all selected clusters as multiunit by deleting them
and creating new ones with -ve IDs"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
# only relabel single unit clusters:
clusters = [ cluster for cluster in clusters if cluster.id > 0 ]
if len(clusters) == 0:
return
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# delete old clusters
inserti = s.norder.index(clusters[0].id)
# collect cluster sids before cluster deletion
sidss = [ cluster.neuron.sids for cluster in clusters ]
spw.DelClusters(clusters, update=False)
# create new multiunit clusters
newclusters = []
for sids in sidss:
muid = s.get_nextmuid()
newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
newcluster.update_pos()
newclusters.append(newcluster)
inserti += 1
# select newly labelled multiunit clusters
spw.SelectClusters(newclusters)
# save more undo/redo stuff
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def on_actionChanSplitClusters_triggered(self):
"""Split by channels button (/) click"""
## TODO: make sure this works on .srf files! Why was chancombosplit being used?
self.spykewindow.maxchansplit()
#self.spykewindow.chancombosplit()
def on_actionDensitySplit_triggered(self):
"""Split cluster pair by density along line between their centers"""
self.spykewindow.densitysplit()
def on_actionRandomSplit_triggered(self):
"""Randomly split each selected cluster in half"""
self.spykewindow.randomsplit()
def on_actionRenumber_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
self.renumber_selected_cluster()
else:
self.renumber_all_clusters()
def renumber_selected_cluster(self):
"""Renumber a single selected cluster to whatever free ID the user wants, for
colouring purposes"""
spw = self.spykewindow
s = self.sort
spikes = s.spikes
cluster = spw.GetCluster() # exactly one selected cluster
oldid = cluster.id
newid = max(s.norder) + 1
newid, ok = QtGui.QInputDialog.getInt(self, "Renumber cluster",
"This will clear the undo/redo stack, and is not undoable.\n"
"Enter new ID:", value=newid)
if not ok:
return
if newid in s.norder:
print("Choose a non-existing nid to renumber to")
return
# deselect cluster
spw.SelectClusters(cluster, on=False)
# rename to newid
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts, and spikes array
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove duplicate oldid dict entries
del s.clusters[oldid]
del s.neurons[oldid]
# replace oldid with newid in norder
s.norder[s.norder.index(oldid)] = newid
# update colour of any relevant points in cluster plot
spw.ColourPoints(cluster)
# reselect cluster
spw.SelectClusters(cluster)
# some cluster changes in stack may no longer be applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbered neuron %d to %d' % (oldid, newid))
def renumber_all_clusters(self):
"""Renumber single unit clusters consecutively from 1, ordered by y position. Do the
same for multiunit (-ve number) clusters, starting from -1. Sorting by y position
makes user inspection of clusters more orderly, makes the presence of duplicate
clusters more obvious, and allows for maximal spatial separation between clusters of
the same colour, reducing colour conflicts"""
val = QtGui.QMessageBox.question(self.panel, "Renumber all clusters",
"Are you sure? This will clear the undo/redo stack, and is not undoable.",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if val == QtGui.QMessageBox.No:
return
spw = self.spykewindow
s = self.sort
spikes = s.spikes
# get spatially and numerically ordered lists of new ids
oldids = np.asarray(s.norder)
oldsuids = oldids[oldids > 0]
oldmuids = oldids[oldids < 0]
# this is a bit confusing: find indices that would sort old ids by y pos, but then
# what you really want is to find the y pos *rank* of each old id, so you need to
# take argsort again:
newsuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldsuids ]).argsort().argsort() + 1
newmuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldmuids ]).argsort().argsort() + 1
newmuids = -newmuids
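# Illustrative sketch (not part of the original module) of the double-argsort
# rank trick used above:
#   y0s = np.array([300., 100., 200.])  # y positions of 3 old cluster ids
#   y0s.argsort()            # -> [1, 2, 0], order that would sort them
#   y0s.argsort().argsort()  # -> [2, 0, 1], i.e. each id's 0-based y rank
# so adding 1 yields new ids 3, 1, 2 in the original id order.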
# multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other
# way around as of Qt 4.7.2 - it seems QListViews don't like having a -ve value in
# the last entry. Doing so causes all 2 digit values in the list to become blank,
# suggests a spacing calculation bug. Reproduce by making last entry multiunit,
# undoing then redoing. Actually, maybe the bug is it doesn't like having a number
# in the last entry with fewer digits than the preceding entry. Only seems to be a
# problem when setting self.setUniformItemSizes(True).
newids = np.concatenate([newmuids, newsuids])
# test
if np.all(oldids == newids):
print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')
return
# update for replacing oldids with newids
oldids = np.concatenate([oldmuids, oldsuids])
# deselect current selections
selclusters = spw.GetClusters()
oldselids = [ cluster.id for cluster in selclusters ]
spw.SelectClusters(selclusters, on=False)
# delete junk cluster, if it exists
if 0 in s.clusters:
s.remove_neuron(0)
print('Deleted junk cluster 0')
if 0 in oldselids:
oldselids.remove(0)
# replace old ids with new ids
cw = spw.windows['Cluster']
oldclusters = s.clusters.copy() # no need to deepcopy, just copy refs, not clusters
dims = spw.GetClusterPlotDims()
for oldid, newid in zip(oldids, newids):
newid = int(newid) # keep as Python int, not numpy int
if oldid == newid:
continue # no need to waste time removing and recreating this cluster
# change all occurrences of oldid to newid
cluster = oldclusters[oldid]
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove any orphaned cluster ids
for oldid in oldids:
if oldid not in newids:
del s.clusters[oldid]
del s.neurons[oldid]
# reset norder
s.norder = []
s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])
s.norder.extend(sorted([ int(newid) for newid in newsuids ]))
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(s.clusters.values())
# reselect the previously selected (but now renumbered) clusters,
# helps user keep track
oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]
newselids = newids[oldiis]
spw.SelectClusters([s.clusters[cid] for cid in newselids])
# all cluster changes in stack are no longer applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbering complete')
def on_actionFind_triggered(self):
"""Find current cluster or spike"""
ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier
if ctrl:
self.FindSpike()
else:
self.FindCluster()
def FindCluster(self):
"""Move focus to location of currently selected (single) cluster"""
spw = self.spykewindow
try:
cluster = spw.GetCluster()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
dims = spw.GetClusterPlotDims()
gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])
gw.panTo() # pan to new focus
gw.updateGL()
def FindSpike(self):
"""Move focus to location of currently selected (single) spike"""
spw = self.spykewindow
try:
sid = spw.GetSpike()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
pointis = gw.sids.searchsorted(sid)
gw.focus = gw.points[pointis]
gw.panTo() # pan to new focus
gw.updateGL()
def on_actionSelectRandomSpikes_triggered(self):
"""Select random sample of spikes in current cluster(s), or random sample
of unsorted spikes if no cluster(s) selected"""
nsamples = int(self.nsamplesComboBox.currentText())
if len(self.nslist.neurons) > 0:
slist = self.nslist
else:
slist = self.uslist
slist.clearSelection() # emits selectionChanged signal, .reset() doesn't
slist.selectRandom(nsamples)
def on_gainComboBox_triggered(self):
"""Set gain of panel based on gainComboBox selection"""
panel = self.panel
panel.gain = float(self.gainComboBox.currentText())
panel.do_layout() # resets axes lims and recalcs panel.pos
panel._update_scale()
panel.draw_refs()
panel.updateAllItems()
def on_actionAlignMin_triggered(self):
self.Align('min')
def on_actionAlignMax_triggered(self):
self.Align('max')
def on_actionAlignBest_triggered(self):
self.Align('best')
def on_actionShiftLeft_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = -1
else:
nt = -2
self.Shift(nt)
def on_actionShiftRight_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = 1
else:
nt = 2
self.Shift(nt)
def on_incltComboBox_triggered(self):
"""Change length of chan selection lines, optionally trigger cluster replot"""
self.panel.update_selvrefs()
self.panel.draw_refs()
#self.spykewindow.ui.plotButton.click()
def get_inclt(self):
"""Return inclt value in incltComboBox"""
return float(self.incltComboBox.currentText()) # us
inclt = property(get_inclt)
def get_tis(self):
"""Return tis (start and end timepoint indices) of duration inclt, asymmetric around
t=0 spike time. Note that any changes to the code here should also be made in the
timepoint selection display code in SortPanel.update_selvrefs()"""
s = self.sort
inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)
tw = self.panel.tw
dtw = tw[1] - tw[0] # spike time window width
left = intround(abs(tw[0]) / dtw * inclt) # left duration (us) wrt t=0 spike time
right = inclt - left # right duration (us) wrt t=0 spike time
tis = s.twts.searchsorted([-left, right])
return tis
tis = property(get_tis)
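# Illustrative example (assumed values, not from the original code): with tw = (-500, 500) us
# and inclt = 400 us, dtw = 1000, left = 200 and right = 200, so tis brackets the 400 us
# window asymmetric-capable around the t=0 spike time via s.twts.searchsorted([-200, 200]).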
def on_nPCsPerChanSpinBox_valueChanged(self, val):
self.sort.npcsperchan = val
def on_actionReloadSpikes_triggered(self):
spw = self.spykewindow
sids = spw.GetAllSpikes()
sort = self.sort
if len(sids) == 0:
# if no spikes specified, reload all spikes
sids = sort.spikes['id']
usemeanchans = False
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
usemeanchans = True
sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)
# add sids to the set of dirtysids to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots:
self.panel.updateAllItems()
def on_actionFindPrevMostSimilar_triggered(self):
self.findMostSimilarCluster('previous')
def on_actionFindNextMostSimilar_triggered(self):
self.findMostSimilarCluster('next')
def on_actionToggleErrors_toggled(self, checked):
self.panel.showFills(checked)
def on_slider_valueChanged(self, slideri):
self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't
if self.nslist.model().sliding == False:
self.nslist.model().sids.sort() # change from nid order to sid order
self.nslist.updateAll() # update to reflect new ordering
self.nslist.model().sliding = True
nsamples = int(self.nsamplesComboBox.currentText())
rows = np.arange(slideri, slideri+nsamples)
self.nslist.selectRows(rows)
def on_slider_sliderPressed(self):
"""Make slider click (without movement) highlight the first nsamples
or fewer spikes when slider is at 0 position"""
slideri = self.slider.value()
if slideri == 0:
nsamples = int(self.nsamplesComboBox.currentText())
nsamples = min(nsamples, self.nslist.model().nspikes)
rows = np.arange(nsamples)
self.nslist.selectRows(rows)
def update_slider(self):
"""Update slider limits and step sizes"""
nsamples = int(self.nsamplesComboBox.currentText())
nsids = len(self.nslist.sids)
ulim = max(nsids-nsamples, 1) # upper limit
self.slider.setRange(0, ulim)
self.slider.setSingleStep(1)
self.slider.setPageStep(nsamples)
def findMostSimilarCluster(self, which='next'):
"""If no chans selected, compare source to next or previous most similar cluster
based on chans the two have in common, while requiring the two have each others'
max chans in common. If chans have been selected, use them as a starting set of
chans to compare on. Also, use only the timepoint range selected in incltComboBox"""
try:
source = self.getClusterComparisonSource()
except RuntimeError as err:
print(err)
return
destinations = list(self.sort.clusters.values())
destinations.remove(source)
selchans = np.sort(self.panel.chans_selected)
if len(selchans) > 0:
srcchans = np.intersect1d(source.neuron.wave.chans, selchans)
if len(srcchans) == 0:
print("Source cluster doesn't overlap with selected chans")
return
else:
srcchans = source.neuron.wave.chans
if self.spykewindow.ui.normButton.isChecked():
print("NOTE: findMostSimilarCluster() doesn't currently take spike amplitude "
"normalization into account. To see the true amplitudes used to compare "
"neuron pairs, turn off normalization")
errors = []
dests = []
t0i, t1i = self.tis # timepoint range selected in incltComboBox
# try and compare source neuron waveform to all destination neuron waveforms
for dest in destinations:
if dest.neuron.wave.data is None: # hasn't been calculated yet
dest.neuron.update_wave()
dstchans = dest.neuron.wave.chans
if len(selchans) > 0:
if not set(selchans).issubset(dstchans):
continue
dstchans = selchans
cmpchans = np.intersect1d(srcchans, dstchans)
if len(cmpchans) == 0: # not comparable
continue
# ensure maxchan of both source and dest neuron are both in cmpchans
if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:
continue
srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]
dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]
error = core.rms(srcwavedata - dstwavedata)
errors.append(error)
dests.append(dest)
if len(errors) == 0:
print("No sufficiently overlapping clusters on selected chans to compare to")
return
errors = np.asarray(errors)
dests = np.asarray(dests)
desterrsortis = errors.argsort()
if which == 'next':
self._cmpid += 1
elif which == 'previous':
self._cmpid -= 1
else: raise ValueError('Unknown which: %r' % which)
self._cmpid = max(self._cmpid, 0)
self._cmpid = min(self._cmpid, len(dests)-1)
dest = dests[desterrsortis][self._cmpid]
self.spykewindow.SelectClusters(dest)
desterr = errors[desterrsortis][self._cmpid]
print('n%d to n%d rmserror: %.2f uV' %
(source.id, dest.id, self.sort.converter.AD2uV(desterr)))
def getClusterComparisonSource(self):
selclusters = self.spykewindow.GetClusters()
errmsg = 'unclear which cluster to use as source for comparison'
if len(selclusters) == 1:
source = selclusters[0]
self._source = source
self._cmpid = -1 # init/reset
elif len(selclusters) == 2:
source = self._source
if source not in selclusters:
raise RuntimeError(errmsg)
# deselect old destination cluster:
selclusters.remove(source)
self.spykewindow.SelectClusters(selclusters, on=False)
else:
self._source = None # reset for tidiness
raise RuntimeError(errmsg)
return source
def Shift(self, nt):
"""Shift selected sids by nt timepoints"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
self.sort.shift(sids, nt)
print('Shifted %d spikes by %d timepoints' % (len(sids), nt))
unids = np.unique(spikes['nid'][sids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots
self.panel.updateAllItems()
def Align(self, to):
"""Align all implicitly selected spikes to min or max, or best fit
on selected chans"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
if to == 'best':
tis = self.tis
# find which chans are common to all sids:
commonchans = s.get_common_chans(sids)[0]
# check selected chans
selchans = spw.get_selchans(sids)
for selchan in selchans:
if selchan not in commonchans:
print("Chan %d not common to all spikes, pick from %r"
% (selchan, list(commonchans)))
return
print('Best fit aligning %d spikes between tis=%r on chans=%r' %
(len(sids), list(tis), selchans))
# numpy implementation:
#dirtysids = s.alignbest(sids, tis, selchans)
# cython implementation:
dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))
else: # to in ['min', 'max']
print('Aligning %d spikes to %s' % (len(sids), to))
dirtysids = s.alignminmax(sids, to)
paligned = len(dirtysids) / len(sids) * 100
print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))
unids = np.unique(spikes['nid'][dirtysids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(dirtysids)
# auto-refresh all plots:
self.panel.updateAllItems()
def RemoveNeuron(self, neuron, update=True):
"""Remove neuron and all its spikes from the GUI and the Sort"""
self.MoveSpikes2List(neuron, neuron.sids, update=update)
self.sort.remove_neuron(neuron.id)
if update:
self.nlist.updateAll()
def MoveSpikes2Neuron(self, sids, neuron=None, update=True):
"""Assign spikes from sort.spikes to a neuron, and trigger eventual update of
mean wave. If neuron is None, create a new one"""
sids = toiter(sids)
spikes = self.sort.spikes
if neuron is None:
neuron = self.sort.create_neuron()
neuron.sids = np.union1d(neuron.sids, sids) # update
spikes['nid'][sids] = neuron.id
if update:
self.sort.update_usids()
self.uslist.updateAll()
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # trigger nslist refresh
# TODO: selection doesn't seem to be working, always jumps to top of list
#self.uslist.Select(row) # automatically select the new item at that position
neuron.wave.data = None # trigger template mean update
return neuron
def MoveSpikes2List(self, neuron, sids, update=True):
"""Move spikes from a neuron back to the unsorted spike list control"""
sids = toiter(sids)
if len(sids) == 0:
return # nothing to do
spikes = self.sort.spikes
neuron.sids = np.setdiff1d(neuron.sids, sids) # return what's in 1st arr and not in 2nd
spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array
if update:
self.sort.update_usids()
self.uslist.updateAll()
# this only makes sense if the neuron is currently selected in the nlist:
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # this triggers a refresh
neuron.wave.data = None # triggers an update when it's actually needed
def PlotClusterHistogram(self, X, nids):
"""Plot histogram of given clusters along a single dimension. If two clusters are
given, project them onto axis connecting their centers, and calculate separation
indices between them. Otherwise, plot the distribution of all given clusters
(up to a limit) along the first dimension in X."""
spw = self.spykewindow
mplw = spw.OpenWindow('MPL')
unids = np.unique(nids) # each unid corresponds to a cluster, except possibly unid 0
nclusters = len(unids)
if nclusters == 0:
mplw.ax.clear()
mplw.figurecanvas.draw()
print("No spikes selected")
return
elif nclusters > 5: # to prevent slowdowns, don't plot too many
mplw.ax.clear()
mplw.figurecanvas.draw()
print("Too many clusters selected for cluster histogram")
return
elif nclusters == 2:
calc_measures = True
else:
calc_measures = False
projdimi = 0
ndims = X.shape[1]
points = [] # list of projection of each cluster's points onto dimi
for unid in unids:
sidis, = | np.where(nids == unid) | numpy.where |
import torch
import torch.nn as nn
from torch.nn import Module
import torch.nn.functional as F
from torch.autograd import Function
import numpy as np
def shift(x):
#TODO: edge case, when x contains 0
return 2.**torch.round(torch.log2(x))
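# Example (illustrative values, assuming a positive float tensor input):
#   shift(torch.tensor([0.3, 1.7]))
#   -> tensor([0.2500, 2.0000])   # each entry is snapped to the nearest power of two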
def S(bits):
return 2.**(bits-1)
def SR(x):
r = torch.cuda.FloatTensor(*x.size()).uniform_()
return torch.floor(x+r)
def C(x, bits):
if bits > 15 or bits == 1:
delta = 0
else:
delta = 1. / S(bits)
upper = 1 - delta
lower = -1 + delta
return torch.clamp(x, lower, upper)
def Q(x, bits):
assert bits != -1
if bits==1:
return torch.sign(x)
if bits > 15:
return x
return torch.round(x*S(bits))/S(bits)
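# Example of the clamp-then-round quantizer (illustrative values; bits=8 assumed):
#   x = torch.tensor([0.3, 1.2])
#   S(8)           # -> 128.0, i.e. 2**(bits-1) levels per unit range
#   C(x, 8)        # -> tensor([0.3000, 0.9922]), clamped to +/-(1 - 1/128)
#   Q(C(x, 8), 8)  # -> tensor([0.2969, 0.9922]), rounded to multiples of 1/128
# QW and QE below reuse this clamp-then-round pattern for weights and errors.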
def QW(x, bits, scale=1.0):
y = Q(C(x, bits), bits)
# per layer scaling
if scale>1.8: y /= scale
return y
def QE(x, bits):
max_entry = x.abs().max()
assert max_entry != 0, "QE blow"
#if max_entry != 0:
x /= shift(max_entry)
return Q(C(x, bits), bits)
def QG(x, bits_G, lr):
max_entry = x.abs().max()
assert max_entry != 0, "QG blow"
#if max_entry != 0:
x /= shift(max_entry)
norm = lr * x
norm = SR(norm)
return norm / S(bits_G)
def Retention(x, t, v, detect, target):
lower = -1.0
upper = 1.0
if detect == 1: # need to define the sign of v
sign = torch.zeros_like(x)
truncateX = (x+1)/2
truncateTarget = (target+1)/2
sign = torch.sign(torch.add(torch.zeros_like(x),truncateTarget)-truncateX)
ratio = t**(v*sign)
else : # random generate target for each cell
sign = torch.randint_like(x, -1, 2)
truncateX = (x+1)/2
ratio = t**(v*sign)
return torch.clamp((2*truncateX*ratio-1), lower, upper)
def NonLinearQuantizeOut(x, bit):
# minQ = torch.min(x)
# delta = torch.max(x) - torch.min(x)
k=7.0
minQ = -k*x.abs().mean()
maxQ = k*x.abs().mean()
delta = maxQ - minQ
#print(minQ)
#print(delta)
if (bit == 3) :
# 3-bit ADC
y = x.clone()
base = torch.zeros_like(y)
bound = np.array([0.02, 0.08, 0.12, 0.18, 0.3, 0.5, 0.7, 1])
out = np.array([0.01, 0.05, 0.1, 0.15, 0.24, 0.4, 0.6, 0.85])
ref = torch.from_numpy(bound).float()
quant = torch.from_numpy(out).float()
y = torch.where(y<(minQ+ref[0]*delta), torch.add(base,(minQ+quant[0]*delta)), y)
y = torch.where(((minQ+ref[0]*delta)<=y) & (y<(minQ+ref[1]*delta)), torch.add(base,(minQ+quant[1]*delta)), y)
y = torch.where(((minQ+ref[1]*delta)<=y) & (y<(minQ+ref[2]*delta)), torch.add(base,(minQ+quant[2]*delta)), y)
y = torch.where(((minQ+ref[2]*delta)<=y) & (y<(minQ+ref[3]*delta)), torch.add(base,(minQ+quant[3]*delta)), y)
y = torch.where(((minQ+ref[3]*delta)<=y) & (y<(minQ+ref[4]*delta)), torch.add(base,(minQ+quant[4]*delta)), y)
y = torch.where(((minQ+ref[4]*delta)<=y) & (y<(minQ+ref[5]*delta)), torch.add(base,(minQ+quant[5]*delta)), y)
y = torch.where(((minQ+ref[5]*delta)<=y) & (y<(minQ+ref[6]*delta)), torch.add(base,(minQ+quant[6]*delta)), y)
y = torch.where(((minQ+ref[6]*delta)<=y) & (y<(minQ+ref[7]*delta)), torch.add(base,(minQ+quant[7]*delta)), y)
elif (bit == 4):
y = x.clone()
# 4-bit ADC
base = torch.zeros_like(y)
# good for 2-bit cell
bound = np.array([0.02, 0.05, 0.08, 0.12, 0.16, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.85, 1])
out = np.array([0.01, 0.035, 0.065, 0.1, 0.14, 0.18, 0.225, 0.275, 0.325, 0.375, 0.425, 0.475, 0.55, 0.65, 0.775, 0.925])
ref = torch.from_numpy(bound).float()
quant = torch.from_numpy(out).float()
y = torch.where(y.data<(minQ+ref[0]*delta), torch.add(base,(minQ+quant[0]*delta)), y)
y = torch.where(((minQ+ref[0]*delta)<=y.data) & (y.data<(minQ+ref[1]*delta)), torch.add(base,(minQ+quant[1]*delta)), y)
y = torch.where(((minQ+ref[1]*delta)<=y.data) & (y.data<(minQ+ref[2]*delta)), torch.add(base,(minQ+quant[2]*delta)), y)
y = torch.where(((minQ+ref[2]*delta)<=y.data) & (y.data<(minQ+ref[3]*delta)), torch.add(base,(minQ+quant[3]*delta)), y)
y = torch.where(((minQ+ref[3]*delta)<=y.data) & (y.data<(minQ+ref[4]*delta)), torch.add(base,(minQ+quant[4]*delta)), y)
y = torch.where(((minQ+ref[4]*delta)<=y.data) & (y.data<(minQ+ref[5]*delta)), torch.add(base,(minQ+quant[5]*delta)), y)
y = torch.where(((minQ+ref[5]*delta)<=y.data) & (y.data<(minQ+ref[6]*delta)), torch.add(base,(minQ+quant[6]*delta)), y)
y = torch.where(((minQ+ref[6]*delta)<=y.data) & (y.data<(minQ+ref[7]*delta)), torch.add(base,(minQ+quant[7]*delta)), y)
y = torch.where(((minQ+ref[7]*delta)<=y.data) & (y.data<(minQ+ref[8]*delta)), torch.add(base,(minQ+quant[8]*delta)), y)
y = torch.where(((minQ+ref[8]*delta)<=y.data) & (y.data<(minQ+ref[9]*delta)), torch.add(base,(minQ+quant[9]*delta)), y)
y = torch.where(((minQ+ref[9]*delta)<=y.data) & (y.data<(minQ+ref[10]*delta)), torch.add(base,(minQ+quant[10]*delta)), y)
y = torch.where(((minQ+ref[10]*delta)<=y.data) & (y.data<(minQ+ref[11]*delta)), torch.add(base,(minQ+quant[11]*delta)), y)
y = torch.where(((minQ+ref[11]*delta)<=y.data) & (y.data<(minQ+ref[12]*delta)), torch.add(base,(minQ+quant[12]*delta)), y)
y = torch.where(((minQ+ref[12]*delta)<=y.data) & (y.data<(minQ+ref[13]*delta)), torch.add(base,(minQ+quant[13]*delta)), y)
y = torch.where(((minQ+ref[13]*delta)<=y.data) & (y.data<(minQ+ref[14]*delta)), torch.add(base,(minQ+quant[14]*delta)), y)
y = torch.where(((minQ+ref[14]*delta)<=y.data) & (y.data<(minQ+ref[15]*delta)), torch.add(base,(minQ+quant[15]*delta)), y)
elif (bit == 5):
y = x.clone()
# 5-bit ADC
base = torch.zeros_like(y)
# good for 2-bit cell
# bound = np.array([0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1])
# bound = np.array([0.02, 0.06, 0.1, 0.14, 0.18, 0.22, 0.26, 0.30, 0.34, 0.38, 0.40, 0.42, 0.44, 0.46, 0.48, 0.50, 0.52, 0.54, 0.56, 0.58, 0.60, 0.62, 0.64, 0.66, 0.68, 0.72, 0.76, 0.80, 0.84, 0.88, 0.92, 0.96])
# out = np.array([0.0, 0.04, 0.08, 0.12, 0.16, 0.20, 0.24, 0.28, 0.32, 0.36, 0.39, 0.41, 0.43, 0.45, 0.47, 0.49, 0.51, 0.53, 0.55, 0.57, 0.59, 0.61, 0.63, 0.65, 0.67, 0.70, 0.74, 0.78, 0.82, 0.86, 0.90, 0.94])
bound = | np.array([0.02, 0.06, 0.1, 0.14, 0.18, 0.22, 0.26, 0.30, 0.34, 0.36, 0.38, 0.40, 0.42, 0.44, 0.46, 0.48, 0.50, 0.52, 0.54, 0.56, 0.58, 0.60, 0.62, 0.64, 0.68, 0.72, 0.76, 0.80, 0.84, 0.88, 0.92, 0.96]) | numpy.array |
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, figure, imshow, show, xlabel, ylabel, legend, gca, subplot, title, tight_layout, savefig
import matplotlib.ticker as mtick
from cycler import cycler
# Load timings
cwd = os.getcwd()
path_r5 = cwd + '/results/run_r5_max_cpu/'
path_c5n = cwd + '/results/run_c5n_max_cpus/'
file_list_r5 = os.listdir(path_r5)
file_list_c5n = os.listdir(path_c5n)
file_timings_r5 = []
file_timings_c5n = []
num_instances = np.array([1,2,4,8,16])
num_scalings = len(num_instances)
num_runs_per_scaling = 3
for batchsize in num_instances:
for run in range(num_runs_per_scaling):
for filename in file_list_r5:
if filename[0:int(23 + len(str(batchsize)) + len(str(run)))] == \
'timings_num_nodes_' + str(batchsize) + '_run_' + str(run):
file_timings_r5.append(filename)
for filename in file_list_c5n:
if filename[0:int(23 + len(str(batchsize)) + len(str(run)))] == \
'timings_num_nodes_' + str(batchsize) + '_run_' + str(run):
file_timings_c5n.append(filename)
print("Found ", len(file_timings_r5), " R5 file(s).")
print("Found ", len(file_timings_c5n), " C5n file(s).")
timings_r5 = []
timings_c5n = []
for filename in file_timings_r5:
timings_r5.append(np.load(path_r5 + filename, allow_pickle=True)) # timings: list of num_files entries: 1 x 6
for filename in file_timings_c5n:
timings_c5n.append(np.load(path_c5n + filename, allow_pickle=True))
# Timings
# timing indices: create=0; start=1; end=2; var=3; kernel=4; devito=5; script=6
job_runtime_r5 = np.zeros((num_scalings, num_runs_per_scaling))
container_runtime_r5 = np.zeros((num_scalings, num_runs_per_scaling))
kernel_runtime_r5 = np.zeros((num_scalings, num_runs_per_scaling))
devito_runtime_r5 = np.zeros((num_scalings, num_runs_per_scaling))
script_runtime_r5 = np.zeros((num_scalings, num_runs_per_scaling))
job_runtime_c5n = np.zeros((num_scalings, num_runs_per_scaling))
container_runtime_c5n = np.zeros((num_scalings, num_runs_per_scaling))
kernel_runtime_c5n = np.zeros((num_scalings, num_runs_per_scaling))
devito_runtime_c5n = np.zeros((num_scalings, num_runs_per_scaling))
script_runtime_c5n = np.zeros((num_scalings, num_runs_per_scaling))
idx = 0
for j in range(num_scalings):
for k in range(num_runs_per_scaling):
T = timings_r5[idx].reshape(7)
job_runtime_r5[j,k] = (T[2] - T[0]) / 1e3
container_runtime_r5[j,k] = (T[2] - T[1]) / 1e3
kernel_runtime_r5[j,k] = T[4] / num_instances[j]
devito_runtime_r5[j,k] = T[5]
script_runtime_r5[j,k] = T[6]
T = timings_c5n[idx].reshape(7)
job_runtime_c5n[j,k] = (T[2] - T[0]) / 1e3
container_runtime_c5n[j,k] = (T[2] - T[1]) / 1e3
kernel_runtime_c5n[j,k] = T[4] / num_instances[j]
devito_runtime_c5n[j,k] = T[5]
script_runtime_c5n[j,k] = T[6]
idx += 1
# Timings plot
fig, ax = plt.subplots(figsize=(3.33, 3))
#ax.set_xscale("log", nonposx='clip')
#ax.set_yscale("log", nonposy='clip')
bar1 = ax.bar(np.log(num_instances), np.mean(container_runtime_r5, axis=1), align='edge', alpha=0.8, ecolor='black', width=-.2, capsize=3)
bar2 = ax.bar(np.log(num_instances), np.mean(container_runtime_c5n, axis=1), align='edge', alpha=0.8, ecolor='black', width=.2, capsize=3)
plt.xticks(np.log(num_instances), ('1', '2', '4', '8', '16'), size=10)
ax.set_xlabel('No. of instances', fontsize=10)
ax.set_ylabel('Devito kernel runtime [s]', fontsize=10)
ax.tick_params(axis='y', labelsize=10)
ax.tick_params(axis='x', labelsize=10)
ax.set_ylim([0, 500])
plt.legend(['r5 family', 'c5n family'], loc='upper right', fontsize=9)
def autolabel(rects, labels, scale):
"""
Attach a text label to each bar (instance type and thread count)
"""
i=0
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/scale, 10, labels[i],
va='bottom', fontsize=9, rotation=90, color='white')
i+=1
labels_r5 = ['r5.12xlarge (24)', 'r5.4xlarge (16)', 'r5.2xlarge (16)', 'r5.xlarge (16)', 'r5.large (16)']
labels_c5n = ['c5n.18xlarge (18)', 'c5n.9xlarge (36)', 'c5n.4xlarge (32)', 'c5n.2xlarge (32)', 'c5n.xlarge (32)']
autolabel(bar1, labels_c5n, 1.15)
autolabel(bar2, labels_r5, 7)
plt.tight_layout()
savefig('strong_scaling_runtime_max_threads.png', dpi=600, format='png')
# Cost r5 vs c5n
# Cost plot (<NAME>, May 13, 2019, 10:04 PM)
r5_on_demand = np.array([3.024, 1.008, 0.504, 0.252, 0.126])/60/60 # r5.12xlarge, r5.4xlarge, r5.2xlarge, r5.xlarge, r5.large
r5_spot = np.array([0.8766, 0.2959, 0.1491, 0.0732, 0.0356])/60/60
c5n_on_demand = np.array([3.888, 1.944, 0.864, 0.432, 0.216])/60/60 # c5n.18xlarge, c5n.9xlarge, c5n.4xlarge, c5n.2xlarge, c5n.xlarge
c5n_spot = | np.array([1.1659, 0.583, 0.2591, 0.1295, 0.0648]) | numpy.array |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
import numpy as np
import cv2
import math
#import torch.nn.functional as F
from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, Scale, ConvModule
INF = 1e8
@HEADS.register_module
class MatrixCenterHead(nn.Module):
def __init__(self,
num_classes, # init 80
in_channels,
feat_channels=256,
stacked_convs=1,
strides=(4, 4, 4,
8, 8, 8, 8,
16, 16, 16, 16, 16,
32, 32, 32, 32,
64, 64, 64),
regress_ranges=((-1, 48), (48, 96), (96, 192), (192, 384), (384, INF)),
flags = [[0, 0, 0, 1, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 1, 0, 0, 0]],
index_map = {0:0, 1:1, 2:2,
5:3, 6:4, 7:5, 8:6,
10:7, 11:8, 12:9, 13:10, 14:11,
16:12, 17:13, 18:14, 19:15,
22:16, 23:17, 24:18}, # use i * 5 + j to get the featmap
loss_hm = dict(
type="CenterFocalLoss"
), # CenterFocalLoss is implemented here
loss_wh = dict(
type="L1Loss",
loss_weight=0.1
),
loss_offset = dict(
type="L1Loss",
loss_weight=1.0
),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(MatrixCenterHead, self).__init__()
self.num_classes = num_classes
# self.cls_out_channels = num_classes - 1
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.featmap_sizes = None
self.loss_hm = build_loss(loss_hm)
self.loss_wh = build_loss(loss_wh)
self.loss_offset = build_loss(loss_offset)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.flags = flags
self.index_map = index_map
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.wh_convs = nn.ModuleList()
self.offset_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.wh_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.offset_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)
self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
#self.scales = nn.ModuleList([Scale(1.0) for _ in 19])
def init_weights(self):
# for m in self.cls_convs:
# normal_init(m.conv, std=0.01)
# for m in self.wh_convs:
# normal_init(m.conv, std=0.01)
# for m in self.offset_convs:
# normal_init(m.conv, std=0.01)
#bias_hm = bias_init_with_prob(0.01) # how should this be initialized?
#normal_init(self.center_hm, std=0.01, bias=bias_hm)
self.center_hm.bias.data.fill_(-2.19)
nn.init.constant_(self.center_wh.bias, 0)
nn.init.constant_(self.center_offset.bias, 0)
# normal_init(self.center_hm, std=0.01)
# normal_init(self.center_wh, std=0.01)
# normal_init(self.center_offset, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
wh_feat = x
offset_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.center_hm(cls_feat)
for wh_layer in self.wh_convs:
wh_feat = wh_layer(wh_feat)
wh_pred = self.center_wh(wh_feat)
for offset_layer in self.offset_convs:
offset_feat = offset_layer(offset_feat)
offset_pred = self.center_offset(offset_feat)
return cls_score, wh_pred, offset_pred
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))
def loss(self,
cls_scores,
wh_preds,
offset_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(wh_preds) == len(offset_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
self.featmap_sizes = featmap_sizes
all_level_points = self.get_points(featmap_sizes, offset_preds[0].dtype,
offset_preds[0].device)
#print(img_metas)
#self.c = img_metas['c']
#self.s = img_metas['s']
self.tensor_dtype = offset_preds[0].dtype
self.tensor_device = offset_preds[0].device
heatmaps, wh_targets, offset_targets = self.center_target(gt_bboxes, gt_labels, img_metas, all_level_points) # concatenated over all levels, one entry per image
num_imgs = cls_scores[0].size(0) # batch_size
#print(num_imgs)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
] # cls_scores(num_levels, batch_size, 80, h, w) => (num_levels, batch_size * w * h, 80)
flatten_wh_preds = [
wh_pred.permute(0, 2, 3, 1).reshape(-1, 2) # batchsize, h, w, 2 => batchsize, h, w, 2
for wh_pred in wh_preds
]
flatten_offset_preds = [
offset_pred.permute(0, 2, 3, 1).reshape(-1, 2)
for offset_pred in offset_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_wh_preds = torch.cat(flatten_wh_preds)
flatten_offset_preds = torch.cat(flatten_offset_preds)
# targets
flatten_heatmaps = torch.cat(heatmaps)
flatten_wh_targets = torch.cat(wh_targets) # torch.Size([all_level_points, 2])
flatten_offset_targets = torch.cat(offset_targets)
# repeat points to align with bbox_preds
# flatten_points = torch.cat(
# [points.repeat(num_imgs, 1) for points in all_level_points])
# pos_inds = flatten_labels.nonzero().reshape(-1)
#print(flatten_wh_targets.shape)
#print(flatten_wh_targets.nonzero())
center_inds = flatten_wh_targets[...,0].nonzero().reshape(-1)
#print(center_inds)
num_center = len(center_inds)
#print(num_center)
# what about use the centerness * labels to indict an object
# loss_cls = self.loss_cls(
# flatten_cls_scores, flatten_labels, # labels gt is small area
# avg_factor=num_pos + num_imgs) # avoid num_pos is 0
flatten_cls_scores = torch.clamp(flatten_cls_scores.sigmoid_(), min=1e-4, max=1-1e-4)
loss_hm = self.loss_hm(flatten_cls_scores, flatten_heatmaps)
pos_wh_targets = flatten_wh_targets[center_inds]
#print(pos_wh_targets.shape)
pos_wh_preds = flatten_wh_preds[center_inds]
pos_offset_preds = flatten_offset_preds[center_inds]
pos_offset_targets = flatten_offset_targets[center_inds]
if num_center > 0:
# TODO: use the iou loss
# center_points = flatten_points[center_inds]
# center_decoded_bbox_preds = wh_offset2bbox(center_points, pos_wh_preds, pos_offset_preds)
# center_decoded_bbox_targets = wh_offset2bbox(center_points, pos_wh_targets, pos_offset_targets)
loss_wh = self.loss_wh(pos_wh_preds, pos_wh_targets, avg_factor=num_center + num_imgs)
#loss_wh = F.l1_loss(pos_wh_preds, pos_wh_targets, reduction='sum') / (num_center + num_imgs)
#loss_wh = 0.1 * loss_wh
loss_offset = self.loss_offset(pos_offset_preds, pos_offset_targets, avg_factor=num_center + num_imgs)
else:
loss_wh = pos_wh_preds.sum()
loss_offset = pos_offset_preds.sum()
return dict(
loss_hm = loss_hm,
loss_wh = loss_wh,
loss_offset = loss_offset)
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device) # sample x values at a fixed stride
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range) # enumerate every point of the feature map
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
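# Example (illustrative): for featmap_size=(2, 3) and stride=4 this returns the centers
# of the stride-4 cells:
#   tensor([[ 2.,  2.], [ 6.,  2.], [10.,  2.],
#           [ 2.,  6.], [ 6.,  6.], [10.,  6.]])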
def center_target(self, gt_bboxes_list, gt_labels_list, img_metas, all_level_points):
#assert len(self.featmap_sizes) == len(self.regress_ranges)
assert len(self.featmap_sizes) == len(self.strides)
# get heatmaps and targets of each image
# heatmaps in heatmaps_list: [num_points, 80]
# wh_targets: [num_points, 2] => [batch_size, num_points, 2]
heatmaps_list, wh_targets_list, offset_targets_list = multi_apply(
self.center_target_single,
gt_bboxes_list,
gt_labels_list,
img_metas
)
# split to per img, per level
num_points = [center.size(0) for center in all_level_points] # number of points per level; all_level_points is like [[12414, 2], ...]
heatmaps_list = [heatmaps.split(num_points, 0) for heatmaps in heatmaps_list]
wh_targets_list = [wh_targets.split(num_points, 0) for wh_targets in wh_targets_list]
offset_targets_list = [offset_targets.split(num_points, 0) for offset_targets in offset_targets_list]
# concat per level across images # [(batch_size, featmap_size[1]), ...)
concat_lvl_heatmaps = []
concat_lvl_wh_targets = []
concat_lvl_offset_targets = []
num_levels = len(self.featmap_sizes)
for i in range(num_levels):
concat_lvl_heatmaps.append(
torch.cat([heatmaps[i] for heatmaps in heatmaps_list])) # (num_levels, batch_size * w * h, 80)
concat_lvl_wh_targets.append(
torch.cat(
[wh_targets[i] for wh_targets in wh_targets_list]))
concat_lvl_offset_targets.append(
torch.cat(
[offset_targets[i] for offset_targets in offset_targets_list]))
return concat_lvl_heatmaps, concat_lvl_wh_targets, concat_lvl_offset_targets
def center_target_single(self, gt_bboxes, gt_labels, img_meta):
"""
single image
gt_bboxes:torch.Size([6, 4])
gt_labels:torch.Size([6]) tensor([34, 34, 34, 34, 34, 34], device='cuda:0')
featmap_sizes:(list[tuple]): Multi-level feature map sizes.
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))
"""
# transform the gt_bboxes, gt_labels to numpy
gt_bboxes = gt_bboxes.data.cpu().numpy()
gt_labels = gt_labels.data.cpu().numpy()
#print(gt_bboxes, gt_labels)
num_objs = gt_labels.shape[0]
#print(num_objs)
# heatmaps [level1, level2, level3, level4, level5]
num_levels = len(self.featmap_sizes)
heatmaps_targets = []
wh_targets = []
offset_targets = []
# get the target shape for each image
for i in range(num_levels): # there are 19 levels in total
h, w = self.featmap_sizes[i]
hm = np.zeros((self.cls_out_channels, h, w), dtype=np.float32)
heatmaps_targets.append(hm)
wh = np.zeros((h, w, 2), dtype=np.float32)
wh_targets.append(wh)
offset = np.zeros((h, w, 2), dtype=np.float32)
offset_targets.append(offset)
for k in range(num_objs):
bbox = gt_bboxes[k]
cls_id = gt_labels[k]
if img_meta['flipped']:
bbox[[0, 2]] = img_meta['width'] - bbox[[2, 0]] - 1
# condition: in the regress_ranges
origin_h, origin_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
# decide which output level this box belongs to, based on its w and h
index_i = 0
index_j = 0
for i in range(5):
min_regress_distance, max_regress_distance = self.regress_ranges[i]
if (origin_w > min_regress_distance) and (origin_w <= max_regress_distance):
index_i = i
break
for j in range(5):
min_regress_distance, max_regress_distance = self.regress_ranges[j]
if (origin_h > min_regress_distance) and (origin_h <= max_regress_distance):
index_j = j
break
if self.flags[index_i][index_j] == 1: # this (w, h) level combination is masked out
continue
index_level = self.index_map[index_i * 5 + index_j]
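# Worked example (illustrative values): a box with origin_w=60 gives index_i=1 and
# origin_h=200 gives index_j=3; flags[1][3] == 0 so it is kept, and it maps to
# index_map[1*5 + 3] = index_map[8] = 6, i.e. the feature level with stride 8.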
output_h, output_w = self.featmap_sizes[index_level]
#print(output_h, output_w)
hm = heatmaps_targets[index_level]
wh = wh_targets[index_level]
offset = offset_targets[index_level]
# c, s is passed by meta
trans_output = get_affine_transform(img_meta['c'], img_meta['s'], 0, [output_w, output_h])
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = | np.clip(bbox[[0, 2]], 0, output_w - 1) | numpy.clip |
# Author: <NAME>
# email: <EMAIL>
# this file includes functions about transforming the bounding box
import numpy as np, math, time, copy
from math import radians as rad
from .private import safe_bbox, safe_center_bbox, bboxcheck_TLBR, bboxcheck_TLWH
from .math_geometry import get_2dline_from_pts_slope, get_2dpts_from_lines
from .math_conversion import imagecoor2cartesian, cartesian2imagecoor
from xinshuo_miscellaneous import isnparray, is2dptsarray, is2dptsarray_occlusion, is2dptsarray_confidence, is2dpts, isinteger, isbbox, islist, iscenterbbox
# general format instruction
# TLBR: top left bottom right, stands for two corner points, the top left point is included, the bottom right point is not included
# e.g., TLBR = [5, 5, 10, 10], it indicates point coordinates from 5 to 9, not including 10
# TLWH: top left width height, stands for one corner point and range, the range means how many points are included along an axis
# e.g., TLWH = [0, 0, 5, 5], it indicates point coordinates from 0 to 4, not including 5
############################################# format transform #################################
def bbox_TLBR2TLWH(bboxes_in, warning=True, debug=True):
'''
transform the input bounding box with TLBR format to TLWH format
parameters:
bboxes_in: TLBR format, a list of 4 elements, a listoflist of 4 elements: e.g., [[1,2,3,4], [5,6,7,8]],
a numpy array with shape or (N, 4) or (4, )
outputs:
bbox_TLWH: N X 4 numpy array, TLWH format
'''
np_bboxes = safe_bbox(bboxes_in, warning=warning, debug=debug)
if debug: assert bboxcheck_TLBR(np_bboxes, warning=warning, debug=debug), 'the input bounding box should be TLBR format'
bbox_TLWH = np.zeros_like(np_bboxes)
bbox_TLWH[:, 0] = np_bboxes[:, 0]
bbox_TLWH[:, 1] = np_bboxes[:, 1]
bbox_TLWH[:, 2] = np_bboxes[:, 2] - np_bboxes[:, 0]
bbox_TLWH[:, 3] = np_bboxes[:, 3] - np_bboxes[:, 1]
return bbox_TLWH
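# Example (illustrative input; the box follows the TLBR convention described above):
#   bbox_TLBR2TLWH([5, 5, 10, 10])
#   -> [[5, 5, 5, 5]]   # top-left (5, 5), width 5, height 5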
def bbox_TLWH2TLBR(bboxes_in, warning=True, debug=True):
'''
transform the input bounding box with TLWH format to TLBR format
parameters:
bboxes_in: TLWH format, a list of 4 elements, a listoflist of 4 elements: e.g., [[1,2,3,4], [5,6,7,8]],
a numpy array with shape or (N, 4) or (4, )
outputs:
bbox_TLBR: N X 4 numpy array, TLBR format
'''
np_bboxes = safe_bbox(bboxes_in, warning=warning, debug=debug)
if debug: assert bboxcheck_TLWH(np_bboxes, warning=warning, debug=debug), 'the input bounding box should be TLWH format'
bbox_TLBR = np.zeros_like(np_bboxes)
bbox_TLBR[:, 0] = np_bboxes[:, 0]
bbox_TLBR[:, 1] = np_bboxes[:, 1]
bbox_TLBR[:, 2] = np_bboxes[:, 2] + np_bboxes[:, 0]
bbox_TLBR[:, 3] = np_bboxes[:, 3] + np_bboxes[:, 1]
return bbox_TLBR
############################################# 2D transform #################################
def clip_bboxes_TLBR(bboxes_in, im_width, im_height, warning=True, debug=True):
'''
this function clips bboxes inside the image boundary, the coordinates in the clipped bbox are half-included [x, y)
parameters:
bboxes_in: TLBR format, a list of 4 elements, a listoflist of 4 elements: e.g., [[1,2,3,4], [5,6,7,8]],
a numpy array with shape or (N, 4) or (4, )
im_width/im_height: scalar
outputs:
clipped_bboxes: TLBR format, numpy array with shape of (N, 4)
'''
np_bboxes = safe_bbox(bboxes_in, warning=warning, debug=debug)
if debug:
assert isinteger(im_width) and isinteger(im_height), 'the image width and height are not correct'
assert bboxcheck_TLBR(np_bboxes, warning=warning, debug=debug), 'the input bboxes are not good'
clipped_bboxes = np.zeros_like(np_bboxes)
clipped_bboxes[:, 0] = np.maximum(np.minimum(np_bboxes[:, 0], im_width), 0) # x1 >= 0 & x1 <= width, included
clipped_bboxes[:, 1] = np.maximum(np.minimum(np_bboxes[:, 1], im_height), 0) # y1 >= 0 & y1 <= height, included
clipped_bboxes[:, 2] = np.maximum(np.minimum(np_bboxes[:, 2], im_width), 0) # x2 >= 0 & x2 <= width, not included
clipped_bboxes[:, 3] = np.maximum(np.minimum(np_bboxes[:, 3], im_height), 0) # y2 >= 0 & y2 <= height, not included
return clipped_bboxes
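# Example (illustrative): clipping a box that sticks out of a 10 x 10 image
#   clip_bboxes_TLBR([-3, 2, 15, 12], im_width=10, im_height=10)
#   -> [[0, 2, 10, 10]]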
def clip_bboxes_TLWH(bboxes_in, im_width, im_height, warning=True, debug=True):
'''
this function clips bboxes inside the image boundary
parameters:
bboxes_in: TLWH format, a list of 4 elements, a listoflist of 4 elements: e.g., [[1,2,3,4], [5,6,7,8]],
a numpy array with shape or (N, 4) or (4, )
im_width/im_height: scalar
outputs:
clipped_bboxes_TLWH: TLWH format, numpy array with shape of (N, 4)
'''
np_bboxes = safe_bbox(bboxes_in, warning=warning, debug=debug)
if debug:
assert isinteger(im_width) and isinteger(im_height), 'the image width and height are not correct'
assert bboxcheck_TLWH(np_bboxes, warning=warning, debug=debug), 'the input bboxes are not good'
bboxes_TLBR = bbox_TLWH2TLBR(np_bboxes, debug=debug)
clipped_bboxes_TLBR = clip_bboxes_TLBR(bboxes_TLBR, im_width, im_height, warning=warning, debug=debug)
clipped_bboxes_TLWH = bbox_TLBR2TLWH(clipped_bboxes_TLBR, warning=warning, debug=debug)
return clipped_bboxes_TLWH
def get_center_crop_bbox(center_bboxes_in, im_width=None, im_height=None, warning=True, debug=True):
'''
obtain a bbox to crop around a center point
parameters:
center_bboxes_in: a list of 2 or 4 scalar elements, or (N, 2) / (N, 4) numpy array
2 - > [crop_width, crop_height], the center is the image center
4 - > [center_x, center_y, crop_width, crop_height]
im_width/im_height: scalar
outputs:
crop_bboxes: TLHW format, an int64 numpy array with shape of (N, 4)
'''
np_center_bboxes = safe_center_bbox(center_bboxes_in, warning=warning, debug=debug)
if debug: assert iscenterbbox(np_center_bboxes), 'the center bbox does not have a good shape'
if np_center_bboxes.shape[1] == 4: # crop around the given center and width and height
center_x = np_center_bboxes[:, 0]
center_y = np_center_bboxes[:, 1]
crop_width = np_center_bboxes[:, 2]
crop_height = np_center_bboxes[:, 3]
else: # crop around the center of the image
if debug: assert (im_width is not None) and (im_height is not None), 'the image shape should be known when center is not provided'
center_x = np.ceil(im_width / 2)
center_y = np.ceil(im_height / 2)
crop_width = np_center_bboxes[:, 0]
crop_height = np_center_bboxes[:, 1]
xmin = center_x - np.ceil(crop_width / 2)
ymin = center_y - np.ceil(crop_height / 2)
crop_bboxes = np.hstack((xmin.reshape((-1, 1)), ymin.reshape((-1, 1)), crop_width.reshape((-1, 1)), crop_height.reshape((-1, 1))))
crop_bboxes = crop_bboxes.astype('int64')
return crop_bboxes
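# Example (illustrative): a 20 x 10 crop centered at (50, 50)
#   get_center_crop_bbox([50, 50, 20, 10])
#   -> [[40, 45, 20, 10]]   # TLWH, ready to be used as a crop region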
############################################# pts related transform #################################
def pts2bbox(pts, debug=True, vis=False):
'''
convert a set of 2d points to a bounding box
parameter:
pts: 2 x N numpy array, N should >= 2
return:
bbox: 1 x 4 numpy array, TLBR format
'''
if debug:
assert is2dptsarray(pts) or is2dptsarray_occlusion(pts), 'the input points should have shape: 2 or 3 x num_pts vs %d x %s' % (pts.shape[0], pts.shape[1])
assert pts.shape[1] >= 2, 'number of points should be larger or equal than 2'
bbox = np.zeros((1, 4), dtype='float32')
bbox[0, 0] = np.min(pts[0, :]) # x coordinate of left top point
bbox[0, 1] = np.min(pts[1, :]) # y coordinate of left top point
bbox[0, 2] = np.max(pts[0, :]) # x coordinate of bottom right point
bbox[0, 3] = np.max(pts[1, :]) # y coordinate of bottom right point
# if vis:
# fig = plt.figure()
# pts = imagecoor2cartesian(pts)
# plt.scatter(pts[0, :], pts[1, :], color='r')
# plt.scatter(bbox[0, 0], -bbox[0, 1], color='b') # -1 is to convert the coordinate from image to cartesian
# plt.scatter(bbox[0, 2], -bbox[0, 3], color='b')
# plt.show()
# plt.close(fig)
return bbox
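# Example (illustrative): three 2D points given as a 2 x N array
#   pts2bbox(np.array([[1, 4, 2], [3, 0, 5]]))
#   -> [[1., 0., 4., 5.]]   # tight TLBR box around the points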
def bbox2center(bboxes_in, debug=True, vis=False):
'''
convert a bounding box to a point, which is the center of this bounding box
parameter:
bbox: N x 4 numpy array, TLBR format
return:
center: 2 x N numpy array, x and y correspond to first and second row respectively
'''
np_bboxes = safe_bbox(bboxes_in, debug=debug)
if debug: assert bboxcheck_TLBR(np_bboxes), 'the input bounding box should be TLBR format'
num_bbox = np_bboxes.shape[0]
center = np.zeros((num_bbox, 2), dtype='float32')
center[:, 0] = (np_bboxes[:, 0] + np_bboxes[:, 2]) / 2.
center[:, 1] = (np_bboxes[:, 1] + np_bboxes[:, 3]) / 2.
# if vis:
# fig = plt.figure()
# plt.scatter(np_bboxes[0, 0], -np_bboxes[0, 1], color='b') # -1 is to convert the coordinate from image to cartesian
# plt.scatter(np_bboxes[0, 2], -np_bboxes[0, 3], color='b')
# center_show = imagecoor2cartesian(center)
# plt.scatter(center_show[0], center_show[1], color='r')
# plt.show()
# plt.close(fig)
return np.transpose(center)
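# Example (illustrative):
#   bbox2center(np.array([[0, 0, 10, 20]]))
#   -> [[ 5.], [10.]]   # 2 x N array of centers: first row x, second row y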
def pts_conversion_bbox(pts_array, bboxes_in, debug=True):
'''
convert pts in the original image to pts in the cropped image
parameters:
bboxes_in: 1 X 4 numpy array, TLBR or TLWH format
pts_array: 2(3) x N numpy array, N should >= 1
'''
np_bboxes = safe_bbox(bboxes_in, debug=debug)
if debug:
assert is2dptsarray(pts_array) or is2dptsarray_occlusion(pts_array) or is2dptsarray_confidence(pts_array), 'the input points should have shape: 2 or 3 x num_pts vs %d x %s' % (pts_array.shape[0], pts_array.shape[1])
assert isbbox(np_bboxes), 'the input bounding box is not correct'
pts_out = pts_array.copy()
pts_out[0, :] = pts_array[0, :] - np_bboxes[0, 0]
pts_out[1, :] = pts_array[1, :] - np_bboxes[0, 1]
return pts_out
def pts_conversion_back_bbox(pts_array, bboxes_in, debug=True):
'''
convert pts in the cropped image to the pts in the original image
parameters:
bboxes_in: 1 X 4 numpy array, TLBR or TLWH format
pts_array: 2(3) x N numpy array, N should >= 1
'''
np_bboxes = safe_bbox(bboxes_in, debug=debug)
if debug:
assert is2dptsarray(pts_array) or is2dptsarray_occlusion(pts_array) or is2dptsarray_confidence(pts_array), 'the input points should have shape: 2 or 3 x num_pts vs %d x %s' % (pts_array.shape[0], pts_array.shape[1])
assert isbbox(np_bboxes), 'the input bounding box is not correct'
pts_out = pts_array.copy()
pts_out[0, :] = pts_array[0, :] + np_bboxes[0, 0]
pts_out[1, :] = pts_array[1, :] + np_bboxes[0, 1]
return pts_out
############################################# to test #################################
def bbox_transform(ex_rois, gt_rois):
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas, debug=True):
'''
boxes come from the RPN; deltas are the predicted box regression parameters
'''
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths # center of the boxes
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2
return pred_boxes
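# Example (illustrative values) of how bbox_transform and bbox_transform_inv pair up:
#   ex = np.array([[0., 0., 10., 10.]])   # proposal
#   gt = np.array([[2., 2., 12., 12.]])   # ground truth
#   d  = bbox_transform(ex, gt)           # -> [[~0.18, ~0.18, 0., 0.]]
#   bbox_transform_inv(ex, d)             # -> [[2., 2., 13., 13.]]
# Note the round trip is only approximate: bbox_transform uses the "+1" width
# convention while bbox_transform_inv does not subtract it back, so x2/y2 come out
# one pixel larger than the original ground truth corner.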
def bbox_rotation_inv(bbox_in, angle_in_degree, image_shape, debug=True):
'''
bbox_in is two coordinate
angle is clockwise
'''
if debug:
assert isnparray(bbox_in) and bbox_in.size == 4, 'box is not correct'
im_width = image_shape[1]
im_height = image_shape[0]
coor_in_tl = | np.array([(bbox_in[0] - im_width/2)/im_width*2, (bbox_in[1] - im_height/2)/im_height*2, 1]) | numpy.array |
# -*- coding: UTF-8 -*-
import math
import pymatgen as mg
from ase.utils import gcd, basestring
from ase.build import bulk
from copy import deepcopy
from numpy.linalg import norm, solve
from pymatgen.analysis.graphs import MoleculeGraph, StructureGraph
from pymatgen.core.structure import Molecule
from pymatgen.io.vasp.inputs import Poscar
from ase import io
import networkx.algorithms.isomorphism as iso
import numpy as np
import networkx as nx
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
# used for deciding which atoms are bonded
from pymatgen.analysis.local_env import JmolNN
import os
import sys
import time
from functools import wraps
from collections import Counter
def from_ASE_to_pymatgen(working_dir, images):
"""
change ASE structure to pymatgen structure
"""
file_name = working_dir + "/temp.POSCAR.vasp"
io.write(file_name, images)
modify_poscar(file_name)
slab = mg.Structure.from_file(file_name)
os.remove(file_name)
return slab
def modify_poscar(file):
"""
Change the file to compliant POSCAR.
"""
index = 0
prev_file = open(file, 'r')
new_file = open(file+'.new', 'w')
for line in prev_file:
if index == 0:
tmp = line
new_file.write('slab\n')
elif index == 5:
#new_file.write(tmp)
new_file.write(line)
else:
new_file.write(line)
index = index+1
prev_file.close()
new_file.close()
os.remove(file)
os.rename(file+'.new', file)
def surface(lattice, indices, layers, tol=1e-10, termination=0):
"""Create surface from a given lattice and Miller indices.
lattice: Atoms object or str
Bulk lattice structure of alloy or pure metal. Note that the
unit-cell must be the conventional cell - not the primitive cell.
One can also give the chemical symbol as a string, in which case the
correct bulk lattice will be generated automatically.
indices: sequence of three int
Surface normal in Miller indices (h,k,l).
layers: int
Number of equivalent layers of the slab.
termination: int
The termination "number" for your crystal. The same value will not
produce the same termination for different symmetrically identical
bulk structures, but changing this value allows you to explore all
the possible terminations for the bulk structure you provide it.
note: this code is not well tested
"""
indices = np.asarray(indices)
if indices.shape != (3,) or not indices.any() or indices.dtype != int:
raise ValueError('%s is an invalid surface type' % indices)
if isinstance(lattice, basestring):
lattice = bulk(lattice, cubic=True)
h, k, l = indices
h0, k0, l0 = (indices == 0)
if termination != 0: # changing termination
import warnings
warnings.warn('Work on changing terminations is currently in '
'progress. Code may not behave as expected.')
lattice1 = deepcopy(lattice)
cell = lattice1.get_cell()
pt = [0, 0, 0]
millers = list(indices)
for index, item in enumerate(millers):
if item == 0:
millers[index] = 10 ** 9 # make zeros large numbers
elif pt == [0, 0, 0]: # for numerical stability
pt = list(cell[index] / float(item) /
np.linalg.norm(cell[index]))
h1, k1, l1 = millers
N = np.array(cell[0] / h1 + cell[1] / k1 + cell[2] / l1)
n = N / np.linalg.norm(N) # making a unit vector normal to cut plane
d = [np.round(np.dot(n, (a - pt)), 4)
for a in lattice.get_scaled_positions()]
d = set(d)
d = sorted(list(d))
d = [0] + d # distances of atoms from cut plane
displacement = (h * cell[0] + k * cell[1] +
l * cell[2]) * d[termination]
lattice1.positions += displacement
lattice = lattice1
if h0 and k0 or h0 and l0 or k0 and l0: # if two indices are zero
if not h0:
c1, c2, c3 = [(0, 1, 0), (0, 0, 1), (1, 0, 0)]
if not k0:
c1, c2, c3 = [(0, 0, 1), (1, 0, 0), (0, 1, 0)]
if not l0:
c1, c2, c3 = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
else:
p, q = ext_gcd(k, l)
a1, a2, a3 = lattice.cell
# constants describing the dot product of basis c1 and c2:
# dot(c1,c2) = k1+i*k2, i in Z
k1 = np.dot(p * (k * a1 - h * a2) + q * (l * a1 - h * a3),
l * a2 - k * a3)
k2 = np.dot(l * (k * a1 - h * a2) - k * (l * a1 - h * a3),
l * a2 - k * a3)
if abs(k2) > tol:
i = -int(round(k1 / k2)) # i corresponding to the optimal basis
p, q = p + i * l, q - i * k
a, b = ext_gcd(p * k + q * l, h)
c1 = (p * k + q * l, -p * h, -q * h)
c2 = np.array((0, l, -k)) // abs(gcd(l, k))
c3 = (b, a * p, a * q)
surf = build(lattice, np.array([c1, c2, c3]), layers, tol)
return surf
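# Example usage (illustrative; assumes ASE is installed, so the string 'Al' is expanded
# to ASE's cubic Al bulk as in the isinstance branch above):
#   al111 = surface('Al', (1, 1, 1), layers=4)
#   al111 = modify_cell(al111)   # align z with the surface normal (defined below)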
def ext_gcd(a, b):
"""
Extended Euclidean Algorithm. Find the result for ax + by = gcd(a, b).
Parameters
----------
a: int
b: int
"""
if b == 0:
return 1, 0
elif a % b == 0:
return 0, 1
else:
x, y = ext_gcd(b, a % b)
return y, x - y * (a // b)
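# Worked example (illustrative): ext_gcd(12, 8) returns (1, -1),
# since 12*1 + 8*(-1) = 4 = gcd(12, 8).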
def build(lattice, basis, layers, tol):
"""
Transform the structure to original surface based on basis.
Parameters
----------
basis: 3 * 3 matrix, [[a, b, c], ...]
the basis vectors of the target surfaces.
lattice: Atoms object or str
Bulk lattice structure of alloy or pure metal. Note that the
unit-cell must be the conventional cell - not the primitive cell.
One can also give the chemical symbol as a string, in which case the
correct bulk lattice will be generated automatically.
layers: int
Number of equivalent layers of the slab.
"""
surf = lattice.copy()
scaled = solve(basis.T, surf.get_scaled_positions().T).T
scaled -= np.floor(scaled + tol)
surf.set_scaled_positions(scaled)
surf.set_cell(np.dot(basis, surf.cell), scale_atoms=True)
surf *= (1, 1, layers)
return surf
def modify_cell(structure):
"""
This is the final step of a molecular reconstruction step, and would
align z direction to be perpendicular to the surface
Parameters
---------
structure: Atoms object or str
In this structure, the z direction might not be perpendicular to the
target surface.
"""
slab = structure.copy()
a1, a2, a3 = slab.cell
slab.set_cell([a1, a2,
np.cross(a1, a2) * np.dot(a3, np.cross(a1, a2)) /
norm(np.cross(a1, a2)) ** 2])
# Change unit cell to have the x-axis parallel with a surface vector
# and z perpendicular to the surface:
a1, a2, a3 = slab.cell
slab.set_cell([(norm(a1), 0, 0),
(np.dot(a1, a2) / norm(a1),
np.sqrt(norm(a2) ** 2 - (np.dot(a1, a2) / norm(a1)) ** 2), 0),
(0, 0, norm(a3))],
scale_atoms=True)
slab.pbc = (True, True, False)
scaled = slab.get_scaled_positions()
scaled[:, :2] %= 1
slab.set_scaled_positions(scaled)
return slab
def handle_with_molecules(slab_move, delta, down=True):
"""
Move some very small fragments of broken molecules to the other side. This is
a preparation step for the move_method and reduces the cases it cannot handle.
Parameters
----------
slab_move: Atoms structure
slab_move is the original surfaces that is generated by ase library.
delta: list of double, [delta_x, delta_y, delta_z]
Add or subtract the delta (cart_coords) to the tiny broken molecules to
initially repair parts of molecules.
down: bool
True: Add a delta to the tiny broken molecules that are located at the bottom,
False: subtract a delta to the tiny broken molecules that are located at the top.
"""
slab_sg = StructureGraph.with_local_env_strategy(slab_move, JmolNN())
slab_supercell_sg = slab_sg * (3, 3, 1)
slab_sg_graph = nx.Graph(slab_supercell_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(slab_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(slab_supercell_sg.structure[n].specie))
molecules = []
for subgraph in super_subgraphs:
coords = [slab_supercell_sg.structure[n].coords
for n in subgraph.nodes()]
# get the fractional z coordinate of every atom of every molecule
coord_z_list = [slab_move.lattice.get_fractional_coords(
coord)[-1] for coord in coords]
if down is True:
temp = [coord_z < 0.5 for coord_z in coord_z_list]
else:
temp = [coord_z > 0.5 for coord_z in coord_z_list]
if not all(temp) or len(coords) > 6:
continue
species = [slab_supercell_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
# molecules are the list of molecules that need to be moved
move_list = []
move_sites = reduced_sites(molecules, slab_move)
for move_site in move_sites:
for i, atom in enumerate(slab_move):
if atom.is_periodic_image(move_site):
move_list.append(i)
break
coords_move = slab_move.cart_coords
species_move = slab_move.species
slab_move.remove_sites(move_list)
for i in move_list:
if down is True:
new_coord = np.array(coords_move[i]) + np.array(delta)
else:
new_coord = np.array(coords_move[i]) - np.array(delta)
slab_move.append(species_move[i], new_coord, coords_are_cartesian=True)
return slab_move
def Find_Broken_Molecules(slab, sg, species_intact, coords_intact, unique_bulk_subgraphs):
"""
Use molecular identification method to find those molecules in the surface
that are different from that in the bulk.
Parameters
----------
slab: Atoms structure
The surface that is generated by ase library and might have broken molecules.
sg: list of Molecules
Unique Molecules in bulk Structure.
species_intact: list, ['specie_1', 'specie_2', ...]
A list of atomic species of intact molecules.
coords_intact: list, [[coord_1_1, coord_1_2, coord_1_3], ...]
A list of atomic cart_coords of intact molecules.
unique_bulk_subgraphs: list of graphs
A list of intact molecules' graphs. Note that every graph is this list
is unique
"""
slab_sg = StructureGraph.with_local_env_strategy(slab, JmolNN())
# enlarge the cell to a (3 * 3 * 1) super_cell
slab_supercell_sg = slab_sg * (3, 3, 1)
different_subgraphs_in_slab, slab_molecules = \
get_slab_different_subgraphs(slab_supercell_sg, unique_bulk_subgraphs)
slab_molecules = double_screen(slab_molecules, sg)
# the molecules in slab_original would be the template
#print("The number of molecules that need to be fixed : " +
# str(len(slab_molecules)))
# slab_molecules are the molecules that are broken and need to be fixed
delete_sites = reduced_sites(slab_molecules, slab)
# delete_list is the list of broken atoms
delete_list = []
for delete_site in delete_sites:
for i, atom in enumerate(slab):
if atom.is_periodic_image(delete_site):
delete_list.append(i)
break
species_all = slab.species
coords_all = slab.cart_coords
for i, atom in enumerate(slab):
temp = [i == delete for delete in delete_list]
if not any(temp):
species_intact.append(species_all[i])
coords_intact.append(coords_all[i])
delete_list = []
# remove intact molecules in the slab for convenience
#print("Delete all atoms!")
for i, atom in enumerate(slab):
delete_list.append(i)
slab.remove_sites(delete_list)
sites = []
for slab_molecule in slab_molecules:
for curr_site in slab_molecule:
curr_site = mg.PeriodicSite(curr_site.specie,
curr_site.coords,
slab.lattice,
coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
for site in sites:
# add the broken molecules into the system
slab.append(species=site.specie, coords=site.coords,
coords_are_cartesian=True)
return slab
def get_broken_molecules(self, bulk_subgraphs, use_weights=False):
# compare each molecule in slab to each molecule in the bulk,
# discard isomorphic matches; the remaining molecules are the broken ones
"""
Retrieve broken_subgraphs as molecules
Will return nonunique molecules, duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Returns:
-------
: list of nonunique broken Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
# discount subgraphs that lie across *supercell* boundaries
# these would be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# remove complete molecules in subgraphs
different_subgraphs = []
start = time.time()
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=nm)
for g in bulk_subgraphs]
if not any(already_present):
different_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in different_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
#molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
# now define how we test for isomorphism
def node_match(n1, n2):
"""the strategy for node matching in is_isomorphic.
Parameters
------
n1, n2 : node
Returns:
-------
True of false : bool
based on whether the species of two nodes are the same.
"""
return n1['specie'] == n2['specie']
def get_bulk_molecules(self, use_weights=False):
# get rid of repetitive molecules in the bulk, leaving only the unique molecules
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
Parameters:
------
use_weights: (bool) If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
Returns:
-------
list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
# enlarge the structureGraph object to a supercell
supercell_sg = self*(3, 3, 1)
# make undirected to find connected subgraphs:
# create a networkx undirected graph object to store the input graph
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(
supercell_sg.graph))
# add specie names to graph to be able to test for isomorphism
for subgraph in all_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
nm = iso.categorical_node_match("specie", "ERROR")
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in all_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
molecules.append(molecule)
return molecules, unique_subgraphs
################# convert to an undirected nx.Graph and then determine if isomorphic ###############
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
Parameters:
----------
other: MoleculeGraph object to be compared.
Returns:
-------
bool
"""
if self.molecule.composition != other.molecule.composition:
return False
else:
self_undir = self.graph.to_undirected()
other_undir = other.graph.to_undirected()
nm = iso.categorical_node_match("specie", "ERROR")
isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)
return isomorphic
def reduced_sites(molecules, slab):
"""
Find atoms that appear again due to the periodicity.
Parameters:
-----------
molecules: List[molecule].
All molecules that might be within or out of the slab boundary.
slab: ASE structure.
Slab structure.
Returns:
--------
sites: List[atom].
"""
sites = []
for molecule in molecules:
for curr_site in molecule:
curr_site = PeriodicSite(
curr_site.specie, curr_site.coords, slab.lattice, coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
return sites
def is_isomorphic(molecule1, molecule2):
"""
Determine whether two molecules are the same.
Parameters:
-----------
molecule1 and molecule2.
Returns:
--------
bool.
"""
return isomorphic_to(MoleculeGraph.with_local_env_strategy(molecule1, JmolNN()), MoleculeGraph.with_local_env_strategy(molecule2, JmolNN()))
def double_screen(slab_molecules, bulk_molecules):
"""
Double check with bulk if there is any molecule already present in bulk
"""
delete_list = []
for bulk_molecule in bulk_molecules:
for i, slab_molecule in enumerate(slab_molecules):
if is_isomorphic(bulk_molecule, slab_molecule):
delete_list.append(i)
tmp = [x for i, x in enumerate(slab_molecules) if i not in delete_list]
return tmp
def print_run_time(func):
"""
A wrapper that prints the run time of a function.
"""
@wraps(func)
def wrapper(*args, **kw):
local_time = time.time()
func(*args, **kw)
print('Current Function [%s] run time is %.2fs' %
(func.__name__, time.time() - local_time))
return wrapper
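# Illustrative usage of the decorator above (the decorated task is hypothetical):
@print_run_time
def _example_timed_task():  # pragma: no cover
    time.sleep(0.05)  # stand-in for real work; calling _example_timed_task() prints its run time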
def updatePOSCAR(output_file):
"""This function is used to correct the output file (POSCAR) of ase.
Parameters:
----------
output_file : str
The file of surface written by the write function of ase.
Returns:
-------
file : str
The file that is corrected.
"""
with open(output_file, 'r') as original_file:
lines = original_file.readlines()
line1 = lines[0]
lines.insert(5, " " + line1)
with open(output_file, 'w') as final_file_1:
for i in range(len(lines)):
final_file_1.writelines(lines[i])
structure = mg.Structure.from_file(output_file)
lattice = Lattice(structure.lattice.matrix)
frac_coords = lattice.get_fractional_coords(structure.cart_coords)
for i in range(frac_coords.shape[0]):
for j in range(frac_coords.shape[1]):
if abs(frac_coords[i][j] - 1) < 1e-5:
frac_coords[i][j] = 1
if abs(frac_coords[i][j] - 0) < 1e-5:
frac_coords[i][j] = 0
with open(output_file, 'r') as final_file_2:
lines = final_file_2.readlines()
lines[7] = 'Direct' + '\n'
for i in range(np.array(frac_coords).shape[0]):
lines[8 + i] = " " + str(np.array(frac_coords)[i, :][0]) + ' ' + str(np.array(frac_coords)[i, :][1]) +\
' ' + str(np.array(frac_coords)[i, :][2]) + '\n'
with open(output_file, 'w') as final_file:
for i in range(len(lines)):
final_file.writelines(lines[i])
def edge_match(e1, e2):
"""the strategy for edge matching in is_isomorphic.
Parameters:
----------
e1, e2 : edge).
Returns:
-------
True or false : bool
based on whether the length of bonds are the same or close to each other.
"""
return abs(e1['weight'] - e2['weight']) / e2['weight'] < 1e-5
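# Quick illustrative check of the relative tolerance above (hypothetical bond weights):
def _example_edge_match():  # pragma: no cover
    assert edge_match({'weight': 1.5200000001}, {'weight': 1.52})   # relative error ~7e-11
    assert not edge_match({'weight': 1.53}, {'weight': 1.52})       # relative error ~0.66%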
def get_bulk_subgraphs(bulk_structure_sg):
"""
Get all subgraphs of molecules that are within or cross the boundary of
the original bulk.
Parameters:
-----------
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
super_subgraphs : List[graph].
The subgraphs of molecules that are within or cross the
boundary of the original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
in_boundary = any([d['to_jimage'] == (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if in_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
for subgraph in list(super_subgraphs):  # iterate over a copy because items may be removed
if len(subgraph) == 1 and "H" in [str(bulk_structure_sg.structure[n].specie) for n in subgraph.nodes()]:
super_subgraphs.remove(subgraph)
continue
molecules = []
for subgraph in super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return super_subgraphs, molecules
def get_bulk_subgraphs_v2(bulk_structure_sg):
"""
Get all subgraphs of molecules that are within or cross the boundary of
the original bulk.
Parameters:
-----------
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
super_subgraphs : List[graph].
The subgraphs of molecules that are within or cross the
boundary of the original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
for subgraph in all_super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
molecules = []
for subgraph in all_super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return all_super_subgraphs, molecules
def get_bulk_subgraphs_v3(slab_first, bulk_structure_sg):
"""
Get all subgraphs of molecules that are within or cross the boundary of
the original bulk, and generate a HashGraph to modify the atoms' positions.
Parameters:
-----------
slab_first: pymatgen structure.
Original slab structure that cleaved by ASE
bulk_structure_sg: StructureGraph.
The structure graph of bulk with local env strategy.
Returns:
--------
delta_cart: List[float].
The c lattice vector, i.e. the translation between two adjacent layers.
super_subgraphs : List[graph].
The subgraphs of molecules that are within or cross the
boundary of the original bulk.
molecules : List[molecule].
Molecules that are correlated to the subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
for subgraph in all_super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
frac_coods = [0] * len(bulk_structure_sg.structure)
initial_index = -100
molecules = []
for subgraph in all_super_subgraphs:
HashGraph = {}
for u, v, d in subgraph.edges(data=True):
change_z = list(d['to_jimage'])[-1]
if change_z != 0:
change_z = 1 if slab_first.lattice.get_fractional_coords(bulk_structure_sg.structure[u].coords)[-1] > slab_first.lattice.get_fractional_coords(bulk_structure_sg.structure[v].coords)[-1] else -1
try:
HashGraph[str(u)].append([str(v), change_z])
except KeyError:
HashGraph[str(u)] = [initial_index, [str(v), change_z]]
try:
HashGraph[str(v)].append([str(u), -change_z])
except KeyError:
HashGraph[str(v)] = [initial_index, [str(u), -change_z]]
first_node = list(HashGraph.keys())[0]
count = 1
HashGraph[first_node][0] = 0
Pending_node = [first_node]
Pending_node_2 = []
while(count < len(list(HashGraph.keys()))):
for node in Pending_node:
for value in HashGraph[node][1: ]:
if HashGraph[value[0]][0] == initial_index:
count += 1
HashGraph[value[0]][0] = HashGraph[node][0] + value[1]
Pending_node_2.append(value[0])
Pending_node = deepcopy(Pending_node_2)
Pending_node_2 = []
# min_z = min([value[0] for value in HashGraph.values()])
min_z = int(Counter([value[0] for value in HashGraph.values()]).most_common(1)[0][0])
delta = np.array(slab_first.lattice.matrix[-1])
for key in HashGraph.keys():
HashGraph[key][0] -= min_z
coords = [bulk_structure_sg.structure[n].coords + delta * HashGraph[str(n)][0]
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return delta, all_super_subgraphs, molecules
def get_bulk_subgraphs_unique(bulk_structure_sg):
"""
get unique subgraphs of bulk based on graph algorithm.
This function only returns unique molecules and their graphs,
not any duplicates present in the crystal
(a duplicate is defined as an isomorphic subgraph).
Parameters:
-----------
bulk_structure_sg : StructureGraph,
this is actually the supercell graph, equal to (3, 3, 3) * the unit cell.
Returns:
--------
unique_super_subgraphs : (list) [graph, ...],
represent the unique subgraphs in the supercell, especially
in the boundary of the supercell.
molecules : (list) [molecule, ...],
represent the molecules that correspond to the unique subgraphs.
"""
bulk_super_structure_sg_graph = nx.Graph(bulk_structure_sg.graph)
all_super_subgraphs = list(nx.connected_component_subgraphs
(bulk_super_structure_sg_graph))
super_subgraphs = []
for subgraph in all_super_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
super_subgraphs.append(subgraph)
for subgraph in super_subgraphs:
for n in subgraph:
subgraph.add_node(n,
specie=str(bulk_structure_sg.structure[n].specie))
unique_super_subgraphs = []
for subgraph in super_subgraphs:
if len(subgraph) == 1 and "H" in [str(bulk_structure_sg.structure[n].specie) for n in subgraph.nodes()]:
continue
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_super_subgraphs]
if not any(already_present):
unique_super_subgraphs.append(subgraph)
molecules = []
for subgraph in unique_super_subgraphs:
coords = [bulk_structure_sg.structure[n].coords
for n in subgraph.nodes()]
species = [bulk_structure_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules.append(molecule)
return unique_super_subgraphs, molecules
def get_slab_different_subgraphs(slab_supercell_sg, unique_super_bulk_subgraphs):
"""this function is used to find all the subgraphs in slab that
are different from those in bulk.
Parameters:
----------
slab_supercell_sg : nx.StructureGraph,
the graph of the whole slab.
Note: in order to thoroughly describe the graph,
the slab_supercell_sg = (3, 3, 1) * slab_sg
unique_super_bulk_subgraphs : list.
Returns:
-------
different_subgraphs : list
[different_subgraph, ...], which is the list of subgraphs that
are different from those in bulk. In this function,
we would only find the different subgraphs based on its species.
slab_molecules : list
[slab_molecule, ...], where slab_molecule is the mg.Molecule of the corresponding different_subgraph.
"""
slab_supercell_sg_graph = nx.Graph(slab_supercell_sg.graph)
all_subgraphs = list(nx.connected_component_subgraphs
(slab_supercell_sg_graph))
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
#print("molecule_subgraphs : ", len(molecule_subgraphs))
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(
slab_supercell_sg.structure[n].specie))
nm = iso.categorical_node_match("specie", "ERROR")
different_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=nm)
for g in unique_super_bulk_subgraphs]
if not any(already_present):
different_subgraphs.append(subgraph)
slab_molecules = []
for subgraph in different_subgraphs:
coords = [slab_supercell_sg.structure[n].coords
for n in subgraph.nodes()]
species = [slab_supercell_sg.structure[n].specie
for n in subgraph.nodes()]
molecule = mg.Molecule(species=species, coords=coords)
slab_molecules.append(molecule)
return different_subgraphs, slab_molecules
def belong_to(species1, species2):
"""
Determine whether species1 are totally included by species2.
"""
if len(species1) > len(species2):
return False
i = 0
species_1 = species1[:]
species_2 = species2[:]
while i < len(species_1):
find = False
for j in range(len(species_2)):
if species_1[i] == species_2[j]:
del species_1[i]
find = True
del species_2[j]
break
if find is False:
return False
return True
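# Illustrative check (hypothetical species lists): belong_to compares the lists as multisets,
# so repeated species must be matched one-for-one.
def _example_belong_to():  # pragma: no cover
    assert belong_to(['C', 'H', 'H'], ['C', 'C', 'H', 'H', 'O'])
    assert not belong_to(['C', 'N'], ['C', 'C', 'H'])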
def length_belong_to(weights1, weights2):
"""
Determine whether weights1 are totally included by weights2
weights are the list [weight, weight, ...] of one node
"""
if len(weights1) > len(weights2):
return False
i = 0
weights_1 = weights1[:]
weights_2 = weights2[:]
while i < len(weights_1):
find = False
for j in range(len(weights_2)):
if abs((weights_1[i] - weights_2[j]) / weights_2[j]) < 1e-5:
del weights_1[i]
find = True
del weights_2[j]
break
if find is False:
return False
return True
def weights_all_belong_to(all_weight1, all_weight2, species1, species2):
"""
Determine whether one graph is totally included by another graph by
comparing species and weights.
"""
if len(all_weight1) > len(all_weight2):
return False
i = 0
account = 0
total = len(all_weight1)
all_weight_1 = all_weight1[:]
all_weight_2 = all_weight2[:]
species_1 = species1[:]
species_2 = species2[:]
while i < len(all_weight_1):
find = False
for j in range(len(all_weight_2)):
if length_belong_to(all_weight_1[i], all_weight_2[j]) and species_1[i] == species_2[j]:
del all_weight_1[i]
del species_1[i]
del species_2[j]
account += 1
del all_weight_2[j]
find = True
break
if not find:
i += 1
if account >= 2.0 / 3.0 * total:
return True
return False
def brokenMolecules_and_corresspoundingIntactMolecules(new_different_subgraphs,
unique_super_subgraphs):
"""
NOT used in current reconstruction method!!!
Determine the intact molecule that each molecule (broken or intact) belongs to by
comparing the species and weights of broken molecules and intact
molecules.
Parameters:
-----------
new_different_subgraphs: List[subgraph].
Subgraphs of all molecules.
unique_super_subgraphs: List[subgraph].
Subgraphs of all bulk's unique molecules.
Returns:
--------
qualified_subgraphs: List[subgraph].
List of subgraph of molecules in the raw slab.
unique_super_subgraphs: List[subgraph].
List of subgraphs of the corresponding intact molecules. The length of
qualified_unique_subgraphs should be the same as the length of
qualified_subgraphs.
"""
qualified_subgraphs = []
qualified_unique_subgraphs = []
# account = 1
#print("trying to find the connection between broken molecules "
# "and intact molecules")
for subgraph in new_different_subgraphs:
subgraph_species = []
weights_all = []
for n, nbrs in subgraph.adjacency():
subgraph_species.append(subgraph.node[n]['specie'])
weights = []
for nbr, eattr in nbrs.items():
weights.append(eattr['weight'])
weights_all.append(weights)
find = False
for unique_subgraph in unique_super_subgraphs:
unique_subgraph_species = []
unique_weights_all = []
for n, nbrs in unique_subgraph.adjacency():
unique_subgraph_species.append(
unique_subgraph.node[n]['specie'])
weights = []
for nbr, eattr in nbrs.items():
weights.append(eattr['weight'])
unique_weights_all.append(weights)
if not belong_to(subgraph_species, unique_subgraph_species):
continue
else:
if not weights_all_belong_to(weights_all, unique_weights_all,
subgraph_species,
unique_subgraph_species):
continue
else:
find = True
qualified_subgraphs.append(subgraph)
qualified_unique_subgraphs.append(unique_subgraph)
break
if find is False:
print("can't find the qualified subgraphs")
sys.exit()
return qualified_subgraphs, qualified_unique_subgraphs
def fix_broken_molecules(qualified_subgraphs,
qualified_unique_subgraphs,
bulk_super_structure_sg,
slab_supercell_sg,
slab, c_frac_min, fixed_c_negative=False):
"""
NOT used in the current reconstruction method!!!
Fix broken molecules based on graph theory. After determining the affiliation
between all molecules in the raw slab and intact molecules in the original
bulk, this function replaces the broken molecules with intact
molecules.
Parameters:
-----------
qualified_subgraphs: List[subgraph].
List of subgraphs of all molecules in the raw slab.
qualified_unique_subgraphs: List[subgraph].
Each element in the list is the subgraph of the corresponding intact
molecule of "qualified_subgraphs" in the previous list.
bulk_super_structure_sg: StructureGraph.
Structure Graph of supercell (3 x 3 x 3) of original bulk.
slab_supercell_sg: StructureGraph.
Structure Graph of supercell (3 x 3 x 3) of raw slab.
slab: ASE structure.
Raw slab after ASE cleaving.
c_frac_min: float.
Fractional coordinate of the lowest atom in raw slabs in c direction.
fixed_c_negative: bool
Whether to also fix broken molecules on the lower side of the slab. Defaults to
False.
Returns:
--------
slab: pymatgen structure.
Slab after reconstruction.
"""
molecules_new = []
#print("trying to fix the broken molecules...")
for i in range(len(qualified_subgraphs)):
qualified_subgraphs_species = []
qualified_subgraphs_nodes_neibs = []
qualified_subgraphs_all_weights = []
nodes_qualified_subgraphs = []
for n, nbrs in qualified_subgraphs[i].adjacency():
nodes_qualified_subgraphs.append(n)
neibs = []
weights = []
qualified_subgraphs_species.append(
qualified_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
qualified_subgraphs_nodes_neibs.append(neibs)
qualified_subgraphs_all_weights.append(weights)
qualified_unique_subgraphs_species = []
qualified_unique_subgraphs_nodes_neibs = []
qualified_unique_subgraphs_all_weights = []
nodes_qualified_unique_subgraphs = []
for n, nbrs in qualified_unique_subgraphs[i].adjacency():
nodes_qualified_unique_subgraphs.append(n)
neibs = []
weights = []
qualified_unique_subgraphs_species.append(
qualified_unique_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
qualified_unique_subgraphs_all_weights.append(weights)
qualified_unique_subgraphs_nodes_neibs.append(neibs)
node1 = []
node2 = []
account = 0
for t in range(len(qualified_subgraphs_species)):
account = 0
for k in range(len(qualified_unique_subgraphs_species)):
account = 0
if qualified_subgraphs_species[t] == qualified_unique_subgraphs_species[k] \
and length_belong_to(qualified_subgraphs_all_weights[t],
qualified_unique_subgraphs_all_weights[k]) \
and len(qualified_subgraphs_all_weights[t]) == 3:
node1 = [nodes_qualified_subgraphs[t]]
node2 = [nodes_qualified_unique_subgraphs[k]]
account = 0
for a_index, a_weight in enumerate(qualified_subgraphs_all_weights[t]):
for index, weight in enumerate(qualified_unique_subgraphs_all_weights[k]):
has1 = qualified_subgraphs_nodes_neibs[t][a_index] in node1
has2 = qualified_unique_subgraphs_nodes_neibs[k][index] in node2
if abs(weight - a_weight) / weight < 1e-5 and has1 is False and has2 is False:
node1.append(
qualified_subgraphs_nodes_neibs[t][a_index])
node2.append(
qualified_unique_subgraphs_nodes_neibs[k][index])
account += 1
break
if account >= 3:
break
if account >= 3:
break
if account < 3:
print("can't find the corresspounding point")
sys.exit()
coords1 = [slab_supercell_sg.structure[n].coords for n in node1]
coords2 = [bulk_super_structure_sg.structure[n].coords for n in node2]
relative1 = np.array([np.array(coords1[n]) - np.array(coords1[0])
for n in list(range(1, 4))])
relative2 = np.array([np.array(coords2[n]) - np.array(coords2[0])
for n in list(range(1, 4))])
try:
rotationMatrix = np.dot(relative1.T, np.linalg.inv(relative2.T))
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
for m in range(relative1.shape[0]):
if relative1[m, 0] == 0 and relative1[m, 1] == 0 and relative1[m, 2] == 0:
relative1[m, 0] = 1e-9
relative1[m, 2] = -1e-9
for m in range(relative1.shape[1]):
if relative1[0, m] == 0 and relative1[1, m] == 0 and relative1[2, m] == 0:
relative1[0, m] = 1e-9
relative1[2, m] = -1e-9
for m in range(relative2.shape[0]):
if relative2[m, 0] == 0 and relative2[m, 1] == 0 and relative2[m, 2] == 0:
relative2[m, 0] = 1e-9
relative2[m, 2] = -1e-9
for m in range(relative2.shape[1]):
if relative2[0, m] == 0 and relative2[1, m] == 0 and relative2[2, m] == 0:
relative2[0, m] = 1e-9
relative2[2, m] = -1e-9
rotationMatrix = np.dot(
relative1.T, np.linalg.inv(relative2.T))
else:
print('failed')
sys.exit()
relative = np.array([np.array(bulk_super_structure_sg.structure[n].coords)
- np.array(coords2[0])
for n in qualified_unique_subgraphs[i].nodes()])
new_relatives = np.dot(rotationMatrix, relative.T).T
coords = [np.array(coords1[0]) + new_relative
for new_relative in new_relatives]
species = [bulk_super_structure_sg.structure[n].specie
for n in qualified_unique_subgraphs[i].nodes()]
molecule = mg.Molecule(species=species, coords=coords)
molecules_new.append(molecule)
sites = []
molecules_new_backup = list(molecules_new)
if not fixed_c_negative:
i = 0
while i < len(molecules_new):
under = False
for curr_site in molecules_new[i]:
curr_site = mg.PeriodicSite(curr_site.specie,
curr_site.coords,
slab.lattice,
coords_are_cartesian=True)
if curr_site.frac_coords[2] < c_frac_min:
del molecules_new[i]
under = True
break
if under is False:
i += 1
if len(molecules_new) == 0:
molecules_new = molecules_new_backup
for molecule in molecules_new:
for curr_site in molecule:
curr_site = mg.PeriodicSite(curr_site.specie,
curr_site.coords,
slab.lattice,
coords_are_cartesian=True)
tmp = [curr_site.is_periodic_image(site) for site in sites]
if not any(tmp):
sites.append(curr_site)
for site in sites:
slab.append(species=site.specie, coords=site.coords,
coords_are_cartesian=True)
return slab
def put_everyatom_into_cell(slab):
"""
Some atoms might lie outside the slab boundary. Wrap all atoms back into the
cell while making sure that no atoms overlap.
"""
coords = slab.frac_coords
for i in range(coords.shape[0]):
for j in range(coords.shape[1]):
coords[i, j] = coords[i, j] % 1
species = slab.species
molecule = mg.Molecule(species, coords)
sites = []
for site in molecule:
site = mg.PeriodicSite(site.specie,
site.coords,
slab.lattice)
tmp = [site.is_periodic_image(item, tolerance=1e-5) for item in sites]
if not any(tmp):
sites.append(site)
delete_list = []
for i, atom in enumerate(slab):
delete_list.append(i)
slab.remove_sites(delete_list)
for site in sites:
slab.append(species=site.specie, coords=site.coords,
coords_are_cartesian=True)
return slab
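# Illustrative example with a hypothetical cubic cell (not part of the original module):
def _example_put_everyatom_into_cell():  # pragma: no cover
    lattice = mg.Lattice.cubic(10.0)
    slab = mg.Structure(lattice, ["C"], [[1.2, -0.3, 0.5]])  # fractional coords outside [0, 1)
    slab = put_everyatom_into_cell(slab)
    # the single atom is now wrapped to fractional coordinates (0.2, 0.7, 0.5)
    return slab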
def less_fix_broken_molecules(less_broken_subgraphs, less_intact_subgraphs,
bulk_super_structure_sg,
slab_supercell_sg,
slab, c_frac_min,
fixed_c_negative=True):
"""
NOT used in the current reconstruction method!!!
An optimized function of fix_broken_molecules() but does exactly the
same thing. It can deal with more small broken molecules.
"""
molecules_new = []
for i in range(len(less_broken_subgraphs)):
broken_subgraphs_species = []
broken_subgraphs_nodes_neibs = []
broken_subgraphs_weights = []
nodes_broken_subgraphs = []
for n, nbrs in less_broken_subgraphs[i].adjacency():
nodes_broken_subgraphs.append(n)
neibs = []
weights = []
broken_subgraphs_species.append(
less_broken_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
broken_subgraphs_nodes_neibs.append(neibs)
broken_subgraphs_weights.append(weights)
intact_subgraphs_species = []
intact_subgraphs_nodes_neibs = []
intact_subgraphs_weights = []
nodes_intact_subgraphs = []
for n, nbrs in less_intact_subgraphs[i].adjacency():
nodes_intact_subgraphs.append(n)
neibs = []
weights = []
intact_subgraphs_species.append(
less_intact_subgraphs[i].node[n]['specie'])
for nbr, eattr in nbrs.items():
neibs.append(nbr)
weights.append(eattr['weight'])
intact_subgraphs_nodes_neibs.append(neibs)
intact_subgraphs_weights.append(weights)
Find = False
nodes1 = []
nodes2 = []
for j in range(len(broken_subgraphs_species)):
if len(broken_subgraphs_nodes_neibs[j]) == 2:
nodes1 = []
weights1 = []
nodes1.append(nodes_broken_subgraphs[j])
for index, neib in enumerate(broken_subgraphs_nodes_neibs[j]):
nodes1.append(neib)
weights1.append(broken_subgraphs_weights[j][index])
nodes2 = []
for k in range(len(intact_subgraphs_species)):
if broken_subgraphs_species[j] == intact_subgraphs_species[k]\
and length_belong_to(broken_subgraphs_weights[j], intact_subgraphs_weights[k]):
nodes2.append(nodes_intact_subgraphs[k])
for index, weight in enumerate(weights1):
for index_intact, weight_intact in enumerate(intact_subgraphs_weights[k]):
if abs(weight - weight_intact) / weight_intact < 1e-5\
and less_broken_subgraphs[i].\
node[nodes1[index + 1]]['specie'] == less_intact_subgraphs[i].\
node[intact_subgraphs_nodes_neibs[k][index_intact]]['specie']:
nodes2.append(
intact_subgraphs_nodes_neibs[k][index_intact])
if len(nodes2) == 3:
Find = True
break
if Find is True:
# print('Find it')
break
if Find is False:
#print("Sucks")
sys.exit()
rest_item = -1
rest_index = -1
for index, item in enumerate(nodes_broken_subgraphs):
if item not in nodes1:
rest_item = item
rest_index = index
nodes1.append(rest_item)
Find = False
for j in range(len(intact_subgraphs_species)):
if intact_subgraphs_species[j] == broken_subgraphs_species[rest_index]\
and length_belong_to(broken_subgraphs_weights[rest_index], intact_subgraphs_weights[j]):
neibs = intact_subgraphs_nodes_neibs[j]
temp = [neib == node2 for neib in neibs for node2 in nodes2]
if any(temp):
nodes2.append(nodes_intact_subgraphs[j])
Find = True
break
if Find is not True:
print("didn't find the fourth one!")
sys.exit()
node1, node2 = nodes1, nodes2
coords1 = [slab_supercell_sg.structure[n].coords for n in node1]
coords2 = [bulk_super_structure_sg.structure[n].coords for n in node2]
relative1 = np.array([np.array(coords1[n]) - np.array(coords1[0]) for n in list(range(1, 4))])
'''
file: hum36m_dataloader.py
author: zhangxiong(<EMAIL>)
date: 2018_05_09
purpose: load hum3.6m data
'''
import sys
from torch.utils.data import Dataset, DataLoader
import os
import glob
import numpy as np
import random
import cv2
import json
import h5py
import torch
sys.path.append('./src')
from util import calc_aabb, cut_image, flip_image, draw_lsp_14kp__bone, rectangle_intersect, get_rectangle_intersect_ratio, convert_image_by_pixformat_normalize, reflect_pose, reflect_lsp_kp
from config import args
from timer import Clock
class hum36m_dataloader(Dataset):
def __init__(self, data_set_path, use_crop, scale_range, use_flip, min_pts_required, pix_format = 'NHWC', normalize = False, flip_prob = 0.3):
self.data_folder = data_set_path
self.use_crop = use_crop
self.scale_range = scale_range
self.use_flip = use_flip
self.flip_prob = flip_prob
self.min_pts_required = min_pts_required
self.pix_format = pix_format
self.normalize = normalize
self._load_data_set()
def _load_data_set(self):
clk = Clock()
self.images = []
self.kp2ds = []
self.boxs = []
self.kp3ds = []
self.shapes = []
self.poses = []
print('start loading hum3.6m data.')
anno_file_path = os.path.join(self.data_folder, 'annot.h5')
with h5py.File(anno_file_path, 'r') as fp:
total_kp2d = np.array(fp['gt2d'])
total_kp3d = np.array(fp['gt3d'])
total_shap = np.array(fp['shape'])
total_pose = np.array(fp['pose'])
total_image_names = np.array(fp['imagename'])
assert len(total_kp2d) == len(total_kp3d) and len(total_kp2d) == len(total_image_names) and \
len(total_kp2d) == len(total_shap) and len(total_kp2d) == len(total_pose)
l = len(total_kp2d)
def _collect_valid_pts(pts):
r = []
for pt in pts:
if pt[2] != 0:
r.append(pt)
return r
for index in range(l):
kp2d = total_kp2d[index].reshape((-1, 3))
if np.sum(kp2d[:, 2]) < self.min_pts_required:
continue
lt, rb, v = calc_aabb(_collect_valid_pts(kp2d))
self.kp2ds.append(np.array(kp2d.copy(), dtype = np.float))
self.boxs.append((lt, rb))
self.kp3ds.append(total_kp3d[index].copy().reshape(-1, 3))
self.shapes.append(total_shap[index].copy())
self.poses.append(total_pose[index].copy())
self.images.append(os.path.join(self.data_folder, 'image') + total_image_names[index].decode())
print('finished load hum3.6m data, total {} samples'.format(len(self.kp3ds)))
clk.stop()
def __len__(self):
return len(self.images)
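    # Illustrative usage sketch (hypothetical paths/arguments, not part of the original file):
    #   dataset = hum36m_dataloader('/path/to/hum3.6m', use_crop=True, scale_range=[1.1, 1.5],
    #                               use_flip=True, min_pts_required=5, pix_format='NCHW',
    #                               normalize=True)
    #   loader = DataLoader(dataset, batch_size=32, shuffle=True)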
def __getitem__(self, index):
image_path = self.images[index]
kps = self.kp2ds[index].copy()
box = self.boxs[index]
kp_3d = self.kp3ds[index].copy()
scale = np.random.rand(4) * (self.scale_range[1] - self.scale_range[0]) + self.scale_range[0]
image, kps = cut_image(image_path, kps, scale, box[0], box[1])
ratio = 1.0 * args.crop_size / image.shape[0]
kps[:, :2] *= ratio
dst_image = cv2.resize(image, (args.crop_size, args.crop_size), interpolation = cv2.INTER_CUBIC)
trival, shape, pose = np.zeros(3), self.shapes[index], self.poses[index]
#v2.3
import numpy as np
from util import *
L_SOFTPLUS = 0
L_RELU = 1
L_LINEAR = 2
L_SIGMOID = 3
L_DISCRETE = 4
L_BINARY_Z = 5
L_BINARY_N = 6
LS_REAL = [L_SOFTPLUS, L_RELU, L_LINEAR, L_SIGMOID]
LS_DISCRETE = [L_DISCRETE, L_BINARY_Z, L_BINARY_N, ]
ACT_F = {L_SOFTPLUS: softplus,
L_RELU: relu,
L_SIGMOID: sigmoid,
L_LINEAR: lambda x: x,
L_BINARY_Z: sigmoid,
L_BINARY_N: lambda x: 2*sigmoid(x)-1,
}
ACT_D_F = {L_SOFTPLUS: sigmoid,
L_RELU: relu_d,
L_SIGMOID: sigmoid_d,
L_LINEAR: lambda x: 1,
L_BINARY_Z: sigmoid_d,
L_BINARY_N: lambda x: 2*sigmoid_d(x),
}
class eq_prop_layer():
def __init__(self, name, input_size, output_size, optimizer, var, temp, l_type, unbiased=False):
if l_type not in [L_SOFTPLUS, L_RELU, L_LINEAR, L_SIGMOID, L_DISCRETE, L_BINARY_Z, L_BINARY_N, ]:
raise Exception('l_type (%d) not implemented' % l_type)
self.name = name
self.input_size = input_size
self.output_size = output_size
self.optimizer = optimizer
self.l_type = l_type
self.temp = temp if l_type in LS_DISCRETE else 1
self.unbiased = unbiased
lim = np.sqrt(6 / (input_size + output_size))
if l_type == L_DISCRETE: output_size -= 1
self._w = np.random.uniform(-lim, lim, size=(input_size, output_size))
self._b = np.random.uniform(-1e-3, 1e-3, size=(output_size))
self._inv_var = np.full(output_size, 1/var) if var > 0 else None
self.prev_layer = None # Set manually
self.next_layer = None # Set manually
self.values = np.zeros((1, output_size))
self.w_trace = np.zeros((1, input_size, output_size,))
self.b_trace = np.zeros((1, output_size,))
if self.unbiased:
self.p_w_trace = np.zeros((1, input_size, output_size,))
self.p_b_trace = np.zeros((1, output_size,))
def sample(self, inputs, det=False):
self.compute_pot_mean(inputs)
if self.l_type in LS_REAL:
if self._inv_var is None or det:
self.values = self.mean
else:
sigma = np.sqrt(1/self._inv_var)
self.values = self.mean + sigma * np.random.normal(size=self.pot.shape)
elif self.l_type == L_DISCRETE:
self.values = multinomial_rvs(n=1, p=self.mean)
elif self.l_type in [L_BINARY_Z]:
if not det:
self.values = np.random.binomial(1, self.mean, size=self.mean.shape)
else:
self.values = (self.mean > 0.5).astype(np.float)
elif self.l_type == L_BINARY_N:
if not det:
self.values = np.random.binomial(1, (self.mean+1)/2, size=self.mean.shape)*2-1
else:
self.values = (self.mean > 0).astype(np.float)
return self.values
def compute_pot_mean(self, inputs):
# Compute potential (pre-activated mean value) and mean value of layer
self.inputs = inputs
self.pot = (inputs.dot(self._w) + self._b)/self.temp
if self.l_type in LS_REAL + [L_BINARY_Z, L_BINARY_N]:
self.mean = ACT_F[self.l_type](self.pot)
elif self.l_type == L_DISCRETE:
self.pot = np.concatenate([self.pot, np.zeros((inputs.shape[0], 1))], axis=-1)
self.mean = softmax(self.pot, axis=-1)
def record_trace(self, lambda_=0, det=False, zero_learn=False):
dev = 1 if det else (self.values - self.mean)
if self.l_type in LS_REAL:
v_ch = dev * ACT_D_F[self.l_type](self.pot) * self._inv_var
elif self.l_type in [L_BINARY_Z, L_BINARY_N]:
v_ch = dev / self.temp
if self.unbiased:
act_p = sigmoid(zero_to_neg(self.values) * self.pot)
elem_sum = self.inputs[:,:,np.newaxis]*self._w[np.newaxis, :, :]
v_ch_w = (act_p[:, np.newaxis, :] -
sigmoid(zero_to_neg(self.values)[:, np.newaxis, :] * (self.pot[:, np.newaxis, :] - elem_sum)))/self._w
v_ch_b = (act_p - sigmoid(zero_to_neg(self.values)* (self.pot - self._b)))/self._b
v_ch_w /= act_p[:, np.newaxis, :]
v_ch_b /= act_p
self.p_w_trace = lambda_ * self.p_w_trace + v_ch_w
self.p_b_trace = lambda_ * self.p_b_trace + v_ch_b
elif self.l_type == L_DISCRETE:
v_ch = (dev / self.temp)[:, :-1]
if self.unbiased:
act_p = np.sum(self.mean * self.values, axis=-1)
elem_sum = self.inputs[:,:,np.newaxis]*self._w[np.newaxis, :, :]
elem_sum = np.concatenate([elem_sum, np.zeros((elem_sum.shape[0], elem_sum.shape[1], 1))], axis=-1)
expanded = np.zeros(elem_sum.shape + elem_sum.shape[-1:], dtype=elem_sum.dtype)
diagonals = np.diagonal(expanded, axis1=-2, axis2=-1)
diagonals.setflags(write=True)
diagonals[:] = elem_sum
v_ch_w = (act_p[:, np.newaxis, np.newaxis] - np.sum(softmax(self.pot[:, np.newaxis, np.newaxis, :] - expanded, axis=-1)*self.values[:, np.newaxis, np.newaxis, :], -1))[:, :, :-1]/self._w
b_z = np.concatenate([self._b, np.zeros(1)])
# -- coding: utf-8 --
'''
Script for comparing our Bayesian preference learning approach with the results from Habernal 2016.
Steps in this test:
1. Load word embeddings for the original text data that were used in the NN approach in Habernal 2016. -- done, but
only using averages to combine them.
2. Load feature data that was used in the SVM-based approach in Habernal 2016.
3. Load the crowdsourced data. -- done.
4. Copy a similar testing setup to Habernal 2016 (training/test split?) and run the Bayesian approach (during testing,
we can set aside some held-out data). -- done, results saved to file with no metrics computed yet except acc.
5. Print some simple metrics that are comparable to those used in Habernal 2016.
Thoughts:
1. NN takes into account sequence of word embeddings; here we need to use a combined embedding for whole text to avoid
a 300x300 dimensional input space.
2. So our method can only learn which elements of the embedding are important, but cannot learn from patterns in the
sequence, unless we can find a way to encode those.
3. However, the SVM-based approach also did well. Which method is better, NN or SVM, and by how much?
4. We should be able to improve on the SVM-based approach.
5. The advantages of our method: ranking with sparse data; personalised predictions to the individual annotators;
uncertainty estimates for active learning and decision-making confidence thresholds.
Created on 20 Mar 2017
@author: simpson
'''
import logging
from scipy.stats.stats import pearsonr
from sklearn.metrics import log_loss
from sklearn.svm.classes import NuSVR, SVC
logging.basicConfig(level=logging.DEBUG)
import sys
import os
from sklearn.metrics.ranking import roc_auc_score
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/habernal_comparison")
svm_python_path = '~/libsvm-3.22/python'
sys.path.append(os.path.expanduser("~/git/HeatMapBCC/python"))
sys.path.append(os.path.expanduser("~/git/pyIBCC/python"))
sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/skip-thoughts"))
sys.path.append(os.path.expanduser("~/data/personalised_argumentation/embeddings/Siamese-CBOW/siamese-cbow"))
sys.path.append(os.path.expanduser(svm_python_path))
import pickle
import time
from gp_pref_learning import GPPrefLearning, pref_likelihood
from gp_classifier_svi import GPClassifierSVI
from gp_classifier_vb import compute_median_lengthscales
from sklearn.svm import SVR
from embeddings import load_embeddings, load_siamese_cbow_embeddings, load_skipthoughts_embeddings
from data_loader import data_root_dir, load_train_test_data, load_ling_features
import numpy as np
ndebug_features = 10
def save_fold_order(resultsdir, folds=None, dataset=None):
if folds is None and dataset is not None:
folds, _, _, _, _ = load_train_test_data(dataset)
elif folds is None:
print("Need to provide a dataset label or a set of fold data...")
return
np.savetxt(resultsdir + "/foldorder.txt", np.array(list(folds.keys()))[:, None], fmt="%s")
# Lengthscale initialisation -------------------------------------------------------------------------------------------
# use the median heuristic to find a reasonable initial length-scale. This is the median of the distances.
# First, grab a sample of points because N^2 could be too large.
def compute_lengthscale_heuristic(feature_type, embeddings_type, embeddings, ling_feat_spmatrix, docids, folds,
index_to_word_map):
# get the embedding values for the test data -- need to find embeddings of the whole piece of text
if feature_type == 'both' or feature_type == 'embeddings' or feature_type == 'debug':
docidxs = []
doc_tok_seqs = []
doctexts = []
for f in folds:
doc_tok_seqs.append(folds.get(f)["test"][0])
doc_tok_seqs.append(folds.get(f)["test"][1])
testids = np.array([ids_pair.split('_') for ids_pair in folds.get(f)["test"][3]])
docidxs.append(get_docidxs_from_ids(docids, testids[:, 0]))
docidxs.append(get_docidxs_from_ids(docids, testids[:, 1]))
doctexts.append(folds.get(f)["test"][5])
doctexts.append(folds.get(f)["test"][6])
X, _, utexts = get_doc_token_seqs(docidxs, doc_tok_seqs, doctexts)
if embeddings_type == 'word_mean':
items_feat = get_mean_embeddings(embeddings, X)
elif embeddings_type == 'skipthoughts':
global skipthoughts
import skipthoughts
items_feat = skipthoughts.encode(embeddings, utexts)
elif embeddings_type == 'siamese-cbow':
items_feat = np.array([embeddings.getAggregate(index_to_word_map[Xi]) for Xi in X])
else:
logging.info("invalid embeddings type! %s" % embeddings_type)
if feature_type == 'both' or feature_type == 'debug':
items_feat = np.concatenate((items_feat, ling_feat_spmatrix.toarray()), axis=1)
if feature_type == 'ling':
items_feat = ling_feat_spmatrix.toarray()
if feature_type == 'debug':
items_feat = items_feat[:, :ndebug_features]
starttime = time.time()
#for f in range(items_feat.shape[1]):
ls_initial = compute_median_lengthscales(items_feat, N_max=3000)
endtime = time.time()
logging.info('@@@ Selected initial lengthscales in %f seconds' % (endtime - starttime))
return ls_initial
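# Rough sketch of the median heuristic used above (an assumption about what
# compute_median_lengthscales does internally; the real implementation may differ).
# Note the pairwise-difference array can be large, hence the small default sample size.
def _median_lengthscale_sketch(items_feat, n_max=500):  # pragma: no cover
    idxs = np.random.choice(items_feat.shape[0], min(n_max, items_feat.shape[0]), replace=False)
    sample = items_feat[idxs]
    # per-feature absolute differences between all pairs of sampled points
    diffs = np.abs(sample[:, None, :] - sample[None, :, :])
    return np.median(diffs, axis=(0, 1))  # one initial length-scale per feature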
def get_doc_token_seqs(ids, X_list, texts=None):
'''
ids -- list of document IDs
X_list -- list of lists of word indices for each argument corresponding to the ids
texts -- list of texts corresponding to the ids
returns
X -- list of lists of word indices for each argument corresponding to the uids
uids -- list of unique document IDs
utexts -- unique texts corresponding to the uids
'''
# X_train_a1 and a1_train both have one entry per observation. We want to replace them with a list of
# unique arguments, and the indexes into that list. First, get the unique argument ids from trainids and testids:
if hasattr(ids[0], '__len__'):
allids = np.concatenate(ids)
else:
allids = ids
uids, uidxs = np.unique(allids, return_index=True)
# get the word index vectors corresponding to the unique arguments
X = np.zeros(np.max(uids) + 1, dtype=object)
if texts is not None:
utexts = np.zeros(np.max(uids) + 1, dtype=object)
utexts[:] = ''
start = 0
fin = 0
for i in range(len(X_list)):
fin += len(X_list[i])
idxs = (uidxs>=start) & (uidxs<fin)
# keep the original IDs to try to make life easier. This means the IDs become indexes into X
X[uids[idxs]] = np.array(X_list[i])[uidxs[idxs] - start]
if texts is not None:
utexts[uids[idxs]] = np.array(texts[i])[uidxs[idxs] - start]
start += len(X_list[i])
if texts is not None:
utexts = [utext for utext in utexts]
return X, uids, utexts
else:
return X, uids
def get_mean_embeddings(word_embeddings, X):
return np.array([np.mean(word_embeddings[Xi, :], axis=0) for Xi in X])
def get_docidxs_from_ids(all_docids, ids_to_map):
return np.array([np.argwhere(docid==all_docids)[0][0] for docid in ids_to_map])
def get_fold_data(folds, fold, docids):
#X_train_a1, X_train_a2 are lists of lists of word indexes
X_train_a1, X_train_a2, prefs_train, ids_train, person_train, tr_a1, tr_a2 = folds.get(fold)["training"]
X_test_a1, X_test_a2, prefs_test, ids_test, person_test, test_a1, test_a2 = folds.get(fold)["test"]
#a1_train, a2_train are lists of argument ids
trainids = np.array([ids_pair.split('_') for ids_pair in ids_train])
if docids is None:
docids = np.arange(np.unique(trainids).size)
a1_train = get_docidxs_from_ids(docids, trainids[:, 0])
a2_train = get_docidxs_from_ids(docids, trainids[:, 1])
testids = np.array([ids_pair.split('_') for ids_pair in ids_test])
a1_test = get_docidxs_from_ids(docids, testids[:, 0])
a2_test = get_docidxs_from_ids(docids, testids[:, 1])
X, uids, utexts = get_doc_token_seqs((a1_train, a2_train, a1_test, a2_test),
[X_train_a1, X_train_a2, X_test_a1, X_test_a2], (tr_a1, tr_a2, test_a1, test_a2))
print(("Training instances ", len(X_train_a1), " training labels ", len(prefs_train)))
print(("Test instances ", len(X_test_a1), " test labels ", len(prefs_test)))
prefs_train = np.array(prefs_train)
prefs_test = np.array(prefs_test)
person_train = np.array(person_train)
person_test = np.array(person_test)
personIDs = np.concatenate((person_train, person_test))
_, personIdxs = np.unique(personIDs, return_inverse=True)
person_train = personIdxs[:len(person_train)]
person_test = personIdxs[len(person_train):]
return a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test, \
X, uids, utexts
def get_noisy_fold_data(folds, fold, docids, acc, tr_pair_subset=None):
a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test, X, \
uids, utexts = get_fold_data(folds, fold, docids)
# now subsample the training data
N = len(a1_train)
if tr_pair_subset is not None:
Nsub = N * tr_pair_subset
subidxs = np.random.choice(N, Nsub, replace=False)
a1_train = a1_train[subidxs]
a2_train = a2_train[subidxs]
prefs_train = prefs_train[subidxs]
person_train = person_train[subidxs]
else:
Nsub = N
if acc != 1.0:
# now we add noise to the training data
flip_labels = np.random.rand(Nsub) > acc
prefs_train[flip_labels] = 2 - prefs_train[flip_labels] # labels are 0, 1 or 2
return a1_train, a2_train, prefs_train, person_train, a1_test, a2_test, prefs_test, person_test, \
X, uids, utexts
def get_fold_regression_data(folds_regression, fold, docids):
if folds_regression is not None:
_, scores_rank_train, argids_rank_train, person_rank_train, _ = folds_regression.get(fold)["training"] # blank argument is turkIDs_rank_test
item_idx_ranktrain = np.array([np.argwhere(trainid==docids)[0][0] for trainid in argids_rank_train])
scores_rank_train = np.array(scores_rank_train)
argids_rank_train = np.array(argids_rank_train)
_, scores_rank_test, argids_rank_test, personIDs_rank_test, _ = folds_regression.get(fold)["test"] # blank argument is turkIDs_rank_test
item_idx_ranktest = np.array([np.argwhere(testid==docids)[0][0] for testid in argids_rank_test])
scores_rank_test = np.array(scores_rank_test)
argids_rank_test = np.array(argids_rank_test)
else:
item_idx_ranktrain = None
scores_rank_train = None
argids_rank_train = None
person_rank_train = None
item_idx_ranktest = None
scores_rank_test = None
argids_rank_test = None
personIDs_rank_test = None
return item_idx_ranktrain, scores_rank_train, argids_rank_train, person_rank_train,\
item_idx_ranktest, scores_rank_test, argids_rank_test, personIDs_rank_test
def subsample_tr_data(subsample_amount, a1_train, a2_train):
item_subsample_ids = []
nselected = 0
while nselected < subsample_amount:
idx = np.random.choice(len(a1_train), 1)
if a1_train[idx] not in item_subsample_ids:
item_subsample_ids.append(a1_train[idx])
if a2_train[idx] not in item_subsample_ids:
item_subsample_ids.append(a2_train[idx])
nselected = len(item_subsample_ids)
pair_subsample_idxs = np.argwhere(np.in1d(a1_train, item_subsample_ids) & np.in1d(a2_train, item_subsample_ids)).flatten()
# pair_subsample_idxs = np.random.choice(len(a1_train), subsample_amount, replace=False)
return pair_subsample_idxs
class TestRunnerSingleFold:
def __init__(self, current_expt_output_dir, datasets, feature_types, embeddings_types, methods,
dataset_increment, expt_tag='kmeans'):
self.folds = None
self.initial_pair_subset = {}
self.default_ls_values = {}
self.expt_output_dir = current_expt_output_dir
self.expt_tag = expt_tag
self.datasets = datasets
self.feature_types = feature_types
self.embeddings_types = embeddings_types
self.methods = methods
self.dataset_increment = dataset_increment
def load_features(self, feature_type, embeddings_type, a1_train, a2_train, uids, utexts=None):
'''
Load all the features specified by the type into an items_feat object. Remove any features where the values are all
zeroes.
'''
# get the embedding values for the test data -- need to find embeddings of the whole piece of text
if feature_type == 'both' or feature_type == 'embeddings' or feature_type=='debug':
logging.info("Converting texts to mean embeddings (we could use a better sentence embedding?)...")
if embeddings_type == 'word_mean':
items_feat = get_mean_embeddings(self.embeddings, self.X)
elif embeddings_type == 'skipthoughts':
global skipthoughts
import skipthoughts
items_feat = skipthoughts.encode(self.embeddings, utexts)
elif embeddings_type == 'siamese-cbow':
items_feat = np.array([self.embeddings.getAggregate(self.index_to_word_map[Xi]) for Xi in self.X])
else:
logging.info("invalid embeddings type! %s" % embeddings_type)
logging.info("...embeddings loaded.")
# trim away any features not in the training data because we can't learn from them
valid_feats = np.sum((items_feat[a1_train] != 0) + (items_feat[a2_train] != 0), axis=0) > 0
items_feat = items_feat[:, valid_feats]
self.ling_items_feat = None # will get overwritten if we load the linguistic features further down.
self.embeddings_items_feat = items_feat
elif feature_type == 'ling':
items_feat = np.zeros((self.X.shape[0], 0))
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import math
import itertools
import warnings
from six.moves import map, zip
import random
import json
import numpy as np
from numpy.linalg import inv
from numpy import pi, dot, transpose, radians
from monty.json import MSONable
from monty.dev import deprecated
from pymatgen_core.util.num import abs_cap
import pymatgen_core.core.units as units
import logging
logger = logging.getLogger(__name__)
"""
This module defines the classes relating to 3D lattices.
.. Changes ::
*
*
*
*
"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class Lattice(units.ObjectUnits):
"""
A lattice object. Essentially a matrix with conversion matrices. In
general, it is assumed that length units are in Angstroms and angles are in
degrees unless otherwise stated.
"""
# Properties lazily generated for efficiency.
@property
def unit_conf(self):
return self._unit_conf
@property
def matrix(self):
return self._property['matrix']
@property
def constants(self):
return self._property['constants']
@property
def angles(self):
return self._property['angles']
@property
def lengths(self):
return self._property['lengths']
@matrix.setter
def matrix(self,matrix):
m = np.array(matrix, dtype=np.float64).reshape((self.n_dim, self.n_dim))
lengths = np.sqrt(np.sum(m ** 2, axis=1))
angles = np.zeros(self.n_dim)
for i in range(self.n_dim):
j = (i + 1) % self.n_dim
k = (i + 2) % self.n_dim
angles[i] = abs_cap(dot(m[j], m[k]) / (lengths[j] * lengths[k]))
self._property['lengths'] = lengths
self._property['angles'] = np.arccos(angles)
if( self.unit_conf['angle'] == 'degree' ):
self._property['angles'] = self._property['angles']* 180/np.pi
self._property['constants'] = np.zeros(6)
self._property['constants'][0:3] = self._property['lengths']
self._property['constants'][3:6] = self._property['angles']
self._property['matrix'] = m
self.is_orthogonal = all([abs(a - 90) < 1e-5 for a in self.angles])
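    # Illustrative example (not part of the original class): assigning a 4 Angstrom cubic
    # cell, e.g. ``lat.matrix = 4.0 * np.identity(3)``, makes the setter above report
    # lengths == [4, 4, 4] and angles == [90, 90, 90] (in degrees when
    # unit_conf['angle'] == 'degree'), since every off-diagonal dot product is zero.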
def get_rad_angles(self):
'''Return angles in radians
'''
if( self._unit_conf['angle'] == 'degree'):
alpha_r = np.deg2rad(self._property['angles'][0] )
beta_r = np.deg2rad(self._property['angles'][1] )
gamma_r = np.deg2rad(self._property['angles'][2] )
elif( self._unit_conf['angle'] == 'radian'):
alpha_r = self._property['angles'][0]
beta_r = self._property['angles'][1]
gamma_r = self._property['angles'][2]
else:
raise KeyError('angle unit {} not supported '.format( self._unit_conf['angle'] ))
return alpha_r,beta_r,gamma_r
def constants2matrix(self,a,b,c,alpha_r,beta_r,gamma_r):
'''Calculate the matrix based on the lattice constants
Args:
* a (float): lattice constant a
* b (float): lattice constant b
* c (float): lattice constant c
* alpha (float): lattice angle alpha in radians
* beta (float): lattice angle beta in radians
* gamma (float): lattice angle gamma in radians
'''
#
val = (np.cos(alpha_r) * np.cos(beta_r) - np.cos(gamma_r))\
/ (np.sin(alpha_r) * np.sin(beta_r))
import tiledb, numpy as np
import json
import sys
import os
import io
from collections import OrderedDict
import warnings
from tiledb import TileDBError
if sys.version_info >= (3,3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
# TODO
# - handle missing values
# - handle extended datatypes
# - implement distributed CSV import
# - implement support for read CSV via TileDB VFS from any supported FS
TILEDB_KWARG_DEFAULTS = {
'ctx': None,
'sparse': True,
'index_dims': None,
'allows_duplicates': True,
'mode': 'ingest',
'attrs_filters': None,
'coords_filters': None,
'full_domain': False,
'tile': None,
'row_start_idx': None,
'fillna': None,
'column_types': None,
'capacity': None,
'date_spec': None,
'cell_order': 'row-major',
'tile_order': 'row-major',
'debug': None,
}
def parse_tiledb_kwargs(kwargs):
args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
args[key] = kwargs.pop(key)
return args
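# Illustrative behaviour of parse_tiledb_kwargs (hypothetical keyword arguments):
def _example_parse_tiledb_kwargs():  # pragma: no cover
    kwargs = {"sparse": False, "tile": 100, "unrelated": 1}
    args = parse_tiledb_kwargs(kwargs)
    assert args["sparse"] is False and args["tile"] == 100  # recognised keys are consumed...
    assert kwargs == {"unrelated": 1}                       # ...unknown keys are left for the caller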
class ColumnInfo:
def __init__(self, dtype, repr=None):
self.dtype = dtype
self.repr = repr
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
# TODO add more basic types here
if col_dtype in (np.int32, np.int64, np.uint32, np.uint64, np.float, np.double,
np.uint8):
return ColumnInfo(col_dtype)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype('M8[ns]'):
if col_dtype == np.dtype('datetime64[ns]'):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, 'tz'):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError("unsupported datetime subtype ({})".format(type(col_dtype)))
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == 'string':
return ColumnInfo(unicode_dtype)
if col_dtype == 'bool':
return ColumnInfo(np.uint8, repr=np.dtype('bool'))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == 'bytes':
return ColumnInfo(np.bytes_)
elif inferred_dtype == 'string':
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == 'mixed':
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(col.name)
)
raise ValueError(
"Unhandled column type: '{}'".format(
col_dtype
)
)
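# Illustrative behaviour of dtype_from_column (hypothetical columns; pandas defaults to
# int64 for plain Python ints):
def _example_dtype_from_column():  # pragma: no cover
    import pandas as pd
    assert dtype_from_column(pd.Series([1, 2, 3])).dtype == np.int64
    assert dtype_from_column(pd.Series(["a", "b"])).dtype == unicode_dtype
    assert dtype_from_column(pd.Series([True, False])).repr == np.dtype('bool')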
# TODO make this a staticmethod on Attr?
def attrs_from_df(df,
index_dims=None, filters=None,
column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, 'type'):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(tiledb.Attr(name=name, dtype=attr_info.dtype, filters=filters))
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
raise ValueError("Empty column '{}' cannot be used for dimension!".format(col_name))
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype('O'):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError("Unknown column type not yet supported ('{}')".format(col_val0_type))
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
        if not dim_info.dtype in (np.bytes_, np.str_):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == 'M':
date_unit = np.datetime_data(dtype)[0]
dim_min = np.datetime64(dtype_min + 1, date_unit)
tile_max = np.iinfo(np.uint64).max - tile
                if np.abs(np.uint64(dtype_max)
import pdb
import numpy as np
from scipy import linalg, optimize
import cov
class GP(object):
def __init__(self,X,y,cov_type='covSEard'):
        self.X = X # training inputs, 2d array
        self.y = y # training outputs, 1d array
        self.cov_type = cov_type # string specifying covariance function
        self.size, self.input_dim = X.shape # number of training points and input dimensionality
# initialise all log params to 0
if self.cov_type == 'covSEiso':
self.params = np.asarray([0.0]*3)
self.params[-1] = np.log(0.01)
elif self.cov_type == 'covSEard':
self.params = np.asarray([0.0]*(self.input_dim+2))
self.params[0:self.input_dim] = np.log(np.std(X,axis=0))
self.params[-2] = np.log(np.var(y,axis=0))
self.params[-1] = self.params[-2]-np.log(10)
# cache for the Cholesky factor
self.L_cache = np.empty((self.size,self.size),dtype=X.dtype)
# cache for the K^{-1}y vector
self.Kinvy_cache = np.empty((self.size),dtype=X.dtype)
# cache for the Q = invK - outer(Kinvy,Kinvy) matrix
self.Q_cache = np.empty((self.size,self.size),dtype=X.dtype)
self.L_cached_at = None # parameters at which cached L, Kinvy values hold
self.Q_cached_at = None # parameters at which cached Q values hold
def NLML(self,params=None,derivs=False):
# negative log marginal likelihood and derivatives
params = self.params if params is None else params
cov_params = params[:-1]
sig_noise = np.exp(params[-1])
L = self.L_cache
Kinvy = self.Kinvy_cache
if self.L_cached_at is None or not np.all(self.L_cached_at == params):
# if parameters differ from cached ones, compute
# generate covariance k(X,X)
K = cov.cov(self.cov_type,self.X,self.X,cov_params)
K += np.eye(self.size)*sig_noise # account for Gaussian likelihood
# Cholesky factor of k(X,X)
L[:] = linalg.cholesky(K,lower=True)
# k(X,X)^-1*y
Kinvy[:] = linalg.cho_solve((L,True),self.y)
self.L_cached_at = np.array(params,copy=True)
# log determinant of Cholesky factor of k(X,X)
# = 0.5*log determinant of k(X,X)
logdetL = np.sum(np.log(np.diag(L)))
if not derivs:
NLML = 0.5*np.dot(self.y,Kinvy) # 0.5 * y^T * K^-1 * y
NLML += logdetL # 0.5 * log det(K)
NLML += 0.5*float(self.size)*np.log(2.0*np.pi) # 0.5*N*log(2*pi)
NLML = NLML/float(self.size)
return NLML
Q = self.Q_cache
        if self.Q_cached_at is None or not np.all(self.Q_cached_at == params):
# -*- coding: utf-8 -*-
import os, math
import cv2
import dlib
import numpy as np
import argparse
import multiprocessing
def parseArgs():
    parser = argparse.ArgumentParser(description='Choose model and thread number to align face images',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input_dir', required=True, type=str,
                        help='directory containing the input movie frames')
    parser.add_argument('--output_dir', required=True, type=str,
                        help='directory in which to save the aligned face images')
    parser.add_argument('--face_detector', required=True, type=str,
                        help='which face detection function to use')
    parser.add_argument('--landmark_detector', required=True, type=str,
                        help='which landmark detection function to use')
    parser.add_argument('--threads', required=True, type=int,
                        help='number of processes to run the face detection with')
args = parser.parse_args()
return args
dlib_detector = dlib.get_frontal_face_detector()
def dlibFrontalFaceDetect(img, upsampling=1):
'''
    Use dlib's frontal face detector to detect faces in the image.
:param img: face image
:param upsampling: upsample ratio for dlib frontal face detector
:return result: face box in format [[xmin, ymin, xmax, ymax], ]
'''
if type(img) == str:
img = dlib.load_rgb_image(img)
faces = dlib_detector(img, upsampling)
if len(faces) == 0:
return None
result = []
for face in faces:
result.append([face.left(), face.top(), face.right(), face.bottom()])
return result
dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1('../data/mmod_human_face_detector.dat')
def dlibCNNFaceDetect(img):
'''
    Use dlib's CNN face detector to detect faces in the image.
:param img: face image
:return result: face box in format [[xmin, ymin, xmax, ymax], ]
'''
if type(img) == str:
img = dlib.load_rgb_image(img)
faces = dlib_cnn_face_detector(img, 1)
if len(faces) == 0:
return None
result = []
for face in faces:
result.append([face.rect.left(), face.rect.top(), face.rect.right(), face.rect.bottom()])
return result
def faceCrop(img, xmin, ymin, xmax, ymax, scale=2.0):
'''
    Given an image and the location of a face, crop and return the face with a margin.
    :param img: image, numpy ndarray
    :param xmin, ymin, xmax, ymax: face box location
:param scale: the bigger the scale is, the bigger the margin around face is
:return face: face with margin, numpy ndarray
'''
hmax, wmax, _ = img.shape
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
w = (xmax - xmin) * scale
h = (ymax - ymin) * scale
# new xmin, ymin, xmax and ymax
xmin = x - w/2
xmax = x + w/2
ymin = y - h/2
ymax = y + h/2
    # clamp coordinates to valid integer values inside the image
xmin = max(0, int(xmin))
ymin = max(0, int(ymin))
xmax = min(wmax, int(xmax))
ymax = min(hmax, int(ymax))
# crop and return
face = img[ymin:ymax,xmin:xmax,:]
return face
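# --- Hedged example (illustrative only; not part of the original script) ---
# Minimal sketch of chaining the dlib frontal detector with faceCrop on a
# single image; the file name is a placeholder.
def _example_crop_first_face(image_path='face.jpg'):
    img = cv2.imread(image_path)
    boxes = dlibFrontalFaceDetect(img)
    if boxes is None:
        return None
    xmin, ymin, xmax, ymax = boxes[0]
    return faceCrop(img, xmin, ymin, xmax, ymax, scale=2.0)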
def findEye(landmarks):
'''
    Find the center coordinates of the left eye and the right eye;
    they will later be used to rotate and align the face.
'''
left_eye = landmarks[36:42]
left_eye = np.array([p for p in left_eye])
left_eye = left_eye.mean(axis=0)
right_eye = landmarks[42:48]
right_eye = np.array([p for p in right_eye])
right_eye = right_eye.mean(axis=0)
return left_eye, right_eye
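# --- Hedged example (illustrative only; not part of the original script) ---
# One common use of the two eye centers is to compute the in-plane rotation
# angle that would make the eyes horizontal; this helper is an assumption
# about how the alignment step might look, not original code.
def _example_eye_angle(landmarks):
    left_eye, right_eye = findEye(landmarks)
    dx = right_eye[0] - left_eye[0]
    dy = right_eye[1] - left_eye[1]
    return math.degrees(math.atan2(dy, dx))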
def findNose(landmarks):
'''
    Find the center coordinate of the nose (currently unused).
'''
nose = landmarks[31:36]
    nose = np.array([p for p in nose])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import numpy as np
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
class Optimizer (object):
'''
Abstract base class for the optimizers
Parameters
----------
lr : float (default=2e-2)
Learning rate value
decay : float (default=0.)
Learning rate decay
lr_min : float (default=0.)
Minimum of learning rate domain
lr_max : float (default=np.inf)
Maximum of learning rate domain
*args : list
Class specialization variables.
**kwargs : dict
Class Specialization variables.
'''
def __init__ (self, lr=1e-3, decay=0., lr_min=0., lr_max=np.inf, *args, **kwargs):
self.lr = lr
self.decay = decay
self.lr_min = lr_min
self.lr_max = lr_max
self.iterations = 1
def update (self, params, gradients):
'''
Update the optimizer parameters
Parameters
----------
params : list
List of parameters to update
gradients : list
List of corresponding gradients
Returns
-------
self
'''
self.lr *= 1. / (self.decay * self.iterations + 1.)
self.lr = np.clip(self.lr, self.lr_min, self.lr_max)
self.iterations += 1
def __repr__ (self):
'''
Representation
'''
class_name = self.__class__.__qualname__
try:
params = super(type(self), self).__init__.__code__.co_varnames
except AttributeError:
params = self.__init__.__code__.co_varnames
params = set(params) - {'self', 'args', 'kwargs'}
args = ', '.join(['{0}={1}'.format(k, str(getattr(self, k)))
if not isinstance(getattr(self, k), str) else '{0}="{1}"'.format(k, str(getattr(self, k)))
for k in params])
return '{0}({1})'.format(class_name, args)
def __str__ (self):
'''
Printer
'''
return self.__class__.__name__
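# --- Hedged example (illustrative only; not part of the original module) ---
# Sketch of the learning-rate decay applied by Optimizer.update: with decay=0.1
# the rate shrinks by 1 / (decay * iteration + 1) on every call and is clipped
# to [lr_min, lr_max].
def _example_lr_decay():
    opt = Optimizer(lr=1.0, decay=0.1)
    opt.update([], [])   # lr -> 1 / (0.1 * 1 + 1), roughly 0.909
    opt.update([], [])   # lr decays further on the second call
    return opt.lr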
class SGD (Optimizer):
'''
Stochastic Gradient Descent specialization
Update the parameters according to the rule
.. code-block:: python
parameter -= learning_rate * gradient
Parameters
----------
*args : list
Class specialization variables.
**kwargs : dict
Class Specialization variables.
'''
def __init__ (self, *args, **kwargs):
super(SGD, self).__init__(*args, **kwargs)
def update (self, params, gradients):
'''
Update the given parameters according to the class optimization algorithm
Parameters
----------
params : list
List of parameters to update
gradients : list
List of corresponding gradients
Returns
-------
params : list
The updated parameters
'''
for p, g in zip(params, gradients):
p -= self.lr * g # np.clip(g, -1., 1.)
super(SGD, self).update(params, gradients)
return params
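# --- Hedged example (illustrative only; not part of the original module) ---
# A minimal sketch of a single SGD step on toy parameters; the numbers are
# made up. With lr=0.1 the parameter moves from [1.0, -2.0] to [0.95, -2.05].
def _example_sgd_step():
    params = [np.array([1.0, -2.0])]
    gradients = [np.array([0.5, 0.5])]
    opt = SGD(lr=0.1)
    (updated,) = opt.update(params, gradients)
    return updated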
class Momentum (Optimizer):
'''
Stochastic Gradient Descent with Momentum specialiation
Update the parameters according to the rule
.. code-block:: python
v = momentum * v - lr * gradient
parameter += v - learning_rate * gradient
Parameters
----------
momentum : float (default=0.9)
Momentum value
*args : list
Class specialization variables.
**kwargs : dict
Class Specialization variables.
'''
def __init__ (self, momentum=.9, *args, **kwargs):
super(Momentum, self).__init__(*args, **kwargs)
self.momentum = momentum
self.velocity = None
def update (self, params, gradients):
'''
Update the given parameters according to the class optimization algorithm
Parameters
----------
params : list
List of parameters to update
gradients : list
List of corresponding gradients
Returns
-------
params : list
The updated parameters
'''
if self.velocity is None:
            self.velocity = [np.zeros(shape=p.shape, dtype=float) for p in params]
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CMAP
import matplotlib.animation as MOV
from matplotlib import ticker
from scipy.interpolate import griddata
import datetime as DT
import time
import progressbar as PGB
import healpy as HP
import geometry as GEOM
import interferometry as RI
import catalog as CTLG
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
## Input/output parameters
telescope_id = 'custom'
element_size = 0.74
element_shape = 'delta'
phased_array = True
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
ground_plane = 0.3 # height of antenna element above ground plane
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
obs_mode = 'custom'
avg_drifts = False
beam_switch = False
snapshot_type_str = ''
if avg_drifts:
snapshot_type_str = 'drift_averaged_'
if beam_switch:
snapshot_type_str = 'beam_switches_'
n_sky_sectors = 4
sky_sector = 3 # if None, use all sky sector. Accepted values are None, 0, 1, 2, or 3
if sky_sector is None:
sky_sector_str = '_all_sky_'
n_sky_sectors = 1
sky_sector = 0
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(sky_sector)
Tsys = 90.0 # System temperature in K
freq = 185.0 * 1e6 # foreground center frequency in Hz
freq_resolution = 80e3 # in Hz
coarse_channel_resolution = 1.28e6 # in Hz
bpass_shape = 'bnw'
f_pad = 1.0
oversampling_factor = 1.0 + f_pad
n_channels = 384
nchan = n_channels
max_abs_delay = 2.5 # in micro seconds
window = n_channels * DSP.windowing(n_channels, shape=bpass_shape, pad_width=0, centering=True, area_normalize=True)
nside = 64
use_GSM = False
use_DSM = True
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_CSM:
fg_str = 'csm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
else:
fg_str = 'other'
antenna_file = '/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt'
ant_locs = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
bl, bl_id = RI.baseline_generator(ant_locs[:,1:], ant_id=ant_locs[:,0].astype(int).astype(str), auto=False, conjugate=False)
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
bl_id = bl_id[sortind]
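# --- Hedged example (illustrative only; not part of the original script) ---
# Small sketch of the geometry used above: baseline length is the Euclidean
# norm of the east/north components, and orientation is the argument of the
# corresponding complex number. The numbers are made up.
def _example_baseline_geometry():
    east, north = 30.0, 40.0
    length = NP.sqrt(east**2 + north**2)                  # 50.0 m
    orientation = NP.angle(east + 1j * north, deg=True)   # ~53.13 deg
    return length, orientation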
n_bins_baseline_orientation = 4
n_bl_chunks = 32
baseline_chunk_size = 64
neg_bl_orientation_ind = bl_orientation < 0.0
# neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bl_chunk = range(len(baseline_bin_indices))
bl_chunk = bl_chunk[:n_bl_chunks]
bl = bl[:baseline_bin_indices[n_bl_chunks],:]
bl_length = bl_length[:baseline_bin_indices[n_bl_chunks]]
bl_orientation = bl_orientation[:baseline_bin_indices[n_bl_chunks]]
bl_id = bl_id[:baseline_bin_indices[n_bl_chunks]]
neg_bl_orientation_ind = bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
## Plot distribution of baseline lengths and distributions
bl_length_binsize = 20.0
bl_length_bins = NP.linspace(0.0, NP.ceil(bl_length.max()/bl_length_binsize) * bl_length_binsize, NP.ceil(bl_length.max()/bl_length_binsize)+1)
bl_orientation_binsize=180.0/(2*n_bins_baseline_orientation)
bl_orientation_bins = NP.linspace(bl_orientation.min(), bl_orientation.max(), 2*n_bins_baseline_orientation+1)
labels = []
labels += ['B{0:0d}'.format(i+1) for i in xrange(bl.shape[0])]
roifile = '/data3/t_nithyanandan/project_MWA/roi_info_'+telescope_str+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
roi = RI.ROI_parameters(init_file=roifile)
telescope = roi.telescope
# telescope = {}
# telescope['id'] = telescope_id
# telescope['shape'] = element_shape
# telescope['size'] = element_size
# telescope['orientation'] = element_orientation
# telescope['ocoords'] = element_ocoords
# telescope['groundplane'] = ground_plane
fig = PLT.figure(figsize=(6,6))
ax1 = fig.add_subplot(211)
n, bins, patches = ax1.hist(bl_length, bins=bl_length_bins, histtype='step', lw=2, color='black')
ax1.xaxis.tick_top()
ax1.xaxis.set_label_position('top')
ax1.set_xlabel('Baseline Length [m]', fontsize=18, weight='medium')
ax1.set_ylabel('Number in bin', fontsize=18, weight='medium')
ax1.tick_params(which='major', length=18, labelsize=12)
ax1.tick_params(which='minor', length=12, labelsize=12)
for axis in ['top','bottom','left','right']:
ax1.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax1, 'xticklabels')
yticklabels = PLT.getp(ax1, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
ax2 = fig.add_subplot(212)
n, bins, patches = ax2.hist(bl_orientation, bins=bl_orientation_bins, histtype='step', lw=2, color='black')
ax2.set_xlabel('Baseline Orientation [deg]', fontsize=18, weight='medium')
ax2.set_ylabel('Number in bin', fontsize=18, weight='medium')
ax2.tick_params(which='major', length=18, labelsize=12)
ax2.tick_params(which='minor', length=12, labelsize=12)
for axis in ['top','bottom','left','right']:
ax2.spines[axis].set_linewidth(2)
xticklabels = PLT.getp(ax2, 'xticklabels')
yticklabels = PLT.getp(ax2, 'yticklabels')
PLT.setp(xticklabels, fontsize=15, weight='medium')
PLT.setp(yticklabels, fontsize=15, weight='medium')
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/baseline_properties.eps', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/baseline_properties.png', bbox_inches=0)
## Animation set up
backdrop_xsize = 100
fps = 0.5
interval = 100
animation_format = 'MP4'
if animation_format == 'MP4':
anim_format = '.mp4'
else:
    anim_format = '.gif'
animation_file = None
if animation_file is None:
animation_file = '/data3/t_nithyanandan/project_MWA/animations/multi_baseline_noiseless_visibilities_'+snapshot_type_str+obs_mode+'_'+'{0:0d}'.format(n_bl_chunks*baseline_chunk_size)+'_baselines_{0:0d}_orientations_'.format(n_bins_baseline_orientation)+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_sectors'.format(n_bins_baseline_orientation)
animation2_file = None
if animation2_file is None:
animation2_file = '/data3/t_nithyanandan/project_MWA/animations/delay_emission_map_'+snapshot_type_str+obs_mode+'_'+'{0:0d}'.format(n_bl_chunks*baseline_chunk_size)+'_baselines_{0:0d}_orientations_'.format(n_bins_baseline_orientation)+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_sectors'.format(n_bins_baseline_orientation)
lags = None
skyvis_lag = None
vis_lag = None
# # progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
# # for i in range(0, n_bl_chunks):
# # infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
# # hdulist = fits.open(infile+'.fits')
# # # extnames = [hdu.header['EXTNAME'] for hdu in hdulist]
# # if i == 0:
# # lags = hdulist['SPECTRAL INFO'].data.field('lag')
# # vis_lag = hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data
# # skyvis_lag = hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data
# # latitude = hdulist[0].header['latitude']
# # pointing_coords = hdulist[0].header['pointing_coords']
# # pointings_table = hdulist['POINTING INFO'].data
# # lst = pointings_table['LST']
# # n_snaps = lst.size
# # if pointing_coords == 'altaz':
# # pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
# # pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # elif pointing_coords == 'radec':
# # pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# # pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
# # pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # elif pointing_coords == 'hadec':
# # pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# # pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
# # pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# # pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# # else:
# # vis_lag = NP.vstack((vis_lag, hdulist['real_lag_visibility'].data + 1j * hdulist['imag_lag_visibility'].data))
# # skyvis_lag = NP.vstack((skyvis_lag, hdulist['real_lag_sky_visibility'].data + 1j * hdulist['imag_lag_sky_visibility'].data))
# # hdulist.close()
# # progress.update(i+1)
# # progress.finish()
# progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
# for i in range(0, n_bl_chunks):
# infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
# if i == 0:
# ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
# hdulist = fits.open(infile+'.fits')
# latitude = hdulist[0].header['latitude']
# pointing_coords = hdulist[0].header['pointing_coords']
# pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
# lst = pointings_table['LST']
# n_snaps = lst.size
# hdulist.close()
# if pointing_coords == 'altaz':
# pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
# pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# elif pointing_coords == 'radec':
# pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
# pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# elif pointing_coords == 'hadec':
# pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
# pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
# pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
# pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# else:
# ia_next = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
# ia.concatenate(ia_next, axis=0)
# progress.update(i+1)
# progress.finish()
infile = '/data3/t_nithyanandan/project_MWA/'+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)
ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
hdulist = fits.open(infile+'.fits')
latitude = hdulist[0].header['latitude']
pointing_coords = hdulist[0].header['pointing_coords']
pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
lst = pointings_table['LST']
n_snaps = lst.size
hdulist.close()
if pointing_coords == 'altaz':
pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'radec':
pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
elif pointing_coords == 'hadec':
pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
# pc = NP.asarray([90.0, 90.0]).reshape(1,-1)
# pc = NP.asarray([266.416837, -29.00781]).reshape(1,-1)
pc = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
pc_coords = 'dircos'
ia.phase_centering(phase_center=pc, phase_center_coords=pc_coords)
#################################################################################
# Find any negative orientation baselines and conjugate those visibilities
simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
simdata_neg_bl_orientation_ind = simdata_bl_orientation < 0.0
simdata_bl_orientation[simdata_neg_bl_orientation_ind] += 180.0
ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
# ia.baseline_orientations[simdata_neg_bl_orientation_ind] = 180.0 + ia.baseline_orientations[simdata_neg_bl_orientation_ind]
ia.vis_freq[simdata_neg_bl_orientation_ind,:,:] = ia.vis_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.skyvis_freq[simdata_neg_bl_orientation_ind,:,:] = ia.skyvis_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.vis_noise_freq[simdata_neg_bl_orientation_ind,:,:] = ia.vis_noise_freq[simdata_neg_bl_orientation_ind,:,:].conj()
ia.delay_transform(f_pad, freq_wts=window) # delay transform re-estimate
lags = ia.lags
vis_lag = ia.vis_lag
skyvis_lag = ia.skyvis_lag
if max_abs_delay is not None:
small_delays_ind = NP.abs(lags) <= max_abs_delay * 1e-6
lags = lags[small_delays_ind]
vis_lag = vis_lag[:,small_delays_ind,:]
skyvis_lag = skyvis_lag[:,small_delays_ind,:]
## Delay limits re-estimation
delay_matrix = DLY.delay_envelope(ia.baselines, pointings_dircos, units='mks')
fig = PLT.figure(figsize=(6,8))
ax1 = fig.add_subplot(211)
# ax1.set_xlabel('Baseline Length [m]', fontsize=18)
# ax1.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# dspec1 = ax1.pcolorfast(bl_length, 1e6*lags, NP.abs(skyvis_lag[:-1,:-1,0].T), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))))
# ax1.set_xlim(bl_length[0], bl_length[-1])
# ax1.set_ylim(1e6*lags[0], 1e6*lags[-1])
ax1.set_xlabel('Baseline Index', fontsize=18)
ax1.set_ylabel(r'lag [$\mu$s]', fontsize=18)
dspec1 = ax1.imshow(NP.abs(skyvis_lag[:,:,0].T), origin='lower', extent=(0, skyvis_lag.shape[0]-1, NP.amin(lags*1e6), NP.amax(lags*1e6)), norm=PLTC.LogNorm(NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))), interpolation=None)
ax1.set_aspect('auto')
ax2 = fig.add_subplot(212)
# ax2.set_xlabel('Baseline Length [m]', fontsize=18)
# ax2.set_ylabel(r'lag [$\mu$s]', fontsize=18)
# dspec2 = ax2.pcolorfast(bl_length, 1e6*lags, NP.abs(skyvis_lag[:-1,:-1,1].T), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))))
# ax2.set_xlim(bl_length[0], bl_length[-1])
# ax2.set_ylim(1e6*lags[0], 1e6*lags[-1])
ax2.set_xlabel('Baseline Index', fontsize=18)
ax2.set_ylabel(r'lag [$\mu$s]', fontsize=18)
dspec2 = ax2.imshow(NP.abs(skyvis_lag[:,:,1].T), origin='lower', extent=(0, skyvis_lag.shape[0]-1, NP.amin(lags*1e6), NP.amax(lags*1e6)), norm=PLTC.LogNorm(vmin=NP.amin(NP.abs(skyvis_lag)), vmax=NP.amax(NP.abs(skyvis_lag))), interpolation=None)
ax2.set_aspect('auto')
cbax = fig.add_axes([0.88, 0.08, 0.03, 0.9])
cb = fig.colorbar(dspec2, cax=cbax, orientation='vertical')
cbax.set_ylabel('Jy Hz', labelpad=-60, fontsize=18)
PLT.tight_layout()
fig.subplots_adjust(right=0.8)
fig.subplots_adjust(left=0.1)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+telescope_str+'multi_combined_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_'.format(Tsys, nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_snapshots.eps'.format(skyvis_lag.shape[2]), bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/project_MWA/figures/'+telescope_str+'multi_combined_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_'.format(Tsys, nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_{0:0d}_snapshots.png'.format(skyvis_lag.shape[2]), bbox_inches=0)
#################################################################################
backdrop_coords = 'radec'
if use_DSM or use_GSM:
backdrop_coords = 'radec'
if backdrop_coords == 'radec':
xmin = -180.0
xmax = 180.0
ymin = -90.0
ymax = 90.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize/2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
elif backdrop_coords == 'dircos':
xmin = -1.0
xmax = 1.0
ymin = -1.0
ymax = 1.0
xgrid, ygrid = NP.meshgrid(NP.linspace(xmin, xmax, backdrop_xsize), NP.linspace(ymin, ymax, backdrop_xsize))
nanind = (xgrid**2 + ygrid**2) > 1.0
goodind = (xgrid**2 + ygrid**2) <= 1.0
zgrid = NP.empty_like(xgrid)
zgrid[nanind] = NP.nan
zgrid[goodind] = NP.sqrt(1.0 - (xgrid[goodind]**2 + ygrid[goodind]**2))
xvect = xgrid.ravel()
yvect = ygrid.ravel()
zvect = zgrid.ravel()
xyzvect = NP.hstack((xvect.reshape(-1,1), yvect.reshape(-1,1), zvect.reshape(-1,1)))
if use_DSM or use_GSM:
dsm_file = '/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq/1e6,nside)
hdulist = fits.open(dsm_file)
dsm_table = hdulist[1].data
ra_deg = dsm_table['RA']
dec_deg = dsm_table['DEC']
temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]
fluxes = temperatures
backdrop = HP.cartview(temperatures.ravel(), coord=['G','E'], rot=[0,0,0], xsize=backdrop_xsize, return_projected_map=True)
elif use_GLEAM or use_SUMSS or use_NVSS or use_CSM:
if use_GLEAM:
catalog_file = '/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv' # GLEAM catalog
catdata = ascii.read(catalog_file, data_start=1, delimiter=',')
dec_deg = catdata['DEJ2000']
ra_deg = catdata['RAJ2000']
fpeak = catdata['S150_fit']
ferr = catdata['e_S150_fit']
freq_catalog = 1.4 # GHz
spindex = -0.83 + NP.zeros(fpeak.size)
fluxes = fpeak * (freq_catalog * 1e9 / freq)**spindex
elif use_SUMSS:
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg = ra_deg[PS_ind]
dec_deg = dec_deg[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 1.0
ra_deg = ra_deg[bright_source_ind]
dec_deg = dec_deg[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg = ra_deg[valid_ind]
dec_deg = dec_deg[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
freq_catalog = 0.843 # in GHz
spindex = -0.83 + NP.zeros(fint.size)
fluxes = fint * (freq_catalog*1e9/freq)**spindex
elif use_NVSS:
pass
else:
freq_SUMSS = 0.843 # in GHz
SUMSS_file = '/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt'
catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))
ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype="|S3")
sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])
sgn_dec = 1.0*NP.ones(dec_dd.size)
sgn_dec[sgn_dec_str == '-'] = -1.0
dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)
fmajax = catalog[:,7]
fminax = catalog[:,8]
fpa = catalog[:,9]
dmajax = catalog[:,10]
dminax = catalog[:,11]
PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]
dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]
fint = catalog[PS_ind,6] * 1e-3
spindex_SUMSS = -0.83 + NP.zeros(fint.size)
fmajax = fmajax[PS_ind]
fminax = fminax[PS_ind]
fpa = fpa[PS_ind]
dmajax = dmajax[PS_ind]
dminax = dminax[PS_ind]
bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS
ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]
dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]
fint = fint[bright_source_ind]
fmajax = fmajax[bright_source_ind]
fminax = fminax[bright_source_ind]
fpa = fpa[bright_source_ind]
dmajax = dmajax[bright_source_ind]
dminax = dminax[bright_source_ind]
spindex_SUMSS = spindex_SUMSS[bright_source_ind]
valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)
ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]
dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]
fint = fint[valid_ind]
fmajax = fmajax[valid_ind]
fminax = fminax[valid_ind]
fpa = fpa[valid_ind]
spindex_SUMSS = spindex_SUMSS[valid_ind]
freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)
catlabel = NP.repeat('SUMSS', fint.size)
ra_deg = ra_deg_SUMSS + 0.0
dec_deg = dec_deg_SUMSS
spindex = spindex_SUMSS
majax = fmajax/3.6e3
minax = fminax/3.6e3
fluxes = fint + 0.0
nvss_file = '/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits'
freq_NVSS = 1.4 # in GHz
hdulist = fits.open(nvss_file)
ra_deg_NVSS = hdulist[1].data['RA(2000)']
dec_deg_NVSS = hdulist[1].data['DEC(2000)']
nvss_fpeak = hdulist[1].data['PEAK INT']
nvss_majax = hdulist[1].data['MAJOR AX']
nvss_minax = hdulist[1].data['MINOR AX']
hdulist.close()
spindex_NVSS = -0.83 + NP.zeros(nvss_fpeak.size)
not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))
bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)
PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3
        count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))
"""
The MIT License (MIT)
Copyright (c) 2015 <NAME> (AKA Gato)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from camera import OrthoCamera
import libs.transformations as T
import numpy as N
from glcompat import *
from mathtools import floatArray
class Scene(object):
def __init__(self):
self._model_m_changed = True
self._camera = None
self._default_camera = OrthoCamera()
self.setCamera(None)
self._model_m = T.identity_matrix()
self._model_m_stack = [T.identity_matrix()]
self._light_position = floatArray([1000.0, 1000.0, 1000.0])
# these are computed and cached:
self._normal_m = None
self._modelview_m = None
self._light_m = None
self._view_m = self._projection_m = None
def pushTransform(self, add_transform=None):
self._model_m_stack.append(self._model_m)
if add_transform is not None:
self._model_m = N.dot(self._model_m, add_transform)
self._model_m_changed = True
def popTransform(self):
if len(self._model_m_stack)>1:
self._model_m = self._model_m_stack.pop()
else:
self._model_m = T.identity_matrix()
self._model_m_changed = True
def resetTransform(self):
self._model_m = T.identity_matrix()
del self._model_m_stack[1:]
self._model_m_changed = True
def replaceLastTransform(self, transform):
self._model_m = N.dot(self._model_m_stack[-1], transform)
self._model_m_changed = True
def _prepareMatrices(self):
if self._model_m_changed or self._camera_changed:
if self._camera_changed:
self._view_m, self._projection_m = self._camera.getMatrices()
self._camera_changed = False
self._light_m = self._view_m
self._modelview_m = N.dot(self._view_m, self._model_m)
self._model_m_changed = False
m = self._modelview_m[0:3,0:3]
try:
                self._normal_m = N.transpose(N.linalg.inv(m))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import sys,os
curr_dir = os.getcwd()
PyCore_dir = os.path.dirname(curr_dir)
sys.path.append(PyCore_dir)
#%%
import PyCORe_main as pcm
import time
start_time = time.time()
map2d_scan = np.zeros([],dtype=complex)
import pickle
import numpy as np
import zipfile
import gzip
import os
import random
import copy
from collections import OrderedDict
from astrodash.create_arrays import AgeBinning, CreateLabels, ArrayTools, CreateArrays
from astrodash.helpers import temp_list
random.seed(42)
class CreateTrainingSet(object):
def __init__(self, snidTemplateLocation, snidTempFileList, w0, w1, nw, nTypes, minAge, maxAge, ageBinSize, typeList,
minZ, maxZ, numOfRedshifts, galTemplateLocation, galTempFileList, hostTypes, nHostTypes,
trainFraction):
self.snidTemplateLocation = snidTemplateLocation
self.snidTempFileList = snidTempFileList
self.galTemplateLocation = galTemplateLocation
self.galTempFileList = galTempFileList
self.w0 = w0
self.w1 = w1
self.nw = nw
self.nTypes = nTypes
self.minAge = minAge
self.maxAge = maxAge
self.ageBinSize = ageBinSize
self.typeList = typeList
self.trainFraction = trainFraction
self.ageBinning = AgeBinning(self.minAge, self.maxAge, self.ageBinSize)
self.numOfAgeBins = self.ageBinning.age_bin(self.maxAge - 0.1) + 1
self.nLabels = self.nTypes * self.numOfAgeBins * nHostTypes
self.createArrays = CreateArrays(w0, w1, nw, nTypes, minAge, maxAge, ageBinSize, typeList, minZ, maxZ,
numOfRedshifts, hostTypes, nHostTypes)
self.arrayTools = ArrayTools(self.nLabels, self.nw)
def type_amounts(self, labels):
counts = self.arrayTools.count_labels(labels)
return counts
def all_templates_to_arrays(self, snTempFileList, galTemplateLocation):
"""
Parameters
----------
snTempFileList : list or dictionary
galTemplateLocation
Returns
-------
"""
images, labels, filenames, typeNames = self.createArrays.combined_sn_gal_arrays_multiprocessing(
self.snidTemplateLocation, snTempFileList, galTemplateLocation, self.galTempFileList)
arraysShuf = self.arrayTools.shuffle_arrays(images=images, labels=labels, filenames=filenames,
typeNames=typeNames, memmapName='all')
typeAmounts = self.type_amounts(labels)
return arraysShuf, typeAmounts
def train_test_split(self):
"""
Split training set before creating arrays.
Maybe should change this to include ages in train/test split instead of just SN files.
"""
snTempFileList = copy.copy(self.snidTempFileList)
fileList = temp_list(snTempFileList)
snAndAgeIdxDict = OrderedDict()
spectraList = []
# SPLIT BY SPECTRA
# Get number of spectra per file
for i, sn in enumerate(fileList):
with open(os.path.join(self.snidTemplateLocation, sn), 'r') as FileObj:
for lineNum, line in enumerate(FileObj):
# Read Header Info
if lineNum == 0:
header = (line.strip('\n')).split(' ')
header = [x for x in header if x != '']
numAges, nwx, w0x, w1x, mostKnots, tname, dta, ttype, ittype, itstype = header
numAges, mostKnots = map(int, (numAges, mostKnots))
elif lineNum == mostKnots + 2:
ages = np.array(line.split()[1:]).astype(float)
                        agesIndexesInRange = np.where((ages >= self.minAge) & (ages <= self.maxAge))
import pytest
import numpy as np
import torch
from torch.nn import Sequential
from hdrl import agents
import hdrl.experiment as experiment
import hdrl.models as models
""" GLOBALS """
SEED = 314
STATE = np.array([1.1, 2.2, 3.3, 4.4])
import geopandas as gpd
from osgeo import gdal, osr, ogr
from osgeo import gdal_array
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyproj
from ..base import GeoArray
def read_shp(path, encoding='utf-8'):
return gpd.read_file(path, encoding=encoding)
def write_shp(shp, path, encoding='utf-8'):
    shp.to_file(path, encoding=encoding)
def read_hdf(path, chans=None):
ds = gdal.Open(path)
sds = ds.GetSubDatasets()
imgs = []
if chans is None: chans = range(len(sds))
if isinstance(chans, int): chans = [chans]
for i in chans:
ds = gdal.Open(sds[i][0])
img = ds.ReadAsArray()
m = ds.GetGeoTransform()
m = np.array(m).reshape((2,3))
prj = ds.GetProjection()
imgs.append(img)
if len(chans) == 1: imgs = imgs[0]
else: imgs = np.array(imgs).transpose((1,2,0))
return GeoArray(imgs, prj, m)
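# --- Hedged example (illustrative only; not part of the original module) ---
# Minimal sketch of reading the first two subdatasets of an HDF file into a
# GeoArray; the path is a placeholder, and we assume GeoArray behaves like a
# NumPy array with the bands stacked on the last axis when several channels
# are requested.
def _example_read_hdf(path='scene.hdf'):
    ga = read_hdf(path, chans=[0, 1])
    return ga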
def read_tif(path, chans=None):
ds = gdal.Open(path)
if chans is None: chans = range(ds.RasterCount)
if isinstance(chans, int): chans = [chans]
prj = ds.GetProjection()
m = ds.GetGeoTransform()
    m = np.array(m)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
from PIL import Image
import torch
def create_class_mask(img, color_map, is_normalized_img=True, is_normalized_map=False, show_masks=False):
"""
Function to create C matrices from the segmented image, where each of the C matrices is for one class
with all ones at the pixel positions where that class is present
img = The segmented image
color_map = A list with tuples that contains all the RGB values for each color that represents
some class in that image
is_normalized_img = Boolean - Whether the image is normalized or not
If normalized, then the image is multiplied with 255
    is_normalized_map = Boolean - Represents whether the color map is normalized or not; if so,
                        the image is divided by 255 so that both use the same scale
    show_masks = Whether to show the created masks or not
"""
if is_normalized_img and (not is_normalized_map):
img *= 255
if is_normalized_map and (not is_normalized_img):
img = img / 255
mask = []
hw_tuple = img.shape[:-1]
for color in color_map:
color_img = []
for idx in range(3):
color_img.append(np.ones(hw_tuple) * color[idx])
color_img = np.array(color_img, dtype=np.uint8).transpose(1, 2, 0)
mask.append(np.uint8((color_img == img).sum(axis = -1) == 3))
    return np.array(mask)
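# --- Hedged example (illustrative only; not part of the original module) ---
# Minimal sketch of turning a two-colour segmentation image into per-class
# binary masks; the colour map entries are made up.
def _example_class_masks():
    seg = np.zeros((4, 4, 3), dtype=np.uint8)
    seg[:2] = (255, 0, 0)   # top half belongs to class 0
    seg[2:] = (0, 255, 0)   # bottom half belongs to class 1
    color_map = [(255, 0, 0), (0, 255, 0)]
    masks = create_class_mask(seg, color_map,
                              is_normalized_img=False, is_normalized_map=False)
    return masks  # shape (2, 4, 4), one 0/1 mask per class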
#!/usr/bin/env python3
"""
Gaussian mixture fitting with Nested Sampling. This module was tested in the
main `nestfit` repo on bare arrays and Gaussian components -- without a
spectral axis, units, or other necessary complications.
The `.wrapped` references a Cython implementation of the Gaussian model class.
"""
import ctypes
import operator
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
from scipy import (special, stats)
from matplotlib import ticker
from matplotlib import pyplot as plt
import corner
import pymultinest
from .wrapped import CGaussianModel
plt.rc('font', size=10, family='serif')
plt.rc('text', usetex=True)
plt.rc('xtick', direction='out', top=True)
plt.rc('ytick', direction='out', right=True)
ROOT_DIR = Path('/lustre/aoc/users/bsvoboda/temp/nestfit')
DATA_DIR = ROOT_DIR / Path('data')
PLOT_DIR = ROOT_DIR / Path('plots')
class SyntheticSpectrum:
def __init__(self, xaxis, amp, cen, std, noise=0.03, set_seed=False):
"""
Construct a mixture of Gaussians expressed as:
f(x) = A * exp(-(x - c)^2 / (2 * s^2))
for "A" amplitude, "c" centroid, and "s" standard deviation.
Parameters
----------
xaxis : np.ndarray
amp : np.ndarray
Array of Gaussian amplitudes
cen : np.ndarray
Array of Gaussian centroid positions
std : np.ndarray
            Array of Gaussian standard deviations
noise : float, default=0.03
Noise standard deviation
set_seed : bool, default=False
If `True` will use a default seed of 5 for the np.random module.
"""
if set_seed:
np.random.seed(5)
else:
np.random.seed()
self.xaxis = xaxis.reshape(-1, 1)
self.ncomp = len(amp)
self.size = self.xaxis.shape[0]
self.amp = amp
self.cen = cen
self.std = std
self.truths = np.concatenate([amp, cen, std])
self.noise = noise
self.components = self.profile().T
self.sum_spec = self.components.sum(axis=0)
self.noise_spec = np.random.normal(scale=self.noise, size=self.size)
self.sampled_spec = self.sum_spec + self.noise_spec
def profile(self):
return self.amp * np.exp(-(self.xaxis - self.cen)**2 / (2 * self.std**2))
def resample_spectrum(self, noise=None):
if noise is not None:
self.noise = noise
noise_spec = np.random.normal(scale=self.noise, size=self.size)
self.noise_spec = noise_spec
self.sampled_spec = self.sum_spec + self.noise_spec
def test_spectrum():
return SyntheticSpectrum(
np.linspace(-6, 6, 100),
amp=np.array([0.3, 0.5, 0.4]),
cen=np.array([-1, 0, 3]),
std=np.array([1.5, 1.0, 0.5]),
noise=0.03,
set_seed=True,
)
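# --- Hedged example (illustrative only; not part of the original module) ---
# Minimal sketch of building a noiseless single-component spectrum and checking
# that its peak matches the requested amplitude; the numbers are made up.
def _example_single_gaussian():
    spec = SyntheticSpectrum(
        np.linspace(-5, 5, 201),
        amp=np.array([1.0]),
        cen=np.array([0.0]),
        std=np.array([1.0]),
        noise=0.0,
    )
    return spec.sum_spec.max()  # ~1.0 at the centroid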
class GaussianModel:
model_name = 'gaussian'
def __init__(self, xaxis, ydata, noise, ncomp):
self.xaxis = xaxis.reshape(-1, 1)
self.size = xaxis.shape[0]
self.ydata = ydata
self.noise = noise
self.ncomp = ncomp
self.n_params = 3 * ncomp
self.lnpin = -self.size / 2 * np.log(2 * np.pi * noise**2)
self.null_lnZ = self.lnpin - np.sum(ydata**2) / (2 * self.noise**2)
#self.array_type = np.ctypeslib.ndpointer(
# ctypes.c_double, 1, (self.n_params,), 'C_CONTIGUOUS')
@property
def par_labels(self):
comps = range(1, self.ncomp+1)
return [
f'{label}{n}'
for label in ('a', 'c', 's')
for n in comps
]
def loglikelihood(self, theta, ndim, nparams):
n = self.ncomp
#atheta = ctypes.cast(theta, self.array_type).contents
atheta = np.ctypeslib.as_array(theta, shape=(self.n_params,))
amp = atheta[0 : n]
cen = atheta[ n:2*n]
std = atheta[2*n:3*n]
ymodel = np.sum(
            amp * np.exp(-(self.xaxis - cen)**2 / (2 * std**2)), axis=1)
import sys
import ctypes
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
import cv2
import numpy as np
#from pyassimp import load, release
from gibson2.core.render.mesh_renderer.glutils.meshutil import perspective, lookat, xyz2mat, quat2rotmat, mat2xyz, \
safemat2quat, xyzw2wxyz
from transforms3d.quaternions import axangle2quat, mat2quat
from transforms3d.euler import quat2euler, mat2euler
from gibson2.core.render.mesh_renderer import MeshRendererContext
from gibson2.core.render.mesh_renderer.get_available_devices import get_available_devices
import gibson2.core.render.mesh_renderer as mesh_renderer
import pybullet as p
import gibson2
import os
from gibson2.core.render.mesh_renderer import tinyobjloader
import platform
import logging
class VisualObject(object):
"""
    A visual object manages a set of VAOs and textures. One wavefront OBJ file is loaded into OpenGL
    and managed by a single VisualObject.
"""
def __init__(self, filename, VAO_ids, id, renderer):
"""
:param filename: filename of the obj file
:param VAO_ids: VAO_ids in OpenGL
:param id: renderer maintains a list of visual objects, id is the handle of a visual object
:param renderer: pointer to the renderer
"""
self.VAO_ids = VAO_ids
self.filename = filename
self.texture_ids = []
self.id = id
self.renderer = renderer
def __str__(self):
return "Object({})->VAO({})".format(self.id, self.VAO_ids)
def __repr__(self):
return self.__str__()
class InstanceGroup(object):
"""
    An InstanceGroup is a set of visual objects that are grouped together because they are kinematically connected.
Robots and articulated objects are represented as instance groups.
"""
def __init__(self,
objects,
id,
link_ids,
pybullet_uuid,
class_id,
poses_trans,
poses_rot,
dynamic,
robot=None):
"""
:param objects: visual objects
:param id: id this instance_group
:param link_ids: link_ids in pybullet
:param pybullet_uuid: body id in pybullet
:param class_id: class_id to render semantics
:param poses_trans: initial translations for each visual object
:param poses_rot: initial rotation matrix for each visual object
:param dynamic: is the instance group dynamic or not
:param robot: The robot associated with this InstanceGroup
"""
# assert(len(objects) > 0) # no empty instance group
self.objects = objects
self.poses_trans = poses_trans
self.poses_rot = poses_rot
self.id = id
self.link_ids = link_ids
self.class_id = class_id
self.robot = robot
if len(objects) > 0:
self.renderer = objects[0].renderer
else:
self.renderer = None
self.pybullet_uuid = pybullet_uuid
self.dynamic = dynamic
self.tf_tree = None
def render(self):
"""
Render this instance group
"""
if self.renderer is None:
return
self.renderer.r.initvar_instance_group(self.renderer.shaderProgram,
self.renderer.V,
self.renderer.P,
self.renderer.lightpos,
self.renderer.lightcolor)
for i, visual_object in enumerate(self.objects):
for object_idx in visual_object.VAO_ids:
self.renderer.r.init_material_pos_instance(self.renderer.shaderProgram,
self.poses_trans[i],
self.poses_rot[i],
float(self.class_id) / 255.0,
self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].kd[:3],
float(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture()))
try:
texture_id = self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].texture_id
if texture_id is None:
texture_id = -1
if self.renderer.msaa:
buffer = self.renderer.fbo_ms
else:
buffer = self.renderer.fbo
self.renderer.r.draw_elements_instance(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
texture_id,
self.renderer.texUnitUniform,
self.renderer.VAOs[object_idx],
self.renderer.faces[object_idx].size,
self.renderer.faces[object_idx],
buffer)
finally:
self.renderer.r.cglBindVertexArray(0)
self.renderer.r.cglUseProgram(0)
def get_pose_in_camera(self):
mat = self.renderer.V.dot(self.pose_trans.T).dot(self.pose_rot).T
pose = np.concatenate([mat2xyz(mat), safemat2quat(mat[:3, :3].T)])
return pose
def set_position(self, pos):
"""
Set positions for each part of this InstanceGroup
:param pos: New translations
"""
self.pose_trans = np.ascontiguousarray(xyz2mat(pos))
def set_rotation(self, quat):
"""
Set rotations for each part of this InstanceGroup
:param quat: New quaternion in w,x,y,z
"""
self.pose_rot = np.ascontiguousarray(quat2rotmat(quat))
def __str__(self):
return "InstanceGroup({}) -> Objects({})".format(
self.id, ",".join([str(object.id) for object in self.objects]))
def __repr__(self):
return self.__str__()
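# --- Hedged example (illustrative only; not part of the original module) ---
# Minimal sketch of repositioning an existing InstanceGroup: set_position takes
# an xyz translation and set_rotation a quaternion in w,x,y,z order (identity
# shown here). The group argument is assumed to come from the renderer.
def _example_place_instance_group(group):
    group.set_position([0.0, 0.0, 1.0])
    group.set_rotation([1.0, 0.0, 0.0, 0.0])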
class Robot(InstanceGroup):
def __init__(self, *args, **kwargs):
super(Robot, self).__init__(*args, **kwargs)
def __str__(self):
return "Robot({}) -> Objects({})".format(
self.id, ",".join([str(object.id) for object in self.objects]))
class Instance(object):
"""
Instance is one instance of a visual object. One visual object can have multiple instances to save memory.
"""
def __init__(self, object, id, class_id, pybullet_uuid, pose_trans, pose_rot, dynamic, softbody):
self.object = object
self.pose_trans = pose_trans
self.pose_rot = pose_rot
self.id = id
self.class_id = class_id
self.renderer = object.renderer
self.pybullet_uuid = pybullet_uuid
self.dynamic = dynamic
self.softbody = softbody
def render(self):
"""
Render this instance
"""
if self.renderer is None:
return
# softbody: reload vertex position
if self.softbody:
# construct new vertex position into shape format
object_idx = self.object.VAO_ids[0]
vertices = p.getMeshData(self.pybullet_uuid)[1]
vertices_flattened = [item for sublist in vertices for item in sublist]
vertex_position = np.array(vertices_flattened).reshape((len(vertices_flattened)//3, 3))
shape = self.renderer.shapes[object_idx]
n_indices = len(shape.mesh.indices)
np_indices = shape.mesh.numpy_indices().reshape((n_indices,3))
shape_vertex_index = np_indices[:,0]
shape_vertex = vertex_position[shape_vertex_index]
# update new vertex position in buffer data
new_data = self.renderer.vertex_data[object_idx]
new_data[:, 0:shape_vertex.shape[1]] = shape_vertex
new_data = new_data.astype(np.float32)
# transform and rotation already included in mesh data
self.pose_trans = np.eye(4)
self.pose_rot = np.eye(4)
# update buffer data into VBO
self.renderer.r.render_softbody_instance(self.renderer.VAOs[object_idx], self.renderer.VBOs[object_idx], new_data)
self.renderer.r.initvar_instance(self.renderer.shaderProgram,
self.renderer.V,
self.renderer.P,
self.pose_trans,
self.pose_rot,
self.renderer.lightpos,
self.renderer.lightcolor)
for object_idx in self.object.VAO_ids:
self.renderer.r.init_material_instance(self.renderer.shaderProgram,
float(self.class_id) / 255.0,
self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].kd,
float(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture()))
try:
texture_id = self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].texture_id
if texture_id is None:
texture_id = -1
if self.renderer.msaa:
buffer = self.renderer.fbo_ms
else:
buffer = self.renderer.fbo
self.renderer.r.draw_elements_instance(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
texture_id,
self.renderer.texUnitUniform,
self.renderer.VAOs[object_idx],
self.renderer.faces[object_idx].size,
self.renderer.faces[object_idx],
buffer)
finally:
self.renderer.r.cglBindVertexArray(0)
self.renderer.r.cglUseProgram(0)
def get_pose_in_camera(self):
mat = self.renderer.V.dot(self.pose_trans.T).dot(self.pose_rot).T
pose = np.concatenate([mat2xyz(mat), safemat2quat(mat[:3, :3].T)])
return pose
def set_position(self, pos):
self.pose_trans = np.ascontiguousarray(xyz2mat(pos))
def set_rotation(self, quat):
"""
:param quat: New quaternion in w,x,y,z
"""
self.pose_rot = np.ascontiguousarray(quat2rotmat(quat))
def __str__(self):
return "Instance({}) -> Object({})".format(self.id, self.object.id)
def __repr__(self):
return self.__str__()
class Material(object):
def __init__(self, type='color', kd=[0.5, 0.5, 0.5], texture_id=None):
self.type = type
self.kd = kd
self.texture_id = texture_id
def is_texture(self):
return self.type == 'texture'
def __str__(self):
return "Material(type: {}, texture_id: {}, color: {})".format(self.type, self.texture_id,
self.kd)
def __repr__(self):
return self.__str__()
class MeshRenderer(object):
"""
MeshRenderer is a lightweight OpenGL renderer. It manages a set of visual objects, and instances of those objects.
    It also manages a device on which to create the OpenGL context, and creates buffers to store the rendering results.
"""
def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, use_fisheye=False, msaa=False):
"""
:param width: width of the renderer output
        :param height: height of the renderer output
:param vertical_fov: vertical field of view for the renderer
:param device_idx: which GPU to run the renderer on
:param use_fisheye: use fisheye shader or not
"""
self.shaderProgram = None
self.fbo = None
self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d = None, None, None, None
self.depth_tex = None
self.VAOs = []
self.VBOs = []
self.textures = []
self.objects = []
self.visual_objects = []
self.vertex_data = []
self.shapes = []
self.texUnitUniform = None
self.width = width
self.height = height
self.faces = []
self.instances = []
self.fisheye = use_fisheye
# self.context = glcontext.Context()
# self.context.create_opengl_context((self.width, self.height))
available_devices = get_available_devices()
if device_idx < len(available_devices):
device = available_devices[device_idx]
logging.info("Using device {} for rendering".format(device))
else:
logging.info("Device index is larger than number of devices, falling back to use 0")
device = 0
self.device_idx = device_idx
self.device_minor = device
self.msaa = msaa
if platform.system() == 'Darwin':
from gibson2.core.render.mesh_renderer import GLFWRendererContext
self.r = GLFWRendererContext.GLFWRendererContext(width, height)
else:
self.r = MeshRendererContext.MeshRendererContext(width, height, device)
self.r.init()
self.glstring = self.r.getstring_meshrenderer()
logging.debug('Rendering device and GL version')
logging.debug(self.glstring)
self.colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.lightcolor = [1, 1, 1]
logging.debug('Is using fisheye camera: {}'.format(self.fisheye))
if self.fisheye:
[self.shaderProgram, self.texUnitUniform] = self.r.compile_shader_meshrenderer(
"".join(open(
os.path.join(os.path.dirname(mesh_renderer.__file__),
'shaders/fisheye_vert.shader')).readlines()).replace(
"FISHEYE_SIZE", str(self.width / 2)),
"".join(open(
os.path.join(os.path.dirname(mesh_renderer.__file__),
'shaders/fisheye_frag.shader')).readlines()).replace(
"FISHEYE_SIZE", str(self.width / 2)))
else:
[self.shaderProgram, self.texUnitUniform] = self.r.compile_shader_meshrenderer(
"".join(open(
os.path.join(os.path.dirname(mesh_renderer.__file__),
'shaders/vert.shader')).readlines()),
"".join(open(
os.path.join(os.path.dirname(mesh_renderer.__file__),
'shaders/frag.shader')).readlines()))
self.lightpos = [0, 0, 0]
self.setup_framebuffer()
self.vertical_fov = vertical_fov
self.camera = [1, 0, 0]
self.target = [0, 0, 0]
self.up = [0, 0, 1]
P = perspective(self.vertical_fov, float(self.width) / float(self.height), 0.01, 100)
V = lookat(self.camera, self.target, up=self.up)
self.V = np.ascontiguousarray(V, np.float32)
self.P = np.ascontiguousarray(P, np.float32)
self.materials_mapping = {}
self.mesh_materials = []
def setup_framebuffer(self):
"""
Set up RGB, surface normal, depth and segmentation framebuffers for the renderer
"""
[self.fbo, self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d,
self.depth_tex] = self.r.setup_framebuffer_meshrenderer(self.width, self.height)
if self.msaa:
[self.fbo_ms, self.color_tex_rgb_ms, self.color_tex_normal_ms, self.color_tex_semantics_ms, self.color_tex_3d_ms,
self.depth_tex_ms] = self.r.setup_framebuffer_meshrenderer_ms(self.width, self.height)
def load_object(self,
obj_path,
scale= | np.array([1, 1, 1]) | numpy.array |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from gym.spaces import Box, Discrete
import smarts
from envision.client import Client as Envision
from smarts.core.agent_interface import AgentInterface, AgentType
from smarts.core.scenario import Scenario
from smarts.core.smarts import SMARTS
from smarts.core.sumo_traffic_simulation import SumoTrafficSimulation
from .adapters.action_adapter import (
DEFAULT_ACTION_SPACE,
N_ACTIONS,
default_action_adapter,
)
from .adapters.observation_adapter import DEFAULT_OBSERVATION_SPACE, default_obs_adapter
from .adapters.reward_adapter import default_reward_adapter
from .adapters.state_adapter import DEFAULT_STATE_SPACE, default_state_adapter
class PyMARLHiWayEnv:
"""This class adheres to the PyMARL MultiAgentEnv so it can be run by PyMARL.
See: https://git.io/JvMb9
    This environment expects a specific configuration:
config: a dictionary with the environment configuration
agent_specs:
a dictionary of agent_ids to agents that will run in the environment (required)
scenarios:
a list of directories of the scenarios that will be run (required)
sim_name:
a string that gives this simulation a name (default None)
envision_record_data_replay_path:
used to specify envision's data replay output directory (default None)
envision_endpoint:
used to specify envision's uri (default None)
headless:
true|false envision disabled (default True)
num_external_sumo_clients:
the number of SUMO clients beyond SMARTS (default 0)
seed:
the seed for random number generation (default 42)
sumo_auto_start:
true|false sumo will start automatically (default False)
sumo_headless:
true|false for sumo|sumo-gui (default False)
sumo_port:
used to specify a specific sumo port (default None)
fixed_timestep_sec:
the step length for all components of the simulation (default 0.1)
"""
def __init__(self, config):
self._config = config
# XXX: These are intentionally left public at PyMARL's request
self.n_agents = config.get("n_agents", 1)
self.episode_limit = config.get("episode_limit", 1000)
self.observation_space = config.get(
"observation_space", DEFAULT_OBSERVATION_SPACE
)
self.action_space = config.get("action_space", DEFAULT_ACTION_SPACE)
self.state_space = config.get("state_space", DEFAULT_STATE_SPACE)
self._agent_ids = ["Agent %i" % i for i in range(self.n_agents)]
self._reward_adapter = config.get("reward_adapter", default_reward_adapter)
self._observation_adapter = config.get(
"observation_adapter", default_obs_adapter
)
self._action_adapter = config.get("action_adapter", default_action_adapter)
self._done_adapter = config.get(
"done_adapter", lambda dones: list(dones.values())
)
self._state_adapter = config.get("state_adapter", default_state_adapter)
self._headless = config.get("headless", False)
self._fixed_timestep_sec = config.get("fixed_timestep_sec", 0.01)
self._observations = None
self._state = None
self._steps = 0
self._dones_registered = 0
seed = self._config.get("seed", 42)
smarts.core.seed(seed)
self._scenarios_iterator = Scenario.scenario_variations(
config["scenarios"], self._agent_ids
)
agent_interfaces = {
agent_id: AgentInterface.from_type(
config.get("agent_type", AgentType.Laner),
max_episode_steps=self.episode_limit,
debug=config.get("debug", False),
)
            for agent_id in self._agent_ids
}
envision = None
if not self._headless or config.get("envision_record_data_replay_path", None):
envision = Envision(
endpoint=config.get("envision_endpoint", None),
sim_name=config.get("sim_name", None),
output_dir=config.get("envision_record_data_replay_path", None),
)
self._smarts = SMARTS(
agent_interfaces=agent_interfaces,
traffic_sim=SumoTrafficSimulation(time_resolution=self._fixed_timestep_sec),
envision=envision,
fixed_timestep_sec=self._fixed_timestep_sec,
)
def get_obs(self):
""" Returns all agent observations in a list. """
return self._observations
def get_obs_agent(self, agent_id):
""" Returns the observation for the given agent. """
return self._observations[agent_id]
def get_obs_size(self):
""" Returns the total size of all agent observation data. """
obs_size = 0
for obs in self.observation_space.spaces.values():
if type(obs) is Box:
obs_size += np.prod(obs.shape)
elif type(obs) is Discrete:
obs_size += obs.n
return obs_size
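    # Illustrative note (the space contents are an assumption): a Dict observation
    # space holding a Box of shape (3,) and a Discrete(4) gives obs_size = 3 + 4 = 7.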
def get_state(self):
""" Returns the concatenated observations. """
return | np.concatenate(self._observations) | numpy.concatenate |
import datetime
from random import gauss
import numpy as np
import tkinter as tk
import pyscreenshot as ImageGrab # For Linux
from . import config as c
class GUI(object):
def __init__(self):
drawscale = 250.0
width = 3.0 # with 1.5 to each side
height = 2.0
gui = tk.Tk()
gui.title("Simulator")
gui.resizable(False, False)
self.panel_width = drawscale * width
self.panel_height = drawscale * height
panel = tk.Canvas(
gui,
width=self.panel_width,
height=self.panel_height,
background="white")
panel.pack()
groundHeight = 0
centerX = self.panel_width / 2
self.groundY = self.panel_height - groundHeight
# Draw grid
for step in range(0, int(self.panel_width+1), 20):
panel.create_line((step, 0, step, self.groundY), fill='lightgray')
panel.create_line((0, step, self.panel_width, step), fill='lightgray')
panel.create_rectangle(
1,
1,
self.panel_width-1,
self.groundY-1,
fill='',
width=10)
self.agents = []
self.gui = gui
self.panel = panel
self.drawscale = drawscale
self.centerX = centerX
self.marker_id = None
self.marker_color = 'yellow'
self.marker_rad = drawscale * 0.03
self.is_marked = False
self.time_step_id = panel.create_text(10, 10, text='', anchor=tk.W)
self.time_step_text = ''
# If true, no line from the ball position to the first prediction
# position will be drawn
self.hide_first_line = False
self.sensor_dirs = calc_sensor_dirs()
def register(self, agent):
self.agents.append(agent)
def add_terrain(self, terrain):
terrain_start = self.scale(np.copy(terrain.start))
terrain_end = self.scale(np.copy(terrain.end))
self.panel.create_rectangle(
*terrain_start,
*terrain_end,
fill=terrain.color,
width=0)
# Draw grid
for step in range(0, int(self.panel_width+1), 20):
self.panel.create_line((step, 0, step, self.groundY), fill='lightgray')
self.panel.create_line((0, step, self.panel_width, step), fill='lightgray')
self.panel.create_rectangle(
1,
1,
self.panel_width-1,
self.groundY-1,
fill='',
width=10)
def mark(self, position):
self.is_marked = True
self.marker_pos = self.scale(position)
def scale(self, coordinates):
result = np.zeros_like(coordinates)
if(coordinates.shape == (2,)):
result[0] = self.centerX + (coordinates[0] * self.drawscale)
result[1] = self.groundY - (coordinates[1] * self.drawscale)
# result = [result[i].item() for i in range(len(result))]
elif(len(coordinates.shape) == 2 and coordinates.shape[1] == 2):
result[:, 0] = self.centerX + (coordinates[:, 0] * self.drawscale)
result[:, 1] = self.groundY - (coordinates[:, 1] * self.drawscale)
# for i in range(result.shape[0]):
# for j in range(result.shape[1]):
# result[i, j] = result[i, j].item()
else:
raise SystemExit("Wrong Dimensionality in gui.scale()")
return result
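    # Worked example using the constants above: with drawscale=250, centerX=375 and
    # groundY=500, the world point (0.5, 1.0) maps to the panel point
    # (375 + 0.5*250, 500 - 1.0*250) = (500.0, 250.0); unscale() inverts this mapping.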
def unscale(self, gui_coordinates):
result = np.zeros_like(gui_coordinates, dtype=np.float32)
result[0] = (gui_coordinates[0] - self.centerX) / self.drawscale
result[1] = (self.groundY - gui_coordinates[1]) / self.drawscale
return result
def update_time_step(self, from_step, to_step=None):
if(to_step is None):
self.time_step_text = 't: ' + str(from_step)
else:
self.time_step_text = 't: ' + str(
from_step) + ' to ' + str(to_step)
    '''
    Variables that draw needs:
        ball_pos
        thrust_activity: the values of the thrust activity; depends on the motor activity.
        thrust_ids: array of ids of all thrusts, shape (num_of_thrusts,)
    Constants that run needs:
        ball_rad
        thrust_directions: x and y coordinates per thrust, shape (num_of_thrusts, 2)
        thrust_color
        ball_color
        dtmsec: the update frequency
    '''
def draw(self):
for a in self.agents:
if a.gui_att.scv_points is not None and a.gui_att.show_scv_targets is True:
a.gui_att.scv_ids = self.draw_scv_targets(
a.gui_att.predictions,
a.gui_att.scv_points,
a.gui_att.scv_ids,
a.gui_att.scv_text_ids)
a.gui_att.thrust_ids = self.draw_thrusts(
a.gui_att.thrust_ids,
a.gui_att.num_thrusts,
a.gui_att.ball_pos,
a.gui_att.thrust_directions,
a.gui_att.thrust_activity,
a.gui_att.thrust_factor,
a.gui_att.thrust_color,
a.gui_att.ball_rad)
a.gui_att.ball_id = self.draw_ball(
a.gui_att.ball_id,
a.gui_att.ball_pos,
a.gui_att.ball_rad,
a.gui_att.ball_color)
a.gui_att.ball_real_id = self.draw_ball_real(
a.gui_att.ball_real_id,
a.gui_att.is_ball_real_pos_set,
a.gui_att.ball_real_pos,
a.gui_att.ball_rad)
if a.gui_att.show_target is True:
a.gui_att.target_id = self.draw_target(
a.gui_att.target_id,
a.gui_att.target_pos,
a.gui_att.target_rad,
a.gui_att.target_color)
if a.gui_att.show_predictions is True and a.gui_att.predictions is not None:
a.gui_att.prediction_ids, a.gui_att.prediction_point_ids = self.draw_prediction_line(
a.gui_att.prediction_ids, a.gui_att.prediction_point_ids, a.gui_att.ball_pos, a.gui_att.predictions)
if a.gui_att.show_simulated_positions is True and a.gui_att.simulated_positions is not None:
a.gui_att.simulated_position_ids = self.draw_simulated_positions_line(
a.gui_att.simulated_position_ids,
a.gui_att.is_ball_real_pos_set,
a.gui_att.ball_real_pos,
a.gui_att.ball_pos,
a.gui_att.simulated_positions)
a.gui_att.ball_name_id = self.draw_ball_name(
a.gui_att.ball_name_id,
a.id,
a.gui_att.ball_pos,
a.gui_att.ball_rad,
a.gui_att.ball_name_color)
# if a.gui_att.sensor_activity is not None:
# a.gui_att.sensor_ray_ids = self.draw_sensor_activity(a.gui_att.sensor_ray_ids, a.gui_att.sensor_activity, a.gui_att.ball_pos, a.gui_att.sensor_ray_length)
if a.gui_att.sensor_predictions is not None:
a.gui_att.sensor_prediction_ids = self.draw_sensor_predictions(
a.gui_att.sensor_predictions, a.gui_att.sensor_prediction_ids, a.gui_att.predictions)
if self.marker_id is not None:
self.panel.delete(self.marker_id)
if self.is_marked:
self.marker_id = self.panel.create_oval(
self.marker_pos[0] + self.marker_rad,
self.marker_pos[1] + self.marker_rad,
self.marker_pos[0] - self.marker_rad,
self.marker_pos[1] - self.marker_rad,
fill=self.marker_color,
outline='black'
)
self.panel.itemconfigure(self.time_step_id, text=self.time_step_text)
# Now Update the GUI
self.gui.update_idletasks()
self.gui.update()
# -----------
# Draw methods
# -----------
def draw_thrusts(self, thrust_ids, num_thrusts, ball_pos,
thrust_directions, thrust_activity, thrust_factor,
thrust_color, ball_rad):
ball_pos = self.scale(np.copy(ball_pos))
for i in range(num_thrusts):
r = gauss(0, 0.1)
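            # Each thrust is drawn as a small triangle: one vertex is pushed away from the
            # ball opposite the thrust direction (screen y is inverted), scaled by the
            # thrust activity and jittered by r; the other two vertices straddle the ball
            # centre at +/-5 px.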
            points = [
                ball_pos[0] - int((1.0 + r) * thrust_directions[i, 0] * thrust_activity[i]
                                  * thrust_factor * (ball_rad * self.drawscale)),
                ball_pos[1] + int((1.0 + r) * thrust_directions[i, 1] * thrust_activity[i]
                                  * thrust_factor * (ball_rad * self.drawscale)),
                ball_pos[0], ball_pos[1] - 5,
                ball_pos[0], ball_pos[1] + 5]
if thrust_ids[i] is not None:
self.panel.delete(thrust_ids[i])
points = [points[i].item() for i in range(len(points))]
thrust_ids[i] = self.panel.create_polygon(
points, fill=thrust_color)
return thrust_ids
def draw_ball(self, ball_id, ball_pos, ball_rad, ball_color):
ball_pos = self.scale( | np.copy(ball_pos) | numpy.copy |
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
from coremltools.models.neural_network import flexible_shape_utils
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
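    # Illustrative note: the comparison is relative once values exceed 1.0, e.g.
    # 100.0 vs 100.5 gives |100.0/100.5 - 100.5/100.5| ~= 0.005 < delta=0.01 -> pass.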
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
        This utility function is used to validate random-distribution layers.
        It validates the first `num_moments` (10 by default) moments of the predicted and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
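        # get_moment(data, k) is the k-th central moment, mean((x - mean(x))**k);
        # k=2 is the (biased) variance, k=3 relates to skewness, and so on.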
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(1, 10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
@unittest.skip('TO FIX')
def test_shape_flexibility_enumeration(self):
input_features = [('data', datatypes.Array(*(3,4,6)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [(1, 5, 7), (60, 5, 2), (22, 4, 9), (5, 3, 56)]
flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='data', enumerated_shapes=shapes)
shapes.append((3,4,6))
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_batched_mat_mul_cpu(self):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input, expected, useCPUOnly=True,
output_name_shape_dict=shape_dict)
def test_batched_mat_mul_with_transposes_cpu(self):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_batched_mat_mul_single_input_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=True)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_concat_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_like_cpu(self):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_static_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_broadcast_to_like_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = | np.random.randint(low=rank, high=6) | numpy.random.randint |
import os, sys
import numpy as np
import torch
import open3d as o3d
from . import pcd_utils
class Colors():
red = [0.8, 0.2, 0]
green = [0, 0.7, 0.2]
blue = [0, 0, 1]
gold = [1, 0.706, 0]
greenish = [0, 0.8, 0.506]
def visualize_point_tensor(
points_list, R, t,
colors_list=None,
compute_bbox_list=None,
additional_pcds=[],
exit_after=False,
convert_to_opengl_coords=True
):
assert len(points_list) == len(colors_list) == len(compute_bbox_list)
# World frame
referece_frame = create_frame(size=1.0)
additional_pcds.append(referece_frame)
# camera frame
camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
camera_frame.rotate(R, pcd_utils.origin)
camera_frame.translate(t, relative=True)
additional_pcds.append(camera_frame)
# Unit bbox
unit_bbox = create_unit_bbox()
additional_pcds.append(unit_bbox)
# Go over list of numpy arrays and convert them to o3d.geometry.PointClouds
# (maybe also create bboxes around them)
pcds = []
bboxes = []
for i, points in enumerate(points_list):
if torch.is_tensor(points):
points_np = points.cpu().numpy()
        elif isinstance(points, np.ndarray):
points_np = points
if len(points_np.shape) == 3:
# we then assume the first dimension is the batch_size
points_np = points_np.squeeze(axis=0)
if points_np.shape[1] > points_np.shape[0] and points_np.shape[0] == 3:
points_np = np.moveaxis(points_np, 0, -1) # [N, 3]
# transform to opengl coordinates
if convert_to_opengl_coords:
points_np = pcd_utils.transform_pointcloud_to_opengl_coords(points_np)
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points_np))
if colors_list is not None:
if colors_list[i] is not None:
color_np = colors_list[i] * np.ones_like(points_np)
pcd.colors = o3d.utility.Vector3dVector(color_np)
pcds.append(pcd)
if compute_bbox_list is not None:
if compute_bbox_list[i]:
bbox = pcd_utils.BBox(points_np)
bboxes.append(bbox.get_bbox_as_line_set())
# sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
# sphere = sphere.translate(np.array([0, -1, 0]), relative=True)
# sphere.paint_uniform_color([1.0, 0.0, 0.0])
# additional_pcds.append(sphere)
# sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
# sphere = sphere.translate(np.array([0, 0, 1]), relative=True)
# sphere.paint_uniform_color([1.0, 0.0, 0.0])
# additional_pcds.append(sphere)
# transform also additional_pcds if necessary
if convert_to_opengl_coords:
for additional_pcd in additional_pcds:
additional_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
o3d.visualization.draw_geometries([*additional_pcds, *pcds, *bboxes])
if exit_after:
exit()
def create_unit_bbox():
# unit bbox
unit_bbox = pcd_utils.BBox.compute_bbox_from_min_point_and_max_point(
np.array([-1, -1, -1]), np.array([1, 1, 1])
)
return unit_bbox
def create_frame(size=1.0, origin=[0, 0, 0]):
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=size, origin=origin
)
return frame
def create_lines_from_start_and_end_points(start_points, end_points, color=[201/255, 177/255, 14/255]):
if start_points.shape[1] > start_points.shape[0] and start_points.shape[0] == 3:
start_points = start_points.transpose()
end_points = end_points.transpose()
num_pairs = start_points.shape[0]
all_points = np.concatenate((start_points, end_points), axis=0)
lines = [[i, i + num_pairs] for i in range(0, num_pairs, 1)]
line_colors = [color for i in range(num_pairs)]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(all_points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(line_colors)
return line_set
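# Hedged usage sketch (not from the original file):
#     starts = np.zeros((5, 3)); ends = np.random.rand(5, 3)
#     line_set = create_lines_from_start_and_end_points(starts, ends)
#     o3d.visualization.draw_geometries([line_set])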
def create_lines_from_view_vectors(
view_vectors_original,
offsets_original,
dist_original,
R, t,
return_geoms=False,
convert_to_opengl_coords=False
):
view_vectors = np.copy(view_vectors_original)
offsets = np.copy(offsets_original)
dist = np.copy(dist_original)
# Move coordinates to the last axis
view_vectors = np.moveaxis(view_vectors, 0, -1) # [N, 3]
offsets = np.moveaxis(offsets, 0, -1) # [N, 3]
len_dist_shape = len(dist.shape)
if len_dist_shape == 1:
dist = dist[:, np.newaxis]
else:
dist = np.moveaxis(dist, 0, -1) # [N, 1]
N = offsets.shape[0] # number of points (and lines)
# Advance along the view_vectors by a distance of "dist"
end_points = offsets + view_vectors * dist
# Concatenate offsets and end_points into one array
points = np.concatenate((offsets, end_points), axis=0)
# Compute list of edges between offsets and end_points
lines = [[i, i + N] for i in range(0, N, 1)]
line_colors = [[201/255, 177/255, 14/255] for i in range(N)]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(line_colors)
# Offsets PointCloud
offsets_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(offsets))
offsets_pcd.paint_uniform_color(Colors.red)
# End points PointCloud
end_points_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(end_points))
end_points_pcd.paint_uniform_color(Colors.green)
# Concatenate PointClouds
pcds = [offsets_pcd, end_points_pcd]
# Convert to opengl coordinates if necessary
if not return_geoms or convert_to_opengl_coords:
offsets_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
end_points_pcd.transform(pcd_utils.T_opengl_cv_homogeneous)
line_set.transform(pcd_utils.T_opengl_cv_homogeneous)
if return_geoms:
return line_set, pcds
else:
# camera frame
camera_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
camera_frame.rotate(R, pcd_utils.origin)
camera_frame.translate(t, relative=True)
camera_frame.rotate(pcd_utils.T_opengl_cv, pcd_utils.origin) # convert to opengl coordinates for visualization
o3d.visualization.draw_geometries([camera_frame, *pcds, line_set])
exit()
def viz_and_exit(pcd_list):
o3d.visualization.draw_geometries(pcd_list)
exit()
def visualize_mesh(mesh_path):
# world frame
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.0, origin=[0, 0, 0]
)
mesh = o3d.io.read_triangle_mesh(mesh_path)
o3d.visualization.draw_geometries([world_frame, mesh])
def visualize_grid(points_list, colors=None, exit_after=True):
# world frame
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1.5, origin=[0, 0, 0]
)
world_frame = pcd_utils.rotate_around_axis(world_frame, axis_name="x", angle=-np.pi)
pcds = []
for i, points in enumerate(points_list):
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(np.moveaxis(points, 0, -1)))
pcd = pcd_utils.rotate_around_axis(pcd, "x", np.pi)
if colors:
pcd.paint_uniform_color(colors[i])
pcds.append(pcd)
o3d.visualization.draw_geometries([world_frame, *pcds])
if exit_after: exit()
def visualize_sphere():
import marching_cubes as mcubes
from utils.sdf_utils import sphere_tsdf
# Extract sphere with Marching cubes.
dim = 20
# Extract the 0-isosurface.
X, Y, Z = np.meshgrid(np.arange(-1, 1, 2.0 / dim), np.arange(-1, 1, 2.0 / dim), np.arange(-1, 1, 2.0 / dim))
sdf = sphere_tsdf(X, Y, Z)
vertices, triangles = mcubes.marching_cubes(sdf, 0)
# Convert extracted surface to o3d mesh.
mesh_sphere = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(vertices), o3d.utility.Vector3iVector(triangles))
mesh_sphere.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_sphere])
def merge_line_sets(line_sets):
    # Compute the total number of points, lines and line colors.
num_points = 0
num_lines = 0
num_line_colors = 0
for i in range(len(line_sets)):
num_points += np.asarray(line_sets[i].points).shape[0]
num_lines += np.asarray(line_sets[i].lines).shape[0]
num_line_colors += np.asarray(line_sets[i].colors).shape[0]
    # Merge points, lines and line colors.
points = np.zeros((num_points, 3), dtype=np.float64)
lines = np.zeros((num_lines, 2), dtype=np.int32)
line_colors = np.zeros((num_line_colors, 3), dtype=np.float64)
vertex_offset = 0
line_offset = 0
vertex_color_offset = 0
for i in range(len(line_sets)):
current_points = np.asarray(line_sets[i].points)
current_lines = np.asarray(line_sets[i].lines)
current_line_colors = np.asarray(line_sets[i].colors)
points[vertex_offset:vertex_offset + current_points.shape[0]] = current_points
lines[line_offset:line_offset + current_lines.shape[0]] = current_lines + vertex_offset
line_colors[vertex_color_offset:vertex_color_offset + current_line_colors.shape[0]] = current_line_colors
vertex_offset += current_points.shape[0]
line_offset += current_lines.shape[0]
vertex_color_offset += current_line_colors.shape[0]
# Create a merged line set object.
line_set = o3d.geometry.LineSet(o3d.utility.Vector3dVector(points), o3d.utility.Vector2iVector(lines))
line_set.colors = o3d.utility.Vector3dVector(line_colors)
return line_set
def merge_meshes(meshes):
# Compute total number of vertices and faces.
num_vertices = 0
num_triangles = 0
num_vertex_colors = 0
for i in range(len(meshes)):
num_vertices += np.asarray(meshes[i].vertices).shape[0]
num_triangles += np.asarray(meshes[i].triangles).shape[0]
num_vertex_colors += np.asarray(meshes[i].vertex_colors).shape[0]
# Merge vertices and faces.
vertices = np.zeros((num_vertices, 3), dtype=np.float64)
triangles = np.zeros((num_triangles, 3), dtype=np.int32)
vertex_colors = | np.zeros((num_vertex_colors, 3), dtype=np.float64) | numpy.zeros |
import itertools
import textwrap
import warnings
from datetime import datetime
from inspect import getfullargspec
from typing import Any, Iterable, Mapping, Tuple, Union
import numpy as np
import pandas as pd
from ..core.options import OPTIONS
from ..core.utils import is_scalar
try:
import nc_time_axis # noqa: F401
nc_time_axis_available = True
except ImportError:
nc_time_axis_available = False
ROBUST_PERCENTILE = 2.0
_registered = False
def register_pandas_datetime_converter_if_needed():
# based on https://github.com/pandas-dev/pandas/pull/17710
global _registered
if not _registered:
pd.plotting.register_matplotlib_converters()
_registered = True
def import_matplotlib_pyplot():
"""Import pyplot as register appropriate converters."""
register_pandas_datetime_converter_if_needed()
import matplotlib.pyplot as plt
return plt
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = "both"
elif extend_min:
extend = "min"
elif extend_max:
extend = "max"
else:
extend = "neither"
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = "max"
if extend == "both":
ext_n = 2
elif extend in ["min", "max"]:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, "name", cmap)
# copy colors to use for bad, under, and over values in case they have been
# set to non-default values
try:
# matplotlib<3.2 only uses bad color for masked values
bad = cmap(np.ma.masked_invalid([np.nan]))[0]
except TypeError:
# cmap was a str or list rather than a color-map object, so there are
# no bad, under or over values to check or copy
pass
else:
under = cmap(-np.inf)
over = cmap(np.inf)
new_cmap.set_bad(bad)
# Only update under and over if they were explicitly changed by the user
# (i.e. are different from the lowest or highest values in cmap). Otherwise
# leave unchanged so new_cmap uses its default values (its own lowest and
# highest values).
if under != cmap(0):
new_cmap.set_under(under)
if over != cmap(cmap.N - 1):
new_cmap.set_over(over)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1.0, n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, str):
# we have some sort of named palette
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
pal = cmap(colors_i)
except ValueError:
# ValueError happens when mpl doesn't like a colormap, try seaborn
try:
from seaborn import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ValueError, ImportError):
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
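# Illustrative note: _color_palette("viridis", 4) returns a (4, 4) RGBA array, while a
# list such as ["r", "g", "b"] is first wrapped in a ListedColormap of n_colors entries.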
# _determine_cmap_params is adapted from Seaborn:
# https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
# Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE.
def _determine_cmap_params(
plot_data,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
extend=None,
levels=None,
filled=True,
norm=None,
_is_facetgrid=False,
):
"""
Use some heuristics to set good defaults for colorbar and range.
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
import matplotlib as mpl
if isinstance(levels, Iterable):
levels = sorted(levels)
calc_data = np.ravel(plot_data[np.isfinite(plot_data)])
# Handle all-NaN input data gracefully
if calc_data.size == 0:
# Arbitrary default for when all values are NaN
calc_data = np.array(0.0)
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# Setting vmin or vmax implies linspaced levels
user_minmax = (vmin is not None) or (vmax is not None)
# vlim might be computed below
vlim = None
# save state; needed later
vmin_was_none = vmin is None
vmax_was_none = vmax is None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
levels_are_divergent = (
isinstance(levels, Iterable) and levels[0] * levels[-1] < 0
)
# kwargs not specific about divergent or not: infer defaults from data
divergent = (
((vmin < 0) and (vmax > 0)) or not center_is_none or levels_are_divergent
)
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# now check norm and harmonize with vmin, vmax
if norm is not None:
if norm.vmin is None:
norm.vmin = vmin
else:
if not vmin_was_none and vmin != norm.vmin:
raise ValueError("Cannot supply vmin and a norm with a different vmin.")
vmin = norm.vmin
if norm.vmax is None:
norm.vmax = vmax
else:
if not vmax_was_none and vmax != norm.vmax:
raise ValueError("Cannot supply vmax and a norm with a different vmax.")
vmax = norm.vmax
# if BoundaryNorm, then set levels
if isinstance(norm, mpl.colors.BoundaryNorm):
levels = norm.boundaries
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = OPTIONS["cmap_divergent"]
else:
cmap = OPTIONS["cmap_sequential"]
# Handle discrete levels
if levels is not None:
if is_scalar(levels):
if user_minmax:
levels = np.linspace(vmin, vmax, levels)
elif levels == 1:
levels = np.asarray([(vmin + vmax) / 2])
else:
# N in MaxNLocator refers to bins, not ticks
ticker = mpl.ticker.MaxNLocator(levels - 1)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
# GH3734
if vmin == vmax:
vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax)
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None or isinstance(norm, mpl.colors.BoundaryNorm):
cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)
norm = newnorm if norm is None else norm
return dict(
vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm
)
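# Illustrative sketch, not part of the upstream module: running a small array
# through _determine_cmap_params. The data, colormap name and level count are
# arbitrary; data straddling zero is treated as divergent, so the computed
# limits come out symmetric about zero.
def _example_determine_cmap_params():
    import numpy as np
    data = np.array([-2.0, -1.0, 0.0, 1.0, 3.0])
    params = _determine_cmap_params(data, cmap="RdBu_r", levels=5)
    assert set(params) == {"vmin", "vmax", "cmap", "extend", "levels", "norm"}
    return params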
def _infer_xy_labels_3d(darray, x, y, rgb):
"""
Determine x and y labels for showing RGB images.
Attempts to infer which dimension is RGB/RGBA by size and order of dims.
"""
assert rgb is None or rgb != x
assert rgb is None or rgb != y
# Start by detecting and reporting invalid combinations of arguments
assert darray.ndim == 3
not_none = [a for a in (x, y, rgb) if a is not None]
if len(set(not_none)) < len(not_none):
raise ValueError(
"Dimension names must be None or unique strings, but imshow was "
"passed x=%r, y=%r, and rgb=%r." % (x, y, rgb)
)
for label in not_none:
if label not in darray.dims:
raise ValueError(f"{label!r} is not a dimension")
# Then calculate rgb dimension if certain and check validity
could_be_color = [
label
for label in darray.dims
if darray[label].size in (3, 4) and label not in (x, y)
]
if rgb is None and not could_be_color:
raise ValueError(
"A 3-dimensional array was passed to imshow(), but there is no "
"dimension that could be color. At least one dimension must be "
"of size 3 (RGB) or 4 (RGBA), and not given as x or y."
)
if rgb is None and len(could_be_color) == 1:
rgb = could_be_color[0]
if rgb is not None and darray[rgb].size not in (3, 4):
raise ValueError(
"Cannot interpret dim %r of size %s as RGB or RGBA."
% (rgb, darray[rgb].size)
)
# If rgb dimension is still unknown, there must be two or three dimensions
# in could_be_color. We therefore warn, and use a heuristic to break ties.
if rgb is None:
assert len(could_be_color) in (2, 3)
rgb = could_be_color[-1]
warnings.warn(
"Several dimensions of this array could be colors. Xarray "
"will use the last possible dimension (%r) to match "
"matplotlib.pyplot.imshow. You can pass names of x, y, "
"and/or rgb dimensions to override this guess." % rgb
)
assert rgb is not None
# Finally, we pick out the red slice and delegate to the 2D version:
return _infer_xy_labels(darray.isel(**{rgb: 0}), x, y)
def _infer_xy_labels(darray, x, y, imshow=False, rgb=None):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array, or 3d for imshow only.
"""
assert x is None or x != y
if imshow and darray.ndim == 3:
return _infer_xy_labels_3d(darray, x, y, rgb)
if x is None and y is None:
if darray.ndim != 2:
raise ValueError("DataArray must be 2d")
y, x = darray.dims
elif x is None:
if y not in darray.dims and y not in darray.coords:
raise ValueError("y must be a dimension name if x is not supplied")
x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]
elif y is None:
if x not in darray.dims and x not in darray.coords:
raise ValueError("x must be a dimension name if y is not supplied")
y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]
elif any(k not in darray.coords and k not in darray.dims for k in (x, y)):
raise ValueError("x and y must be coordinate variables")
return x, y
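# Illustrative sketch, not part of the upstream module: label inference for a
# plain 2D DataArray. Importing xarray inside the helper is purely for the
# demonstration; the dimension names are arbitrary.
def _example_infer_xy_labels():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.zeros((3, 4)), dims=("lat", "lon"))
    # With no hints, the last dimension becomes x and the first becomes y.
    assert _infer_xy_labels(da, None, None) == ("lon", "lat")
    # Supplying x fixes y to the remaining dimension.
    assert _infer_xy_labels(da, "lon", None) == ("lon", "lat")
    return da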
def get_axis(figsize, size, aspect, ax):
import matplotlib as mpl
import matplotlib.pyplot as plt
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
if ax is None:
ax = plt.gca()
return ax
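# Illustrative sketch, not part of the upstream module: the three mutually
# exclusive ways of requesting an axes. The figure sizes are arbitrary and
# matplotlib is assumed to be installed.
def _example_get_axis():
    ax1 = get_axis(figsize=(4, 3), size=None, aspect=None, ax=None)
    ax2 = get_axis(figsize=None, size=5, aspect=2, ax=None)  # 10 x 5 inch figure
    ax3 = get_axis(figsize=None, size=None, aspect=None, ax=ax1)  # reuses ax1
    assert ax3 is ax1
    return ax1, ax2, ax3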
def label_from_attrs(da, extra="", wrap_width=30):
""" Makes informative labels if variable metadata (attrs) follows
CF conventions. """
if da.attrs.get("long_name"):
name = da.attrs["long_name"]
elif da.attrs.get("standard_name"):
name = da.attrs["standard_name"]
elif da.name is not None:
name = da.name
else:
name = ""
if da.attrs.get("units"):
units = " [{}]".format(da.attrs["units"])
else:
units = ""
return "\n".join(textwrap.wrap(name + extra + units, wrap_width))
def _interval_to_mid_points(array):
"""
Helper function which returns an array
with the Intervals' mid points.
"""
return np.array([x.mid for x in array])
def _interval_to_bound_points(array):
"""
Helper function which returns an array
with the Intervals' boundaries.
"""
array_boundaries = np.array([x.left for x in array])
array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right])))
return array_boundaries
def _interval_to_double_bound_points(xarray, yarray):
"""
Helper function to deal with a xarray consisting of pd.Intervals. Each
interval is replaced with both boundaries. I.e. the length of xarray
doubles. yarray is modified so it matches the new shape of xarray.
"""
xarray1 = np.array([x.left for x in xarray])
xarray2 = np.array([x.right for x in xarray])
xarray = list(itertools.chain.from_iterable(zip(xarray1, xarray2)))
yarray = list(itertools.chain.from_iterable(zip(yarray, yarray)))
return xarray, yarray
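# Illustrative sketch, not part of the upstream module: two intervals become
# four boundary points while the y values are duplicated to match.
def _example_interval_to_double_bound_points():
    import pandas as pd
    xs = [pd.Interval(0, 1), pd.Interval(1, 2)]
    ys = [10.0, 20.0]
    xd, yd = _interval_to_double_bound_points(xs, ys)
    assert list(xd) == [0, 1, 1, 2]
    assert list(yd) == [10.0, 10.0, 20.0, 20.0]
    return xd, yd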
def _resolve_intervals_1dplot(xval, yval, xlabel, ylabel, kwargs):
"""
Helper function to replace the values of x and/or y coordinate arrays
containing pd.Interval with their mid-points or - for step plots - double
points which double the length.
"""
# Is it a step plot? (see matplotlib.Axes.step)
if kwargs.get("drawstyle", "").startswith("steps-"):
# Convert intervals to double points
if _valid_other_type(np.array([xval, yval]), [pd.Interval]):
raise TypeError("Can't step plot intervals against intervals.")
if _valid_other_type(xval, [pd.Interval]):
xval, yval = _interval_to_double_bound_points(xval, yval)
if _valid_other_type(yval, [pd.Interval]):
yval, xval = _interval_to_double_bound_points(yval, xval)
# Remove steps-* to be sure that matplotlib is not confused
del kwargs["drawstyle"]
# Is it another kind of plot?
else:
# Convert intervals to mid points and adjust labels
if _valid_other_type(xval, [pd.Interval]):
xval = _interval_to_mid_points(xval)
xlabel += "_center"
if _valid_other_type(yval, [pd.Interval]):
yval = _interval_to_mid_points(yval)
ylabel += "_center"
# return converted arguments
return xval, yval, xlabel, ylabel, kwargs
def _resolve_intervals_2dplot(val, func_name):
"""
Helper function to replace the values of a coordinate array containing
pd.Interval with their mid-points or - for pcolormesh - boundaries which
increases length by 1.
"""
label_extra = ""
if _valid_other_type(val, [pd.Interval]):
if func_name == "pcolormesh":
val = _interval_to_bound_points(val)
else:
val = _interval_to_mid_points(val)
label_extra = "_center"
return val, label_extra
def _valid_other_type(x, types):
"""
Do all elements of x have a type from types?
"""
return all(any(isinstance(el, t) for t in types) for el in np.ravel(x))
def _valid_numpy_subdtype(x, numpy_types):
"""
    Is the dtype of x a sub-dtype of any of the given numpy_types?
"""
# If any of the types given in numpy_types is understood as numpy.generic,
# all possible x will be considered valid. This is probably unwanted.
for t in numpy_types:
assert not np.issubdtype(np.generic, t)
return any(np.issubdtype(x.dtype, t) for t in numpy_types)
def _ensure_plottable(*args):
"""
Raise exception if there is anything in args that can't be plotted on an
axis by matplotlib.
"""
numpy_types = [np.floating, np.integer, np.timedelta64, np.datetime64, np.bool_]
other_types = [datetime]
try:
import cftime
cftime_datetime = [cftime.datetime]
except ImportError:
cftime_datetime = []
other_types = other_types + cftime_datetime
for x in args:
if not (
_valid_numpy_subdtype(np.array(x), numpy_types)
or _valid_other_type(np.array(x), other_types)
):
raise TypeError(
"Plotting requires coordinates to be numeric, boolean, "
"or dates of type numpy.datetime64, "
"datetime.datetime, cftime.datetime or "
f"pandas.Interval. Received data of type {np.array(x).dtype} instead."
)
if (
_valid_other_type(np.array(x), cftime_datetime)
and not nc_time_axis_available
):
raise ImportError(
"Plotting of arrays of cftime.datetime "
"objects or arrays indexed by "
"cftime.datetime objects requires the "
"optional `nc-time-axis` (v1.2.0 or later) "
"package."
)
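# Illustrative sketch, not part of the upstream module: _ensure_plottable
# accepts numeric coordinates and rejects string data with a TypeError.
def _example_ensure_plottable():
    import numpy as np
    _ensure_plottable(np.arange(3), np.linspace(0.0, 1.0, 3))  # passes silently
    try:
        _ensure_plottable(np.array(["a", "b"]))
    except TypeError:
        return True
    return False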
def _is_numeric(arr):
numpy_types = [np.floating, np.integer]
return _valid_numpy_subdtype(arr, numpy_types)
def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params):
cbar_kwargs.setdefault("extend", cmap_params["extend"])
if cbar_ax is None:
cbar_kwargs.setdefault("ax", ax)
else:
cbar_kwargs.setdefault("cax", cbar_ax)
fig = ax.get_figure()
cbar = fig.colorbar(primitive, **cbar_kwargs)
return cbar
def _rescale_imshow_rgb(darray, vmin, vmax, robust):
assert robust or vmin is not None or vmax is not None
# Calculate vmin and vmax automatically for `robust=True`
if robust:
if vmax is None:
vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)
if vmin is None:
vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)
# If not robust and one bound is None, calculate the default other bound
# and check that an interval between them exists.
elif vmax is None:
vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1
if vmax < vmin:
raise ValueError(
"vmin=%r is less than the default vmax (%r) - you must supply "
"a vmax > vmin in this case." % (vmin, vmax)
)
elif vmin is None:
vmin = 0
if vmin > vmax:
raise ValueError(
"vmax=%r is less than the default vmin (0) - you must supply "
"a vmin < vmax in this case." % vmax
)
# Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float
# to avoid precision loss, integer over/underflow, etc with extreme inputs.
# After scaling, downcast to 32-bit float. This substantially reduces
# memory usage after we hand `darray` off to matplotlib.
darray = ((darray.astype("f8") - vmin) / (vmax - vmin)).astype("f4")
return np.minimum(np.maximum(darray, 0), 1)
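# Illustrative sketch, not part of the upstream module: rescaling 8-bit RGB
# values to the [0, 1] float range expected by imshow. With vmin given and
# vmax left unset, integer data defaults to vmax=255.
def _example_rescale_imshow_rgb():
    import numpy as np
    rgb = np.array([[[0, 128, 255]]], dtype=np.uint8)
    scaled = _rescale_imshow_rgb(rgb, vmin=0, vmax=None, robust=False)
    assert scaled.dtype == np.float32
    assert scaled.min() == 0.0 and scaled.max() == 1.0
    return scaled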
def _update_axes(
ax,
xincrease,
yincrease,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
):
"""
Update axes with provided parameters
"""
if xincrease is None:
pass
elif xincrease and ax.xaxis_inverted():
ax.invert_xaxis()
elif not xincrease and not ax.xaxis_inverted():
ax.invert_xaxis()
if yincrease is None:
pass
elif yincrease and ax.yaxis_inverted():
ax.invert_yaxis()
elif not yincrease and not ax.yaxis_inverted():
ax.invert_yaxis()
# The default xscale, yscale needs to be None.
# If we set a scale it resets the axes formatters,
# This means that set_xscale('linear') on a datetime axis
# will remove the date labels. So only set the scale when explicitly
# asked to. https://github.com/matplotlib/matplotlib/issues/8740
if xscale is not None:
ax.set_xscale(xscale)
if yscale is not None:
ax.set_yscale(yscale)
if xticks is not None:
ax.set_xticks(xticks)
if yticks is not None:
ax.set_yticks(yticks)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
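# Illustrative sketch, not part of the upstream module: flipping the x axis
# and switching y to a log scale on a fresh matplotlib axes. The limits are
# arbitrary.
def _example_update_axes():
    import matplotlib.pyplot as plt
    _, ax = plt.subplots()
    _update_axes(ax, xincrease=False, yincrease=True, yscale="log", ylim=(1, 100))
    assert ax.xaxis_inverted() and not ax.yaxis_inverted()
    return ax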
def _is_monotonic(coord, axis=0):
"""
>>> _is_monotonic(np.array([0, 1, 2]))
True
>>> _is_monotonic(np.array([2, 1, 0]))
True
>>> _is_monotonic(np.array([0, 2, 1]))
False
"""
if coord.shape[axis] < 3:
return True
else:
n = coord.shape[axis]
delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take(
np.arange(0, n - 1), axis=axis
)
delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take(
np.arange(0, n - 1), axis=axis
)
return np.all(delta_pos) or np.all(delta_neg)
def _infer_interval_breaks(coord, axis=0, check_monotonic=False):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
if check_monotonic and not _is_monotonic(coord, axis=axis):
raise ValueError(
"The input coordinate is not sorted in increasing "
"order along axis %d. This can lead to unexpected "
"results. Consider calling the `sortby` method on "
"the input DataArray. To plot data with categorical "
"axes, consider using the `heatmap` function from "
"the `seaborn` statistical plotting library." % axis
)
    deltas = 0.5 * np.diff(coord, axis=axis)
import ROOT
import numpy as np
import melp
from melp import Detector
from melp.clustering.misc import *
import melp.clustering.spatial_cluster as sclump
import melp.clustering.three_frame_cluster as clump_3
#----------------------------------------------
#simple threshold 1d time clustering
def time_clustering_frame(ttree_mu3e, printing = None):
time_clusters = {}
    #same as time_clusters, but without time information, so it can be used with the usual plotting routine
cluster_for_plt = {}
#set maximum time between hits to get assigned to same cluster
time_threshold = 0.4 #ns
#get hittimes (and hit tiles) in frame
    hittimes_frame = hittimes_in_frame(ttree_mu3e)
hit_tiles_frame_arr = []
hit_times_frame_arr = []
for key in hittimes_frame.keys():
hit_tiles_frame_arr.append(key)
hit_times_frame_arr.append(hittimes_frame[key][0])
#build clusters
added_tiles = []
for key1 in hittimes_frame.keys():
if key1 not in added_tiles:
time_cluster_tmp = []
cluster_for_plt_tmp = []
time_cluster_tmp.append([key1, hittimes_frame[key1][0]])
cluster_for_plt_tmp.append(key1)
added_tiles.append(key1)
for key2 in hittimes_frame.keys():
if key2 not in added_tiles:
if np.abs(hittimes_frame[key1][0] - hittimes_frame[key2][0]) < time_threshold and key2 != key1:
time_cluster_tmp.append([key2, hittimes_frame[key2][0]])
cluster_for_plt_tmp.append(key2)
added_tiles.append(key2)
time_clusters[key1] = time_cluster_tmp
cluster_for_plt[key1] = cluster_for_plt_tmp
#get highest and lowest time
    hittimes_frame = hittimes_in_frame(ttree_mu3e)
times_arr = []
for key in hittimes_frame.keys():
times_arr.append(hittimes_frame[key][0])
if printing == True:
print("Highest time: ", np.max(times_arr), " ns")
print("Lowest time: ", | np.min(times_arr) | numpy.min |
import unittest
import numpy as np
import scipy.sparse
from autosklearn.pipeline.implementations.MinorityCoalescer import MinorityCoalescer
class MinorityCoalescerTest(unittest.TestCase):
@property
def X1(self):
        # Generates an array with categories 3, 4, 5, 6, 7 and occurrences of 30%,
# 30%, 30%, 5% and 5% respectively
X = np.vstack((
np.ones((30, 10)) * 3,
np.ones((30, 10)) * 4,
np.ones((30, 10)) * 5,
np.ones((5, 10)) * 6,
np.ones((5, 10)) * 7,
))
for col in range(X.shape[1]):
            np.random.shuffle(X[:, col])
# -*- coding: utf-8 -*-
'''
Tests for NDCube
'''
from collections import OrderedDict
import datetime
import pytest
import numpy as np
import astropy.units as u
from ndcube import NDCube, NDCubeOrdered
from ndcube.utils.wcs import WCS, _wcs_slicer
from ndcube.tests import helpers
from ndcube.ndcube_sequence import NDCubeSequence
# sample data for tests
# TODO: use a fixture reading from a test file. file TBD.
ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
wt = WCS(header=ht, naxis=3)
data = np.array([[[1, 2, 3, 4], [2, 4, 5, 3], [0, -1, 2, 3]],
[[2, 4, 5, 1], [10, 5, 2, 2], [10, 3, 3, 0]]])
hm = {'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10,
'NAXIS1': 4,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm = WCS(header=hm, naxis=3)
h_disordered = {
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 10,
'NAXIS2': 4,
'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 2, 'CRVAL3': 0.5,
'NAXIS3': 3,
'CTYPE4': 'HPLN-TAN', 'CUNIT4': 'deg', 'CDELT4': 0.4, 'CRPIX4': 2, 'CRVAL4': 1, 'NAXIS4': 2}
w_disordered = WCS(header=h_disordered, naxis=4)
data_disordered = np.zeros((2, 3, 4, 2))
data_disordered[:, :, :, 0] = data
data_disordered[:, :, :, 1] = data
h_ordered = {
'CTYPE1': 'HPLN-TAN', 'CUNIT1': 'deg', 'CDELT1': 0.4, 'CRPIX1': 2, 'CRVAL1': 1, 'NAXIS1': 2,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'WAVE ', 'CUNIT3': 'Angstrom', 'CDELT3': 0.2, 'CRPIX3': 0, 'CRVAL3': 10,
'NAXIS3': 4,
'CTYPE4': 'TIME ', 'CUNIT4': 'min', 'CDELT4': 0.4, 'CRPIX4': 0, 'CRVAL4': 0, 'NAXIS4': 2}
w_ordered = WCS(header=h_ordered, naxis=4)
data_ordered = np.zeros((2, 4, 3, 2))
data_ordered[0] = data.transpose()
data_ordered[1] = data.transpose()
h_rotated = {'CTYPE1': 'HPLN-TAN', 'CUNIT1': 'arcsec', 'CDELT1': 0.4, 'CRPIX1': 0,
'CRVAL1': 0, 'NAXIS1': 5,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'arcsec', 'CDELT2': 0.5, 'CRPIX2': 0,
'CRVAL2': 0, 'NAXIS2': 5,
'CTYPE3': 'Time ', 'CUNIT3': 'seconds', 'CDELT3': 0.3, 'CRPIX3': 0,
'CRVAL3': 0, 'NAXIS3': 2,
'PC1_1': 0.714963912964, 'PC1_2': -0.699137151241, 'PC1_3': 0.0,
'PC2_1': 0.699137151241, 'PC2_2': 0.714963912964, 'PC2_3': 0.0,
'PC3_1': 0.0, 'PC3_2': 0.0, 'PC3_3': 1.0}
w_rotated = WCS(header=h_rotated, naxis=3)
data_rotated = np.array([[[1, 2, 3, 4, 6], [2, 4, 5, 3, 1], [0, -1, 2, 4, 2], [3, 5, 1, 2, 0]],
[[2, 4, 5, 1, 3], [1, 5, 2, 2, 4], [2, 3, 4, 0, 5], [0, 1, 2, 3, 4]]])
mask_cubem = data > 0
mask_cube = data >= 0
uncertaintym = data
uncertainty = np.sqrt(data)
mask_disordered = data_disordered > 0
uncertainty_disordered = data_disordered
mask_ordered = data_ordered > 0
uncertainty_ordered = data_ordered
cubem = NDCube(
data,
wm,
mask=mask_cubem,
uncertainty=uncertaintym,
extra_coords=[('time', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cube_disordered_inputs = (
data_disordered, w_disordered, mask_disordered, uncertainty_disordered,
[('spam', 0, u.Quantity(range(data_disordered.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data_disordered.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data_disordered.shape[2]), unit=u.pix))])
cube_disordered = NDCube(cube_disordered_inputs[0], cube_disordered_inputs[1],
mask=cube_disordered_inputs[2], uncertainty=cube_disordered_inputs[3],
extra_coords=cube_disordered_inputs[4])
cube_ordered = NDCubeOrdered(
data_ordered,
w_ordered,
mask=mask_ordered,
uncertainty=uncertainty_ordered,
extra_coords=[('spam', 3, u.Quantity(range(data_disordered.shape[0]), unit=u.pix)),
('hello', 2, u.Quantity(range(data_disordered.shape[1]), unit=u.pix)),
('bye', 1, u.Quantity(range(data_disordered.shape[2]), unit=u.pix))])
cube = NDCube(
data,
wt,
mask=mask_cube,
uncertainty=uncertainty,
missing_axis=[False, False, False, True],
extra_coords=[('time', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cubet = NDCube(
data,
wm,
mask=mask_cubem,
uncertainty=uncertaintym,
extra_coords=[('time', 0, np.array([datetime.datetime(2000, 1, 1)+datetime.timedelta(minutes=i)
for i in range(data.shape[0])])),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cube_rotated = NDCube(
data_rotated,
w_rotated,
mask=mask_cube,
uncertainty=uncertainty,
missing_axis=[False, False, False],
extra_coords=[('time', 0, u.Quantity(range(data_rotated.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data_rotated.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data_rotated.shape[2]), unit=u.pix))])
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[:, 1],
NDCube,
mask_cubem[:, 1],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1)),
data[:, 1],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)}}
),
(cubem[:, 0:2],
NDCube,
mask_cubem[:, 0:2],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), slice(0, 2, None))),
data[:, 0:2],
u.Quantity((2, 2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)},
'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)}}
),
(cubem[:, :],
NDCube,
mask_cubem[:, :],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), slice(None, None, None))),
data[:, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 1],
NDCube,
mask_cubem[1, 1],
_wcs_slicer(wm, [False, False, False], (1, 1)),
data[1, 1],
u.Quantity((4, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 0:2],
NDCube,
mask_cubem[1, 0:2],
_wcs_slicer(wm, [False, False, False], (1, slice(0, 2, None))),
data[1, 0:2],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, :],
NDCube,
mask_cubem[1, :],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None))),
data[1, :],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 1],
NDCube,
mask_cube[:, 1],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1)),
uncertainty[:, 1],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 0:2],
NDCube,
mask_cube[:, 0:2],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), slice(0, 2, None))),
uncertainty[:, 0:2],
u.Quantity((2, 2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, :],
NDCube,
mask_cube[:, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None))),
uncertainty[:, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 1],
NDCube,
mask_cube[1, 1],
_wcs_slicer(wt, [True, False, False, False], (1, 1)),
uncertainty[1, 1],
u.Quantity((4, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 0:2],
NDCube,
mask_cube[1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, slice(0, 2, None))),
uncertainty[1, 0:2],
u.Quantity((2, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, :],
NDCube,
mask_cube[1, :],
_wcs_slicer(wt, [True, False, False, False], (1, slice(0, 2, None))),
uncertainty[1, :],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_second_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[1],
NDCube,
mask_cubem[1],
_wcs_slicer(wm, [False, False, False], 1),
data[1],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[0:2],
NDCube,
mask_cubem[0:2],
_wcs_slicer(wm, [False, False, False], slice(0, 2, None)),
data[0:2],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[:],
NDCube,
mask_cubem[:],
_wcs_slicer(wm, [False, False, False], slice(None, None, None)),
data[:],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[1],
NDCube,
mask_cube[1],
_wcs_slicer(wt, [True, False, False, False], 1),
uncertainty[1],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[0:2],
NDCube,
mask_cube[0:2],
_wcs_slicer(wt, [True, False, False, False], slice(0, 2, None)),
uncertainty[0:2],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:],
NDCube,
mask_cube[:],
_wcs_slicer(wt, [True, False, False, False], slice(None, None, None)),
uncertainty[:],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_first_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[:, :, 1],
NDCube,
mask_cubem[:, :, 1],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), 1)),
data[:, :, 1],
u.Quantity((2, 3), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[:, :, 0:2],
NDCube,
mask_cubem[:, :, 0:2],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), slice(0, 2, None))),
data[:, :, 0:2],
u.Quantity((2, 3, 2), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[:, :, :],
NDCube,
mask_cubem[:, :, :],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), slice(None, None, None))),
data[:, :, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[:, 1, 1],
NDCube,
mask_cubem[:, 1, 1],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, 1)),
data[:, 1, 1],
u.Quantity((2, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lon']),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[:, 1, 0:2],
NDCube,
mask_cubem[:, 1, 0:2],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, slice(0, 2, None))),
data[:, 1, 0:2],
u.Quantity((2, 2), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[:, 1, :],
NDCube,
mask_cubem[:, 1, :],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, slice(None, None, None))),
data[:, 1, :],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, :, 1],
NDCube,
mask_cubem[1, :, 1],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), 1)),
data[1, :, 1],
u.Quantity((3, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lat']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[1, :, 0:2],
NDCube,
mask_cubem[1, :, 0:2],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), slice(0, 2, None))),
data[1, :, 0:2],
u.Quantity((3, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[1, :, :],
NDCube,
mask_cubem[1, :, :],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), slice(None, None, None))),
data[1, :, :],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 1, 1],
NDCube,
mask_cubem[1, 1, 1],
_wcs_slicer(wm, [False, False, False], (1, 1, 1)),
data[1, 1, 1],
u.Quantity((), unit=u.pix),
(),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[1, 1, 0:2],
NDCube,
mask_cubem[1, 1, 0:2],
_wcs_slicer(wm, [False, False, False], (1, 1, slice(0, 2, None))),
data[1, 1, 0:2],
u.Quantity((2, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[1, 1, :],
NDCube,
mask_cubem[1, 1, :],
_wcs_slicer(wm, [False, False, False], (1, 1, slice(None, None, None))),
data[1, 1, :],
u.Quantity((4, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, :, 1],
NDCube,
mask_cube[:, :, 1],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), 1)),
uncertainty[:, :, 1],
u.Quantity((2, 3), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[:, :, 0:2],
NDCube,
mask_cube[:, :, 0:2],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), slice(0, 2, None))),
uncertainty[:, :, 0:2],
u.Quantity((2, 3, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[:, :, :],
NDCube,
mask_cube[:, :, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), slice(None, None, None))),
uncertainty[:, :, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 1, 1],
NDCube,
mask_cube[:, 1, 1],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1, 1)),
uncertainty[:, 1, 1],
u.Quantity((2, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lat']),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[:, 1, 0:2],
NDCube,
mask_cube[:, 1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1, slice(0, 2, None))),
uncertainty[:, 1, 0:2],
u.Quantity((2, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[:, 1, :],
NDCube,
mask_cube[:, 1, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), 1, slice(None, None, None))),
uncertainty[:, 1, :],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, :, 1],
NDCube,
mask_cube[1, :, 1],
_wcs_slicer(wt, [True, False, False, False], (1, slice(None, None, None), 1)),
uncertainty[1, :, 1],
u.Quantity((3, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[1, :, 0:2],
NDCube,
mask_cube[1, :, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, slice(None, None, None), slice(0, 2, None))),
uncertainty[1, :, 0:2],
u.Quantity((3, 2), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[1, :, :],
NDCube,
mask_cube[1, :, :],
_wcs_slicer(wt, [True, False, False, False],
(1, slice(None, None, None), slice(None, None, None))),
uncertainty[1, :, :],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 1, 1],
NDCube,
mask_cube[1, 1, 1],
_wcs_slicer(wt, [True, False, False, False], (1, 1, 1)),
uncertainty[1, 1, 1],
u.Quantity((), unit=u.pix),
(),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[1, 1, 0:2],
NDCube,
mask_cube[1, 1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, 1, slice(0, 2, None))),
uncertainty[1, 1, 0:2],
u.Quantity((2, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[1, 1, :],
NDCube,
mask_cube[1, 1, :],
_wcs_slicer(wt, [True, False, False, False], (1, 1, slice(0, 2, None))),
uncertainty[1, 1, :],
u.Quantity((4, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_third_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize("test_input", [(cubem)])
def test_slicing_error(test_input):
with pytest.raises(IndexError):
test_input[None]
with pytest.raises(IndexError):
test_input[0, None]
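# Illustrative sketch, not part of the original test suite: a minimal check
# that slicing also trims the underlying data array, complementing the WCS,
# mask and dimension checks above.
def test_slicing_data_shape_sketch():
    assert cubem[1].data.shape == data[1].shape
    assert cube[:, 0:2].data.shape == data[:, 0:2].shape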
@pytest.mark.parametrize("test_input,expected", [
(cubem[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wm.wcs.crpix[2] - 1, 0)[-2]),
(cubem[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wm.wcs.crpix[2] - 1, 0)[0]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[-1]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[1]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[2],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[0]),
(cube[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[1]),
(cube[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[0]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[2]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[1]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[2],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[0])])
def test_pixel_to_world(test_input, expected):
assert np.all(test_input.value == expected)
@pytest.mark.parametrize("test_input,expected", [
(cubem[1].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[1],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), wm.wcs.crpix[2] - 1, 0)[0]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[0],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), 0)[-1]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[1],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), 0)[1]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[2],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), 0)[0]),
(cube[1].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min)
])[0],
wt.all_world2pix(
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[1]),
(cube[1].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min)
])[1],
wt.all_world2pix(
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[0]),
(cube[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min)
])[0],
wt.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m),
u.Quantity(np.arange(4), unit=u.min), wt.wcs.crpix[3] - 1, 0)[2]),
(cube[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
        u.Quantity(np.arange(4), unit=u.m),
#!/usr/bin/env python3
# dcfac0e3-1ade-11e8-9de3-00505601122b
# dce9cf60-42b6-11e9-b0fd-00505601122b
# 7d179d73-3e93-11e9-b0fd-00505601122b
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn.metrics
from sklearn.metrics.pairwise import rbf_kernel, polynomial_kernel
def kernel(x,y,gamma,degree,kernel):
if kernel == 'poly':
xTy = x.T * y
result = gamma * xTy + 1
return result**degree
elif kernel == 'rbf':
euclid = np.sum( (x - y) ** 2 )
result = -gamma * euclid
return np.exp(result)
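# Illustrative sketch, not part of the original assignment: cross-checks the
# rbf branch of kernel() against the closed-form expression for two scalar
# inputs. The sample values are arbitrary.
def _example_kernel_rbf():
    x, y, gamma = 0.5, -0.25, 1.0
    expected = np.exp(-gamma * (x - y) ** 2)
    assert np.isclose(kernel(x, y, gamma, degree=3, kernel='rbf'), expected)
    return expected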
def loss(true,preds,weights,bias):
bias = 0
result = 1/2 * np.square(true - preds - bias)
result = np.mean(result) + 1/2 * args.l2 * np.dot(weights,weights)
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--examples", default=50, type=int, help="Number of examples")
parser.add_argument("--kernel", default="rbf", type=str, help="Kernel type [poly|rbf]")
parser.add_argument("--kernel_degree", default=5, type=int, help="Degree for poly kernel")
parser.add_argument("--kernel_gamma", default=1.0, type=float, help="Gamma for poly and rbf kernel")
parser.add_argument("--iterations", default=1000, type=int, help="Number of training iterations")
parser.add_argument("--l2", default=0.0, type=float, help="L2 regularization weight")
parser.add_argument("--learning_rate", default=0.01, type=float, help="Learning rate")
parser.add_argument("--plot", default=False, action="store_true", help="Plot progress")
parser.add_argument("--seed", default=42, type=int, help="Random seed")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Generate an artifical regression dataset
train_data = np.linspace(-1, 1, args.examples)
train_targets = np.sin(5 * train_data) + np.random.normal(scale=0.25, size=args.examples) + 1
test_data = np.linspace(-1.2, 1.2, 2 * args.examples)
    test_targets = np.sin(5 * test_data) + 1
#!/usr/bin/env python
#
# slicecanvas.py - The SliceCanvas class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`SliceCanvas` class, which contains the
functionality to display a 2D slice from a collection of 3D overlays.
"""
import logging
import OpenGL.GL as gl
import numpy as np
import fsl.data.image as fslimage
import fsl.utils.idle as idle
import fsleyes_widgets.utils.status as status
import fsleyes_props as props
import fsleyes.strings as strings
import fsleyes.displaycontext.canvasopts as canvasopts
import fsleyes.gl.routines as glroutines
import fsleyes.gl.resources as glresources
import fsleyes.gl.globject as globject
import fsleyes.gl.textures as textures
import fsleyes.gl.annotations as annotations
log = logging.getLogger(__name__)
class SliceCanvas(object):
"""The ``SliceCanvas`` represents a canvas which may be used to display a
single 2D slice from a collection of 3D overlays. See also the
:class:`.LightBoxCanvas`, a sub-class of ``SliceCanvas``.
.. note:: The :class:`SliceCanvas` class is not intended to be instantiated
directly - use one of these subclasses, depending on your
use-case:
- :class:`.OSMesaSliceCanvas` for static off-screen rendering of
a scene using OSMesa.
- :class:`.WXGLSliceCanvas` for interactive rendering on a
:class:`wx.glcanvas.GLCanvas` canvas.
The ``SliceCanvas`` creates a :class:`.SliceCanvasOpts` instance to manage
    its settings. The scene displayed on a ``SliceCanvas`` instance can be
    manipulated via the properties of its ``SliceCanvasOpts`` instance,
which is accessed via the ``opts`` attribute.
**GL objects**
The ``SliceCanvas`` draws :class:`.GLObject` instances. When created, a
``SliceCanvas`` creates a :class:`.GLObject` instance for every overlay in
the :class:`.OverlayList`. When an overlay is added or removed, it
creates/destroys ``GLObject`` instances accordingly. Furthermore,
whenever the :attr:`.Display.overlayType` for an existing overlay
changes, the ``SliceCanvas`` destroys the old ``GLObject`` associated with
the overlay, and creates a new one.
The ``SliceCanvas`` also uses an :class:`.Annotations` instance, for
drawing simple annotations on top of the overlays. This ``Annotations``
instance can be accessed with the :meth:`getAnnotations` method.
**Performance optimisations**
The :attr:`.SliceCanvasOpts.renderMode` property controls the way in which
the ``SliceCanvas`` renders :class:`.GLObject` instances. It has three
settings:
============= ============================================================
``onscreen`` ``GLObject`` instances are rendered directly to the canvas.
``offscreen`` ``GLObject`` instances are rendered off-screen to a fixed
size 2D texture (a :class:`.RenderTexture`). This texture
is then rendered to the canvas. One :class:`.RenderTexture`
is used for every overlay in the :class:`.OverlayList`.
``prerender`` A stack of 2D slices for every ``GLObject`` instance is
pre-generated off-screen, and cached, using a
:class:`.RenderTextureStack`. When the ``SliceCanvas`` needs
to display a particular Z location, it retrieves the
appropriate slice from the stack, and renders it to the
canvas. One :class:`.RenderTextureStack` is used for every
overlay in the :class:`.OverlayList`.
============= ============================================================
**Attributes and methods**
The following attributes are available on a ``SliceCanvas``:
=============== ===========================================
``name`` A unique name for this ``SliceCanvas``
``opts`` Reference to the :class:`.SliceCanvasOpts`.
``overlayList`` Reference to the :class:`.OverlayList`.
``displayCtx`` Reference to the :class:`.DisplayContext`.
=============== ===========================================
The following convenience methods are available on a ``SliceCanvas``:
.. autosummary::
:nosignatures:
canvasToWorld
worldToCanvas
pixelSize
panDisplayBy
centreDisplayAt
panDisplayToShow
zoomTo
resetDisplay
getAnnotations
getViewport
"""
def __init__(self, overlayList, displayCtx, zax=0, opts=None):
"""Create a ``SliceCanvas``.
:arg overlayList: An :class:`.OverlayList` object containing a
collection of overlays to be displayed.
:arg displayCtx: A :class:`.DisplayContext` object which describes
how the overlays should be displayed.
:arg zax: Display coordinate system axis perpendicular to the
                          plane to be displayed (the *depth* axis), default 0.
        :arg opts:        An optional :class:`.SliceCanvasOpts` instance; if
                          not provided, a new one is created.
"""
if opts is None:
opts = canvasopts.SliceCanvasOpts()
self.opts = opts
self.overlayList = overlayList
self.displayCtx = displayCtx
self.name = '{}_{}'.format(self.__class__.__name__, id(self))
# A GLObject instance is created for
# every overlay in the overlay list,
# and stored in this dictionary
self._glObjects = {}
# A copy of the final viewport is
# stored here on each call to _draw.
# It is accessible via the getViewport
# method.
self.__viewport = None
        # If render mode is offscreen or prerender, these
# dictionaries will contain a RenderTexture or
# RenderTextureStack instance for each overlay in
# the overlay list. These dictionaries are
# respectively of the form:
# { overlay : RenderTexture }
# { overlay : (RenderTextureStack, name) }
#
self._offscreenTextures = {}
self._prerenderTextures = {}
# The zax property is the image axis which
# maps to the 'depth' axis of this canvas.
opts.zax = zax
# when any of the properties of this
# canvas change, we need to redraw
opts.addListener('zax', self.name, self._zAxisChanged)
opts.addListener('pos', self.name, self.Refresh)
opts.addListener('displayBounds', self.name, self.Refresh)
opts.addListener('bgColour', self.name, self.Refresh)
opts.addListener('cursorColour', self.name, self.Refresh)
opts.addListener('showCursor', self.name, self.Refresh)
opts.addListener('cursorGap', self.name, self.Refresh)
opts.addListener('invertX', self.name, self.Refresh)
opts.addListener('invertY', self.name, self.Refresh)
opts.addListener('zoom', self.name, self._zoomChanged)
opts.addListener('renderMode', self.name, self._renderModeChange)
opts.addListener('highDpi', self.name, self._highDpiChange)
# When the overlay list changes, refresh the
# display, and update the display bounds
self.overlayList.addListener('overlays',
self.name,
self._overlayListChanged)
self.displayCtx .addListener('overlayOrder',
self.name,
self.Refresh)
self.displayCtx .addListener('bounds',
self.name,
self._overlayBoundsChanged)
self.displayCtx .addListener('displaySpace',
self.name,
self._displaySpaceChanged)
self.displayCtx .addListener('syncOverlayDisplay',
self.name,
self._syncOverlayDisplayChanged)
# The zAxisChanged method
# will kick everything off
self._annotations = annotations.Annotations(self,
self.opts.xax,
self.opts.yax)
self._zAxisChanged()
def destroy(self):
"""This method must be called when this ``SliceCanvas`` is no longer
being used.
It removes listeners from all :class:`.OverlayList`,
:class:`.DisplayContext`, and :class:`.Display` instances, and
destroys OpenGL representations of all overlays.
"""
opts = self.opts
opts.removeListener('zax', self.name)
opts.removeListener('pos', self.name)
opts.removeListener('displayBounds', self.name)
opts.removeListener('showCursor', self.name)
opts.removeListener('invertX', self.name)
opts.removeListener('invertY', self.name)
opts.removeListener('zoom', self.name)
opts.removeListener('renderMode', self.name)
opts.removeListener('highDpi', self.name)
self.overlayList.removeListener('overlays', self.name)
self.displayCtx .removeListener('bounds', self.name)
self.displayCtx .removeListener('displaySpace', self.name)
self.displayCtx .removeListener('overlayOrder', self.name)
for overlay in self.overlayList:
disp = self.displayCtx.getDisplay(overlay)
globj = self._glObjects.get(overlay)
disp.removeListener('overlayType', self.name)
disp.removeListener('enabled', self.name)
# globj could be None, or could
# be False - see genGLObject.
if globj:
globj.deregister(self.name)
globj.destroy()
rt, rtName = self._prerenderTextures.get(overlay, (None, None))
ot = self._offscreenTextures.get(overlay, None)
if rt is not None: glresources.delete(rtName)
if ot is not None: ot .destroy()
self.opts = None
self.overlayList = None
self.displayCtx = None
self._glObjects = None
self._prerenderTextures = None
self._offscreenTextures = None
@property
def destroyed(self):
"""Returns ``True`` if a call to :meth:`destroy` has been made,
``False`` otherwise.
"""
return self.overlayList is None
def canvasToWorld(self, xpos, ypos, invertX=None, invertY=None):
"""Given pixel x/y coordinates on this canvas, translates them
into xyz display coordinates.
:arg invertX: If ``None``, taken from :attr:`.invertX`.
:arg invertY: If ``None``, taken from :attr:`.invertY`.
"""
opts = self.opts
if invertX is None: invertX = opts.invertX
if invertY is None: invertY = opts.invertY
realWidth = opts.displayBounds.xlen
realHeight = opts.displayBounds.ylen
canvasWidth, canvasHeight = [float(s) for s in self.GetSize()]
if invertX: xpos = canvasWidth - xpos
if invertY: ypos = canvasHeight - ypos
if realWidth == 0 or \
canvasWidth == 0 or \
realHeight == 0 or \
canvasHeight == 0:
return None
xpos = opts.displayBounds.xlo + (xpos / canvasWidth) * realWidth
ypos = opts.displayBounds.ylo + (ypos / canvasHeight) * realHeight
pos = [None] * 3
pos[opts.xax] = xpos
pos[opts.yax] = ypos
pos[opts.zax] = opts.pos[opts.zax]
return pos
def worldToCanvas(self, pos):
"""Converts a location in the display coordinate system into
an x/y location in pixels relative to this ``SliceCanvas``.
"""
opts = self.opts
xpos = pos[opts.xax]
ypos = pos[opts.yax]
invertX = opts.invertX
invertY = opts.invertY
xmin = opts.displayBounds.xlo
xlen = opts.displayBounds.xlen
ymin = opts.displayBounds.ylo
ylen = opts.displayBounds.ylen
width, height = [float(s) for s in self.GetSize()]
if xlen == 0 or \
ylen == 0 or \
width == 0 or \
height == 0:
return None
xpos = width * ((xpos - xmin) / xlen)
ypos = height * ((ypos - ymin) / ylen)
if invertX: xpos = width - xpos
if invertY: ypos = height - ypos
return xpos, ypos
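# Note (added for clarity, not part of the original source): canvasToWorld
# and worldToCanvas are intended to be inverses of each other on the two
# displayed axes. A minimal round-trip sketch, assuming a live canvas
# instance named `canvas` with a non-empty overlay list:
#
#     px, py = 100, 150                      # pixel coordinates
#     world  = canvas.canvasToWorld(px, py)
#     qx, qy = canvas.worldToCanvas(world)
#     # (qx, qy) should be approximately (100, 150) again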
def pixelSize(self):
"""Returns the current (x, y) size of one logical pixel in display
coordinates.
"""
w, h = self.GetSize()
xlen = self.opts.displayBounds.xlen
ylen = self.opts.displayBounds.ylen
return (xlen / w, ylen / h)
def panDisplayBy(self, xoff, yoff):
"""Pans the canvas display by the given x/y offsets (specified in
display coordinates).
"""
if len(self.overlayList) == 0: return
xmin, xmax, ymin, ymax = self.opts.displayBounds[:]
xmin = xmin + xoff
xmax = xmax + xoff
ymin = ymin + yoff
ymax = ymax + yoff
self.opts.displayBounds[:] = [xmin, xmax, ymin, ymax]
def centreDisplayAt(self, xpos, ypos):
"""Pans the display so the given x/y position is in the centre. """
xcentre, ycentre = self.getDisplayCentre()
self.panDisplayBy(xpos - xcentre, ypos - ycentre)
def getDisplayCentre(self):
"""Returns the horizontal/vertical position, in display coordinates,
of the current centre of the display bounds.
"""
bounds = self.opts.displayBounds
xcentre = bounds.xlo + (bounds.xhi - bounds.xlo) * 0.5
ycentre = bounds.ylo + (bounds.yhi - bounds.ylo) * 0.5
return xcentre, ycentre
def panDisplayToShow(self, xpos, ypos):
"""Pans the display so that the given x/y position (in display
coordinates) is visible.
"""
bounds = self.opts.displayBounds
# Do nothing if the position
# is already being displayed
if xpos >= bounds.xlo and xpos <= bounds.xhi and \
ypos >= bounds.ylo and ypos <= bounds.yhi: return
xoff = 0
yoff = 0
if xpos <= bounds.xlo: xoff = xpos - bounds.xlo
elif xpos >= bounds.xhi: xoff = xpos - bounds.xhi
if ypos <= bounds.ylo: yoff = ypos - bounds.ylo
elif ypos >= bounds.yhi: yoff = ypos - bounds.yhi
if xoff != 0 or yoff != 0:
self.panDisplayBy(xoff, yoff)
def zoomTo(self, xlo, xhi, ylo, yhi):
"""Zooms the canvas to the given rectangle, specified in
horizontal/vertical display coordinates.
"""
# We are going to convert the rectangle specified by
# the inputs into a zoom value, set the canvas zoom
# level, and then centre the canvas on the rectangle.
# Middle of the rectangle, used
# at the end for centering
xmid = xlo + (xhi - xlo) / 2.0
ymid = ylo + (yhi - ylo) / 2.0
# Size of the rectangle
rectXlen = abs(xhi - xlo)
rectYlen = abs(yhi - ylo)
if rectXlen == 0: return
if rectYlen == 0: return
# Size of the overlay bounding
# box, and the zoom value limits
opts = self.opts
xmin, xmax = self.displayCtx.bounds.getRange(opts.xax)
ymin, ymax = self.displayCtx.bounds.getRange(opts.yax)
zoommin = opts.getAttribute('zoom', 'minval')
zoommax = opts.getAttribute('zoom', 'maxval')
xlen = xmax - xmin
ylen = ymax - ymin
zoomlen = zoommax - zoommin
# Calculate the ratio of the
# rectangle to the canvas limits
xratio = rectXlen / xlen
yratio = rectYlen / ylen
ratio = max(xratio, yratio)
# Calculate the zoom from this ratio -
# this is the inverse of the zoom->canvas
# bounds calculation, as implemented in
# _applyZoom.
zoom = zoommin / ratio
zoom = ((zoom - zoommin) / zoomlen) ** (1.0 / 3.0)
zoom = zoommin + zoom * zoomlen
# Update the zoom value, call updateDisplayBounds
# to apply the new zoom to the display bounds,
# then centre the display on the calculated
# centre point.
with props.skip(opts, 'zoom', self.name), \
props.skip(opts, 'displayBounds', self.name):
opts.zoom = zoom
self._updateDisplayBounds()
self.centreDisplayAt(xmid, ymid)
self.Refresh()
def resetDisplay(self):
"""Resets the :attr:`zoom` to 100%, and sets the canvas display
bounds to the overlay bounding box (from the
:attr:`.DisplayContext.bounds`)
"""
opts = self.opts
with props.skip(opts, 'zoom', self.name):
opts.zoom = 100
with props.suppress(opts, 'displayBounds'):
self._updateDisplayBounds()
self.Refresh()
def getAnnotations(self):
"""Returns an :class:`.Annotations` instance, which can be used to
annotate the canvas.
"""
return self._annotations
def getGLObject(self, overlay):
"""Returns the :class:`.GLObject` associated with the given
``overlay``, or ``None`` if there isn't one.
"""
globj = self._glObjects.get(overlay, None)
# globjs can be set to False
if not globj: return None
else: return globj
def getViewport(self):
"""Return the current viewport, as two tuples containing the
``(xlo, ylo, zlo)`` and ``(xhi, yhi, zhi)`` bounds.
This method will return ``None`` if :meth:`_draw` has not yet been
called.
"""
return self.__viewport
@property
def viewMatrix(self):
"""Returns the current model view matrix. """
return gl.glGetFloat(gl.GL_MODELVIEW_MATRIX)
@property
def projectionMatrix(self):
"""Returns the current projection matrix. """
return gl.glGetFloat(gl.GL_PROJECTION_MATRIX)
def _initGL(self):
"""Call the :meth:`_overlayListChanged` method - it will generate
any necessary GL data for each of the overlays.
"""
self._overlayListChanged()
def _updateRenderTextures(self):
"""Called when the :attr:`renderMode` changes, when the overlay
list changes, or when the GLObject representation of an overlay
changes.
If the :attr:`renderMode` property is ``onscreen``, this method does
nothing.
Otherwise, creates/destroys :class:`.RenderTexture` or
:class:`.RenderTextureStack` instances for newly added/removed
overlays.
"""
renderMode = self.opts.renderMode
if renderMode == 'onscreen':
return
# If any overlays have been removed from the overlay
# list, destroy the associated render texture stack
if renderMode == 'offscreen':
for ovl, texture in list(self._offscreenTextures.items()):
if ovl not in self.overlayList:
self._offscreenTextures.pop(ovl)
texture.destroy()
elif renderMode == 'prerender':
for ovl, (texture, name) in list(self._prerenderTextures.items()):
if ovl not in self.overlayList:
self._prerenderTextures.pop(ovl)
glresources.delete(name)
# If any overlays have been added to the list,
# create new render textures for them
for overlay in self.overlayList:
if renderMode == 'offscreen':
if overlay in self._offscreenTextures:
continue
elif renderMode == 'prerender':
if overlay in self._prerenderTextures:
continue
globj = self._glObjects.get(overlay, None)
display = self.displayCtx.getDisplay(overlay)
if not globj:
continue
# For offscreen render mode, GLObjects are
# first rendered to an offscreen texture,
# and then that texture is rendered to the
# screen. The off-screen texture is managed
# by a RenderTexture object.
if renderMode == 'offscreen':
opts = self.opts
name = '{}_{}_{}'.format(display.name, opts.xax, opts.yax)
rt = textures.GLObjectRenderTexture(
name,
globj,
opts.xax,
opts.yax)
self._offscreenTextures[overlay] = rt
# For prerender mode, slices of the
# GLObjects are pre-rendered on a
# stack of off-screen textures, which
# is managed by a RenderTextureStack
# object.
elif renderMode == 'prerender':
rt, name = self._getPreRenderTexture(globj, overlay)
self._prerenderTextures[overlay] = rt, name
self.Refresh()
def _getPreRenderTexture(self, globj, overlay):
"""Creates/retrieves a :class:`.RenderTextureStack` for the given
:class:`.GLObject`. A tuple containing the ``RenderTextureStack``,
and its name, as passed to the :mod:`.resources` module, is returned.
:arg globj: The :class:`.GLObject` instance.
:arg overlay: The overlay object.
"""
display = self.displayCtx.getDisplay(overlay)
copts = self.opts
dopts = display.opts
name = '{}_{}_zax{}'.format(
id(overlay),
textures.RenderTextureStack.__name__,
copts.zax)
# If all display/opts properties
# are synchronised to the parent,
# then we use a texture stack that
# might be shared across multiple
# views.
#
# But if any display/opts properties
# are not synchronised, we'll use our
# own texture stack.
if not (display.getParent() and
display.allSyncedToParent() and
dopts .getParent() and
dopts .allSyncedToParent()):
name = '{}_{}'.format(id(self.displayCtx), name)
if glresources.exists(name):
rt = glresources.get(name)
else:
rt = textures.RenderTextureStack(globj)
rt.setAxes(copts.xax, copts.yax)
glresources.set(name, rt)
return rt, name
def _renderModeChange(self, *a):
"""Called when the :attr:`renderMode` property changes."""
renderMode = self.opts.renderMode
log.debug('Render mode changed: {}'.format(renderMode))
# destroy any existing render textures
for ovl, texture in list(self._offscreenTextures.items()):
self._offscreenTextures.pop(ovl)
texture.destroy()
for ovl, (texture, name) in list(self._prerenderTextures.items()):
self._prerenderTextures.pop(ovl)
glresources.delete(name)
# Onscreen rendering - each GLObject
# is rendered directly to the canvas
# displayed on the screen, so render
# textures are not needed.
if renderMode == 'onscreen':
self.Refresh()
return
# Off-screen or prerender rendering - update
# the render textures for every GLObject
self._updateRenderTextures()
def _highDpiChange(self, *a):
"""Called when the :attr:`.SliceCanvasOpts.highDpi` property
changes. Calls the :meth:`.GLCanvasTarget.EnableHighDPI` method.
"""
self.EnableHighDPI(self.opts.highDpi)
def _syncOverlayDisplayChanged(self, *a):
"""Called when the :attr:`.DisplayContext.syncOverlayDisplay`
property changes. If the current :attr:`renderMode` is ``prerender``,
the :class:`.RenderTextureStack` instances for each overlay are
re-created.
This is done because, if all display properties for an overlay are
synchronised, then a single ``RenderTextureStack`` can be shared
across multiple displays. However, if any display properties are not
synchronised, then a separate ``RenderTextureStack`` is needed for
the :class:`.DisplayContext` used by this ``SliceCanvas``.
"""
if self.opts.renderMode == 'prerender':
self._renderModeChange(self)
def _zAxisChanged(self, *a):
"""Called when the :attr:`zax` property is changed. Notifies
the :class:`.Annotations` instance, and calls :meth:`resetDisplay`.
"""
opts = self.opts
log.debug('{}'.format(opts.zax))
self._annotations.setAxes(opts.xax, opts.yax)
self.resetDisplay()
# If pre-rendering is enabled, the
# render textures need to be updated, as
# they are configured in terms of the
# display axes. Easiest way to do this
# is to destroy and re-create them
self._renderModeChange()
def __overlayTypeChanged(self, value, valid, display, name):
"""Called when the :attr:`.Display.overlayType` setting for any
overlay changes. Makes sure that an appropriate :class:`.GLObject`
has been created for the overlay (see the :meth:`__genGLObject`
method).
"""
log.debug('GLObject representation for {} '
'changed to {}'.format(display.name,
display.overlayType))
self.__regenGLObject(display.overlay)
self.Refresh()
def __regenGLObject(self,
overlay,
updateRenderTextures=True,
refresh=True):
"""Destroys any existing :class:`.GLObject` associated with the given
``overlay``, and creates a new one (via the :meth:`__genGLObject`
method).
If ``updateRenderTextures`` is ``True`` (the default), and the
:attr:`.SliceCanvasOpts.renderMode` is ``offscreen`` or ``prerender``,
any render texture associated with the overlay is destroyed.
"""
renderMode = self.opts.renderMode
# Tell the previous GLObject (if
# any) to clean up after itself
globj = self._glObjects.pop(overlay, None)
if globj:
globj.deregister(self.name)
globj.destroy()
if updateRenderTextures:
if renderMode == 'offscreen':
tex = self._offscreenTextures.pop(overlay, None)
if tex is not None:
tex.destroy()
elif renderMode == 'prerender':
tex, name = self._prerenderTextures.pop(
overlay, (None, None))
if tex is not None:
glresources.delete(name)
self.__genGLObject(overlay, updateRenderTextures, refresh)
def __genGLObject(self, overlay, updateRenderTextures=True, refresh=True):
"""Creates a :class:`.GLObject` instance for the given ``overlay``.
Does nothing if a ``GLObject`` already exists for the given overlay.
If ``updateRenderTextures`` is ``True`` (the default), and the
:attr:`.SliceCanvasOpts.renderMode` is ``offscreen`` or ``prerender``,
any textures for the overlay are updated.
If ``refresh`` is ``True`` (the default), the :meth:`Refresh` method
is called after the ``GLObject`` has been created.
.. note:: If running in ``wx`` (i.e. via a :class:`.WXGLSliceCanvas`),
the :class:`.GLObject` instance will be created on the
``wx.EVT_IDLE`` loop (via the :mod:`.idle` module).
"""
display = self.displayCtx.getDisplay(overlay)
if overlay in self._glObjects:
return
# We put a placeholder value in
# the globjects dictionary, so
# that the _draw method knows
# that creation for this overlay
# is pending.
self._glObjects[overlay] = False
def create():
if not self or self.destroyed:
return
# The overlay has been removed from the
# globjects dictionary between the time
# the pending flag was set above, and
# the time that this create() call was
# executed. Possibly because the overlay
# was removed between these two events.
# All is well, just ignore it.
if overlay not in self._glObjects:
return
# We need a GL context to create a new GL
# object. If we can't get it now, the GL
# object creation will be re-scheduled on
# the next call to _draw (via _getGLObjects).
if not self._setGLContext():
# Clear the pending flag so
# this GLObject creation
# gets re-scheduled.
self._glObjects.pop(overlay)
return
globj = globject.createGLObject(overlay,
self.overlayList,
self.displayCtx,
self,
False)
if globj is not None:
globj.register(self.name, self.__onGLObjectUpdate)
# A hack which allows us to easily
# retrieve the overlay associated
# with a given GLObject. See the
# __onGLObjectUpdate method.
globj._sc_overlay = overlay
self._glObjects[overlay] = globj
if updateRenderTextures:
self._updateRenderTextures()
display.addListener('overlayType',
self.name,
self.__overlayTypeChanged,
overwrite=True)
display.addListener('enabled',
self.name,
self.Refresh,
overwrite=True)
if refresh:
self.Refresh()
create = status.reportErrorDecorator(
strings.titles[ self, 'globjectError'],
strings.messages[self, 'globjectError'].format(overlay.name))(
create)
idle.idle(create)
def __onGLObjectUpdate(self, globj, *a):
"""Called when a :class:`.GLObject` has been updated, and needs to be
redrawn.
"""
# we can sometimes get called after
# being destroyed (e.g. during testing)
if self.destroyed:
return
# If we are in prerender mode, we
# need to tell the RenderTextureStack
# for this GLObject to update itself.
if self.opts.renderMode == 'prerender':
overlay = globj._sc_overlay
rt, name = self._prerenderTextures.get(overlay, (None, None))
if rt is not None:
rt.onGLObjectUpdate()
self.Refresh()
def _overlayListChanged(self, *args, **kwargs):
"""This method is called every time an overlay is added or removed
to/from the overlay list.
For newly added overlays, calls the :meth:`__genGLObject` method,
which initialises the OpenGL data necessary to render the
overlay.
"""
if self.destroyed:
return
# Destroy any GL objects for overlays
# which are no longer in the list
for ovl, globj in list(self._glObjects.items()):
if ovl not in self.overlayList:
self._glObjects.pop(ovl)
if globj:
globj.destroy()
# Create a GL object for any new overlays,
# and attach a listener to their display
# properties so we know when to refresh
# the canvas.
for overlay in self.overlayList:
# A GLObject already exists
# for this overlay
if overlay in self._glObjects:
continue
self.__regenGLObject(overlay,
updateRenderTextures=False,
refresh=False)
# All the GLObjects are created using
# idle.idle, so we call refresh in the
# same way to make sure it gets called
# after all the GLObject creations.
def refresh():
# This SliceCanvas might get
# destroyed before this idle
# task is executed
if not self or self.destroyed:
return
self._updateRenderTextures()
self.Refresh()
idle.idle(refresh)
def _getGLObjects(self):
"""Called by :meth:`_draw`. Builds a list of all :class:`.GLObjects`
to be drawn.
:returns: A list of overlays, and a list of corresponding
:class:`.GLObjects` to be drawn.
"""
overlays = []
globjs = []
for ovl in self.displayCtx.getOrderedOverlays():
globj = self._glObjects.get(ovl, None)
# If an overlay does not yet have a corresponding
# GLObject, we presume that it hasn't been created
# yet, so we'll tell genGLObject to create one for
# it.
if globj is None: self.__genGLObject(ovl)
# If there is a value for this overlay in
# the globjects dictionary, but it evaluates
# to False, then GLObject creation has been
# scheduled for the overlay - see genGLObject.
elif globj:
overlays.append(ovl)
globjs .append(globj)
return overlays, globjs
def _overlayBoundsChanged(self, *args, **kwargs):
"""Called when the :attr:`.DisplayContext.bounds` are changed.
Initialises/resets the display bounds, and/or preserves the zoom
level if necessary.
:arg preserveZoom: Must be passed as a keyword argument. If ``True``
(the default), the :attr:`zoom` value is adjusted
so that the effective zoom is preserved
"""
preserveZoom = kwargs.get('preserveZoom', True)
opts = self.opts
xax = opts.xax
yax = opts.yax
xmin = self.displayCtx.bounds.getLo(xax)
xmax = self.displayCtx.bounds.getHi(xax)
ymin = self.displayCtx.bounds.getLo(yax)
ymax = self.displayCtx.bounds.getHi(yax)
width, height = self.GetSize()
if np.isclose(xmin, xmax) or width == 0 or height == 0:
return
if not preserveZoom or opts.displayBounds.xlen == 0:
self.resetDisplay()
return
# Figure out the scaling factor that
# would preserve the current zoom
# level for the new display bounds.
xmin, xmax, ymin, ymax = glroutines.preserveAspectRatio(
width, height, xmin, xmax, ymin, ymax)
scale = opts.displayBounds.xlen / (xmax - xmin)
# Adjust the zoom value so that the
# effective zoom stays the same
with props.suppress(opts, 'zoom'):
opts.zoom = self.scaleToZoom(scale)
def _displaySpaceChanged(self, *a):
"""Called when the :attr:`.DisplayContext.displaySpace` changes. Resets
the display bounds and zoom.
"""
self.resetDisplay()
def _zoomChanged(self, *a):
"""Called when the :attr:`zoom` property changes. Updates the
display bounds.
"""
opts = self.opts
loc = [opts.pos[opts.xax], opts.pos[opts.yax]]
self._updateDisplayBounds(oldLoc=loc)
def zoomToScale(self, zoom):
"""Converts the given zoom value into a scaling factor that can be
multiplied by the display bounds width/height.
Zoom is specified as a percentage. At 100% the full scene takes up
the full display.
In order to make the zoom smoother at low levels, we re-scale the zoom
value to be exponential across the range.
This is done by transforming the zoom from ``[zmin, zmax]`` into
``[0.0, 1.0]``, then turning it from linear ``[0.0, 1.0]`` to
exponential ``[0.0, 1.0]``, and then finally transforming it back to
``[zmin - zmax]``.
However there is a slight hack in that, if the zoom value is less than
100%, it will be applied linearly (i.e. 50% will cause the width/height
to be scaled by 50%).
"""
# Assuming that minval == 100.0
opts = self.opts
zmin = opts.getAttribute('zoom', 'minval')
zmax = opts.getAttribute('zoom', 'maxval')
zlen = zmax - zmin
# Don't break the maths below
if zoom <= 0:
zoom = 1
# Normal behaviour
if zoom >= 100:
# [100 - zmax] -> [0.0 - 1.0] -> exponentify -> [100 - zmax]
zoom = (zoom - zmin) / zlen
zoom = zmin + (zoom ** 3) * zlen
# Then we transform the zoom from
# [100 - zmax] to [1.0 - 0.0] -
# this value is used to scale the
# bounds.
scale = zmin / zoom
# Hack for zoom < 100
else:
scale = 100.0 / zoom
return scale
def scaleToZoom(self, scale):
"""Converts the given zoom scaling factor into a zoom percentage.
This method performs the reverse operation to the :meth:`zoomToScale`
method.
"""
opts = self.opts
zmin = opts.getAttribute('zoom', 'minval')
zmax = opts.getAttribute('zoom', 'maxval')
zlen = zmax - zmin
if scale > 1:
zoom = 100.0 / scale
else:
# [100 - zmax] -> [0.0 - 1.0] -> de-exponentify -> [100 - zmax]
zoom = zmin / scale
zoom = (zoom - zmin) / zlen
zoom = np.power(zoom, 1.0 / 3.0)
zoom = zmin + zoom * zlen
return zoom
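# Worked example (added for clarity, not part of the original source),
# assuming, purely for illustration, a zoom attribute range of minval=100
# and maxval=1000 (so zlen=900):
#
#     zoom  = 200                              # nominal zoom of 200%
#     norm  = (200 - 100) / 900                # ~0.111
#     scale = 100 / (100 + norm**3 * 900)      # ~0.988
#
# i.e. a nominal 200% zoom only shrinks the displayed bounds by ~1%,
# which is what makes zooming feel smooth at low zoom levels. scaleToZoom
# applies the exact inverse of this mapping.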
def _applyZoom(self, xmin, xmax, ymin, ymax):
"""*Zooms* in to the given rectangle according to the current value
of the zoom property. Returns a 4-tuple containing the updated bound
values.
"""
zoomFactor = self.zoomToScale(self.opts.zoom)
xlen = xmax - xmin
ylen = ymax - ymin
newxlen = xlen * zoomFactor
newylen = ylen * zoomFactor
# centre the zoomed-in rectangle
# on the provided limits
xmid = xmin + 0.5 * xlen
ymid = ymin + 0.5 * ylen
# new x/y min/max bounds
xmin = xmid - 0.5 * newxlen
xmax = xmid + 0.5 * newxlen
ymin = ymid - 0.5 * newylen
ymax = ymid + 0.5 * newylen
return (xmin, xmax, ymin, ymax)
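# Example (added for clarity, not part of the original source): with input
# bounds xmin=0, xmax=10, ymin=0, ymax=10 and a zoom scale factor of 0.5,
# _applyZoom returns (2.5, 7.5, 2.5, 7.5) - the same centre point (5, 5),
# with each side halved.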
def _updateDisplayBounds(self, bbox=None, oldLoc=None):
"""Called on canvas resizes, overlay bound changes, and zoom changes.
Calculates the bounding box, in display coordinates, to be displayed
on the canvas. Stores this bounding box in the :attr:`displayBounds`
property. If any of the parameters are not provided, the
:attr:`.DisplayContext.bounds` are used.
.. note:: This method is used internally, and also by the
:class:`.WXGLSliceCanvas` class.
.. warning:: This code assumes that, if the display coordinate system
has changed, the display context location has already
been updated. See the
:meth:`.DisplayContext.__displaySpaceChanged` method.
:arg bbox: Tuple containing four values:
- Minimum x (horizontal) value to be in the display
bounds.
- Maximum x value to be in the display bounds.
- Minimum y (vertical) value to be in the display bounds.
- Maximum y value to be in the display bounds.
:arg oldLoc: If provided, should be the ``(x, y)`` location shown on
this ``SliceCanvas`` - the new display bounds will be
adjusted so that this location remains the same, with
respect to the new field of view.
"""
opts = self.opts
if bbox is None:
bbox = (self.displayCtx.bounds.getLo(opts.xax),
self.displayCtx.bounds.getHi(opts.xax),
self.displayCtx.bounds.getLo(opts.yax),
self.displayCtx.bounds.getHi(opts.yax))
xmin = bbox[0]
xmax = bbox[1]
ymin = bbox[2]
ymax = bbox[3]
# Save the display bounds in case
# we need to preserve them with
# respect to the current display
# location.
width, height = self.GetSize()
oldxmin, oldxmax, oldymin, oldymax = opts.displayBounds[:]
log.debug('{}: Required display bounds: '
'X: ({: 5.1f}, {: 5.1f}) Y: ({: 5.1f}, {: 5.1f})'.format(
opts.zax, xmin, xmax, ymin, ymax))
# Adjust the bounds to preserve the
# x/y aspect ratio, and according to
# the current zoom level.
xmin, xmax, ymin, ymax = glroutines.preserveAspectRatio(
width, height, xmin, xmax, ymin, ymax)
xmin, xmax, ymin, ymax = self._applyZoom(xmin, xmax, ymin, ymax)
# If a location (oldLoc) has been provided,
# adjust the bounds so they are consistent
# with respect to that location.
if oldLoc and (oldxmax > oldxmin) and (oldymax > oldymin):
# Calculate the normalised distance from the
# old cursor location to the old bound corner
oldxoff = (oldLoc[0] - oldxmin) / (oldxmax - oldxmin)
oldyoff = (oldLoc[1] - oldymin) / (oldymax - oldymin)
# Re-set the new bounds to the current
# display location, offset by the same
# amount that it used to be (as
# calculated above).
#
# N.B. This code assumes that, if the display
# coordinate system has changed, the display
# context location has already been updated.
# See the DisplayContext.__displaySpaceChanged
# method.
xloc = opts.pos[opts.xax]
yloc = opts.pos[opts.yax]
xlen = xmax - xmin
ylen = ymax - ymin
xmin = xloc - oldxoff * xlen
ymin = yloc - oldyoff * ylen
xmax = xmin + xlen
ymax = ymin + ylen
log.debug('{}: Final display bounds: '
'X: ({: 5.1f}, {: 5.1f}) Y: ({: 5.1f}, {: 5.1f})'.format(
opts.zax, xmin, xmax, ymin, ymax))
opts.displayBounds[:] = (xmin, xmax, ymin, ymax)
def _setViewport(self, invertX=None, invertY=None):
"""Sets up the GL canvas size, viewport, and projection.
:arg invertX: Invert the X axis. If not provided, taken from
:attr:`invertX`.
:arg invertY: Invert the Y axis. If not provided, taken from
:attr:`invertY`.
:returns: A sequence of three ``(low, high)`` values, defining the
display coordinate system bounding box.
"""
opts = self.opts
xax = opts.xax
yax = opts.yax
zax = opts.zax
xmin = opts.displayBounds.xlo
xmax = opts.displayBounds.xhi
ymin = opts.displayBounds.ylo
ymax = opts.displayBounds.yhi
zmin = self.displayCtx.bounds.getLo(zax)
zmax = self.displayCtx.bounds.getHi(zax)
width, height = self.GetScaledSize()
if invertX is None: invertX = opts.invertX
if invertY is None: invertY = opts.invertY
# If there are no images to be displayed,
# or no space to draw, do nothing
if (len(self.overlayList) == 0) or \
(width == 0) or \
(height == 0) or \
(xmin == xmax) or \
(ymin == ymax):
return [(0, 0), (0, 0), (0, 0)]
log.debug('Setting canvas bounds (size {}, {}): '
'X {: 5.1f} - {: 5.1f},'
'Y {: 5.1f} - {: 5.1f},'
'Z {: 5.1f} - {: 5.1f}'.format(
width, height, xmin, xmax, ymin, ymax, zmin, zmax))
# Add a bit of padding to the depth limits
zmin -= 1e-3
zmax += 1e-3
lo = [None] * 3
hi = [None] * 3
lo[xax], hi[xax] = xmin, xmax
lo[yax], hi[yax] = ymin, ymax
lo[zax], hi[zax] = zmin, zmax
# store a copy of the final bounds -
# interested parties can retrieve it
# via the getViewport method.
self.__viewport = (tuple(lo), tuple(hi))
# set up 2D orthographic drawing
glroutines.show2D(xax,
yax,
width,
height,
lo,
hi,
invertX,
invertY)
return [(lo[0], hi[0]), (lo[1], hi[1]), (lo[2], hi[2])]
def _drawCursor(self):
"""Draws a green cursor at the current X/Y position."""
copts = self.opts
ovl = self.displayCtx.getSelectedOverlay()
xmin, xmax = self.displayCtx.bounds.getRange(copts.xax)
ymin, ymax = self.displayCtx.bounds.getRange(copts.yax)
x = copts.pos[copts.xax]
y = copts.pos[copts.yax]
lines = []
# Just show a vertical line at xpos,
# and a horizontal line at ypos
if not copts.cursorGap:
lines.append((x, ymin, x, ymax))
lines.append((xmin, y, xmax, y))
# Draw vertical/horizontal cursor lines,
# with a gap at the cursor centre
# Not a NIFTI image - just
# use a fixed gap size
elif ovl is None or not isinstance(ovl, fslimage.Nifti):
lines.append((xmin, y, x - 0.5, y))
lines.append((x + 0.5, y, xmax, y))
lines.append((x, ymin, x, y - 0.5))
lines.append((x, y + 0.5, x, ymax))
# If the current overlay is NIFTI, make
# the gap size match its voxel size
else:
# Get the current voxel
# coordinates,
dopts = self.displayCtx.getOpts(ovl)
vox = dopts.getVoxel(vround=False)
# Out of bounds of the current
# overlay, fall back to using
# a fixed size gap
if vox is None:
xlow = x - 0.5
xhigh = x + 0.5
ylow = y - 0.5
yhigh = y + 0.5
else:
vox = np.array(vox, dtype=np.float32)
# Figure out the voxel coord axes
# that (approximately) correspond
# with the display x/y axes.
axes = ovl.axisMapping(dopts.getTransform('voxel', 'display'))
axes = np.abs(axes) - 1
xax = axes[copts.xax]
yax = axes[copts.yax]
# Clamp the voxel x/y coords to
# the voxel edge (round, then
# offset by 0.5 - integer coords
# correspond to the voxel centre).
vox[xax] = np.round(vox[xax]) - 0.5
vox[yax] = np.round(vox[yax]) - 0.5
# Get the voxels that are above
# and next to our current voxel.
voxx = np.copy(vox)
voxy = np.copy(vox)
import gym
import time
from gym import spaces
from maplewrapper import wrapper
import numpy as np
import pydirectinput
import cv2
class MapleEnv(gym.Env):
"""
Description:
Gym environment of MapleStory v.90 and below using extracted information from maplewrapper.
See https://github.com/vinmorel/MapleWrapper
Observation:
Type: Dict "MapleWrapper" : box(4)
Num Observation Min Max
1 Player X1 0 825
2 Mob X1 (1) 0 825
3 Player Facing Direction 0 1
4 Attacked 0 1
Actions:
Type: Discrete(4)
Num Action
0 Walk left
1 Walk right
2 Attack 1
3 Attack 2
Reward:
Reward is the sum of gained exp minus health damage, mp consumption and time penalties
Starting State:
All observations are initialized according to game information
Episode Termination:
Episode terminates every 10 minutes
"""
metadata = {'render.modes': ['human']}
def __init__(self,w):
pydirectinput.PAUSE = 0.0
self.w = w
self.lvl, self.max_hp, self.max_mp, self.max_exp = self.w.get_basestats()
self.B_X = 850 # Bounding box max X
self.Min = np.array([0] * 4, dtype=np.float32)
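# Sketch of how the rest of the observation space would plausibly be built
# (added for clarity - the exact upper bounds are assumptions based on the
# docstring table above and on self.B_X, not taken from the original source):
#
#     self.Max = np.array([self.B_X, self.B_X, 1, 1], dtype=np.float32)
#     self.observation_space = spaces.Box(self.Min, self.Max, dtype=np.float32)
#     self.action_space = spaces.Discrete(4)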
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2019-08-02 14:38:36
# @Last Modified by: <NAME>
# @Last Modified time: 2020-07-03 09:23:07
# @Email: <EMAIL>
import cv2
import math
import numpy as np
import torch
import transforms3d
class Compose(object):
def __init__(self, transforms):
self.transformers = []
for tr in transforms:
transformer = eval(tr['callback'])
parameters = tr['parameters'] if 'parameters' in tr else None
self.transformers.append({
'callback': transformer(parameters),
'objects': tr['objects']
}) # yapf: disable
def __call__(self, data):
for tr in self.transformers:
transform = tr['callback']
objects = tr['objects']
rnd_value = np.random.uniform(0, 1)
if transform.__class__ in [NormalizeObjectPose]:
data = transform(data)
else:
for k, v in data.items():
if k in objects and k in data:
if transform.__class__ in [
RandomCrop, RandomFlip, RandomRotatePoints, RandomScalePoints, RandomMirrorPoints
]:
data[k] = transform(v, rnd_value)
else:
data[k] = transform(v)
return data
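# Usage sketch (added for clarity, not part of the original source). The
# transform names, parameter values and data keys below are illustrative only:
#
#     transforms = Compose([
#         {'callback': 'RandomCrop',
#          'parameters': {'img_size': (224, 224), 'crop_size': (210, 210)},
#          'objects': ['rgb_img']},
#         {'callback': 'ToTensor',
#          'parameters': None,
#          'objects': ['rgb_img', 'ptcloud']},
#     ])
#     data = transforms({'rgb_img': rgb_img, 'ptcloud': ptcloud})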
class ToTensor(object):
def __init__(self, parameters):
pass
def __call__(self, arr):
shape = arr.shape
if len(shape) == 3: # RGB/Depth Images
arr = arr.transpose(2, 0, 1)
# Ref: https://discuss.pytorch.org/t/torch-from-numpy-not-support-negative-strides/3663/2
return torch.from_numpy(arr.copy()).float()
class Normalize(object):
def __init__(self, parameters):
self.mean = parameters['mean']
self.std = parameters['std']
def __call__(self, arr):
arr = arr.astype(np.float32)
arr /= self.std
arr -= self.mean
return arr
class CenterCrop(object):
def __init__(self, parameters):
self.img_size_h = parameters['img_size'][0]
self.img_size_w = parameters['img_size'][1]
self.crop_size_h = parameters['crop_size'][0]
self.crop_size_w = parameters['crop_size'][1]
def __call__(self, img):
img_h, img_w, _ = img.shape  # numpy images are (height, width, channels)
x_left = (img_w - self.crop_size_w) * .5
x_right = x_left + self.crop_size_w
y_top = (img_h - self.crop_size_h) * .5
y_bottom = y_top + self.crop_size_h
# Crop the image
img = cv2.resize(img[int(y_top):int(y_bottom), int(x_left):int(x_right)], (self.img_size_w, self.img_size_h))
img = img[..., np.newaxis] if len(img.shape) == 2 else img
return img
class RandomCrop(object):
def __init__(self, parameters):
self.img_size_h = parameters['img_size'][0]
self.img_size_w = parameters['img_size'][1]
self.crop_size_h = parameters['crop_size'][0]
self.crop_size_w = parameters['crop_size'][1]
def __call__(self, img, rnd_value):
img_h, img_w, _ = img.shape  # numpy images are (height, width, channels)
x_left = (img_w - self.crop_size_w) * rnd_value
x_right = x_left + self.crop_size_w
y_top = (img_h - self.crop_size_h) * rnd_value
y_bottom = y_top + self.crop_size_h
# Crop the image
img = cv2.resize(img[int(y_top):int(y_bottom), int(x_left):int(x_right)], (self.img_size_w, self.img_size_h))
img = img[..., np.newaxis] if len(img.shape) == 2 else img
return img
class RandomFlip(object):
def __init__(self, parameters):
pass
def __call__(self, img, rnd_value):
if rnd_value > 0.5:
img = np.fliplr(img)
return img
class RandomPermuteRGB(object):
def __init__(self, parameters):
pass
def __call__(self, img):
rgb_permutation = np.random.permutation(3)
return img[..., rgb_permutation]
class RandomBackground(object):
def __init__(self, parameters):
self.random_bg_color_range = parameters['bg_color']
def __call__(self, img):
img_h, img_w, img_c = img.shape
if not img_c == 4:
return img
r, g, b = [
np.random.randint(self.random_bg_color_range[i][0], self.random_bg_color_range[i][1] + 1) for i in range(3)
]
alpha = (np.expand_dims(img[:, :, 3], axis=2) == 0).astype(np.float32)
img = img[:, :, :3]
bg_color = np.array([[[r, g, b]]]) / 255.
img = alpha * bg_color + (1 - alpha) * img
return img
class UpSamplePoints(object):
def __init__(self, parameters):
self.n_points = parameters['n_points']
def __call__(self, ptcloud):
curr = ptcloud.shape[0]
need = self.n_points - curr
if need < 0:
return ptcloud[np.random.permutation(self.n_points)]
while curr <= need:
ptcloud = np.tile(ptcloud, (2, 1))
need -= curr
curr *= 2
choice = np.random.permutation(need)
ptcloud = np.concatenate((ptcloud, ptcloud[choice]))
return ptcloud
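# Worked example (added for clarity, not part of the original source): with
# n_points=2048 and an input cloud of 1000 points, need starts at 1048, the
# while-loop tiles the cloud once (curr: 1000 -> 2000, need: 1048 -> 48),
# and 48 randomly chosen points are then appended to reach exactly 2048.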
class RandomSamplePoints(object):
def __init__(self, parameters):
self.n_points = parameters['n_points']
def __call__(self, ptcloud):
choice = np.random.permutation(ptcloud.shape[0])
ptcloud = ptcloud[choice[:self.n_points]]
if ptcloud.shape[0] < self.n_points:
zeros = np.zeros((self.n_points - ptcloud.shape[0], 3))
ptcloud = np.concatenate([ptcloud, zeros])
return ptcloud
class RandomClipPoints(object):
def __init__(self, parameters):
self.sigma = parameters['sigma'] if 'sigma' in parameters else 0.01
self.clip = parameters['clip'] if 'clip' in parameters else 0.05
def __call__(self, ptcloud):
ptcloud += np.clip(self.sigma * np.random.randn(*ptcloud.shape), -self.clip, self.clip).astype(np.float32)
return ptcloud
class RandomRotatePoints(object):
def __init__(self, parameters):
pass
def __call__(self, ptcloud, rnd_value):
trfm_mat = transforms3d.zooms.zfdir2mat(1)
angle = 2 * math.pi * rnd_value
trfm_mat = np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], angle), trfm_mat)
ptcloud[:, :3] = np.dot(ptcloud[:, :3], trfm_mat.T)
return ptcloud
class RandomScalePoints(object):
def __init__(self, parameters):
self.scale = parameters['scale']
def __call__(self, ptcloud, rnd_value):
trfm_mat = transforms3d.zooms.zfdir2mat(1)
scale = np.random.uniform(1.0 / self.scale * rnd_value, self.scale * rnd_value)
trfm_mat = np.dot(transforms3d.zooms.zfdir2mat(scale), trfm_mat)
ptcloud[:, :3] = np.dot(ptcloud[:, :3], trfm_mat.T)
return ptcloud
class RandomMirrorPoints(object):
def __init__(self, parameters):
pass
def __call__(self, ptcloud, rnd_value):
trfm_mat = transforms3d.zooms.zfdir2mat(1)
trfm_mat_x = np.dot(transforms3d.zooms.zfdir2mat(-1, [1, 0, 0]), trfm_mat)
trfm_mat_z = np.dot(transforms3d.zooms.zfdir2mat(-1, [0, 0, 1]), trfm_mat)
if rnd_value <= 0.25:
trfm_mat = np.dot(trfm_mat_x, trfm_mat)
trfm_mat = np.dot(trfm_mat_z, trfm_mat)
elif rnd_value > 0.25 and rnd_value <= 0.5: # lgtm [py/redundant-comparison]
trfm_mat = np.dot(trfm_mat_x, trfm_mat)
elif rnd_value > 0.5 and rnd_value <= 0.75:
trfm_mat = np.dot(trfm_mat_z, trfm_mat)
ptcloud[:, :3] = np.dot(ptcloud[:, :3], trfm_mat.T)
import operator
import os
import sys
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics.scorer import accuracy_scorer, precision_scorer, recall_scorer, f1_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from tqdm import tqdm
from xgboost import XGBClassifier
from features import get_features, boosting_params, rf_params, svm_params
from results_records import TestResults, ResultsRecord, AnswersResults
from utils import LOGGER
from common.utils import OUTPUTS_DIR
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
scoring = {
'accuracy': accuracy_scorer,
'precision': precision_scorer,
'recall': recall_scorer,
'f1': f1_scorer,
}
def classify_cv_results(X, y, model, params, test_name, debug, cv=5):
"""
Used to get cross-validated results.
:param X: features
:param y: gold label
:param model: model to fit
:param params: parameters for GridSearchCV
:param test_name: name of the test
:param debug: print debug info to logger
:param cv: cross validation number
:return: classifier_name, accuracy, precision, recall, f1
"""
gcv = GridSearchCV(model, params, cv=cv, scoring=scoring, refit='accuracy', iid=False)
gcv.fit(X, y)
best_model = gcv.best_estimator_
classifier_name = best_model.__class__.__name__
accuracy = np.mean(gcv.cv_results_['mean_test_accuracy'])
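# Usage sketch (added for clarity, not part of the original source) - X, y
# and the test name are placeholders; per the docstring, the function is
# expected to return (classifier_name, accuracy, precision, recall, f1):
#
#     model, params = RandomForestClassifier(), rf_params
#     name, acc, prec, rec, f1 = classify_cv_results(
#         X, y, model, params, test_name='example', debug=False, cv=5)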
#!/usr/bin/env python
# coding: utf-8
# Edited May, 27th 2020
## This is vHULK: viral Host Unveiling Kit
# Developed by <NAME> and <NAME>
# Creative commons
# Import required Python modules
import numpy as np
import pandas as pd
from Bio import SeqIO
import re
import sys
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import subprocess
import datetime
import argparse
import warnings
import csv
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.simplefilter(action="ignore", category=FutureWarning)
from time import gmtime, strftime
from tensorflow.keras.layers import Dense, Activation, LeakyReLU, ReLU
from tensorflow.keras.models import load_model
from scipy.special import entr
# Function declarations
# Run prokka
def run_prokka(binn, input_folder, threads):
# Check the fasta format
prefix = get_prefix(binn)
# Filehandle where the output of prokka will be saved
# output_prokka = open(str(prefix)+'prokka.output', mode='w')
# Full command line for prokka
command_line = (
"prokka --kingdom Viruses --centre X --compliant --gcode 11 --cpus "
+ threads
+ " --force --quiet --prefix prokka_results_"
+ str(prefix)
+ " --fast --norrna --notrna --outdir "
+ input_folder
+ "results/prokka/"
+ str(prefix)
+ " --cdsrnaolap --noanno "
+ input_folder
+ str(binn)
).split()
return_code = subprocess.call(command_line, stderr=subprocess.PIPE)
# Check whether prokka ran smoothly
if return_code == 1:
print("Prokka may not be correctly installed. Please check that.")
sys.exit(1)
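# For reference (added for clarity, not part of the original source), for a
# bin named "mybin.fasta" in "input/" with 4 threads, the command assembled
# above expands to roughly:
#
#   prokka --kingdom Viruses --centre X --compliant --gcode 11 --cpus 4 \
#          --force --quiet --prefix prokka_results_mybin --fast --norrna \
#          --notrna --outdir input/results/prokka/mybin --cdsrnaolap \
#          --noanno input/mybin.fasta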
# Get prefix from bins
def get_prefix(binn):
if re.search(".fasta", binn):
prefix = re.sub(".fasta", "", binn)
else:
prefix = re.sub(".fa", "", binn)
return prefix
# Extract Matrix
###
### Main code
###
# Set arguments
# Modification to use argparse
parser = argparse.ArgumentParser(
description="Predict phage draft genomes in metagenomic bins."
)
parser.add_argument(
"-i",
action="store",
required=True,
dest="input_folder",
help="Path to a folder containing metagenomic bins in .fa or .fasta format (required!)",
)
parser.add_argument(
"-t",
action="store",
dest="threads",
default="1",
help="Number of CPU threads to be used by Prokka and hmmscan (default=1)",
)
args = parser.parse_args()
# Greeting message
print("\n**Welcome v.HULK, a toolkit for phage host prediction!\n")
# Verify databases
if not os.path.isfile("/opt/vHULK/models/all_vogs_hmm_profiles_feb2018.hmm"):
print(
"**Your database and models are not set. Please, run: python download_and_set_models.py \n"
)
sys.exit(1)
# Create Filehandle for warnings
# warnings_handle = open('marvel-warnings.txt', 'w')
# Important variables
input_folder = args.input_folder
threads = args.threads
# Fix input folder path if missing '/'
if not re.search("/$", input_folder):
input_folder = input_folder + "/"
# Take the input folder and list all multifasta (bins) contained inside it
# print(input_folder)
list_bins_temp = os.listdir(input_folder)
list_bins = []
count_bins = 0
# Empty folder
if list_bins_temp == []:
print("**Input folder is empty. Exiting...\n")
sys.exit(1)
else:
for each_bin in list_bins_temp:
if re.search(".fasta$", each_bin, re.IGNORECASE):
list_bins.append(each_bin)
count_bins += 1
elif re.search(".fa$", each_bin, re.IGNORECASE):
list_bins.append(each_bin)
count_bins += 1
if count_bins == 0:
print(
"**There is no valid genome inside the input folder (%s).\n\
Genome or bins should be in '.fasta' or '.fa' format.\nExiting..."
% input_folder
)
sys.exit(1)
print(
"**Arguments are OK. Checked the input folder and found %d genomes.\n"
% count_bins
)
print("**" + str(datetime.datetime.now()))
# Create results folder
try:
os.stat(input_folder + "results/")
except:
os.mkdir(input_folder + "results/")
#####
# PROKKA
#####
# Running prokka for all the bins multfasta files in input folder
# Perform a check in each bin, then call the execute_prokka function individually
# It may take awhile
count_prokka = 0
print("**Prokka has started, this may take awhile. Be patient.\n")
for binn in list_bins:
# Verify bin/Genome size
len_bin = 0
for record in SeqIO.parse(input_folder + binn, "fasta"):
len_bin += len(record.seq)
# If a bin/genome is too short, skip it
if len_bin < 5000:
print(
"**v.HULK has found a genome or bin, which is too short to code \
proteins (<5000 bp). As CDSs are an important feature for v.HULK, \
we will be skipping this: "
+ binn
)
continue
run_prokka(binn, input_folder, threads)
count_prokka += 1
if count_prokka % 10 == 0:
print("**Done with %d genomes..." % count_prokka)
print("**Prokka tasks have finished!\n")
####
# HMM SEARCHES
####
print("**" + str(datetime.datetime.now()))
print("**Starting HMM scan, this may take awhile. Be patient.\n")
# print(str(datetime.datetime.now()))
# Create a new results folder for hmmscan output
try:
os.stat(input_folder + "results/hmmscan/")
except:
os.mkdir(input_folder + "results/hmmscan/")
# Call HMMscan to all genomes
dic_matrices_by_genome = {}
prop_hmms_hits = {}
count_hmm = 0
for binn in list_bins:
# Prefix for naming results
prefix = get_prefix(binn)
command_line_hmmscan = (
"hmmscan -o "
+ input_folder
+ "results/hmmscan/"
+ prefix
+ "_hmmscan.out --cpu "
+ threads
+ " --tblout "
+ input_folder
+ "results/hmmscan/"
+ prefix
+ "_hmmscan.tbl --noali /opt/vHULK/models/all_vogs_hmm_profiles_feb2018.hmm "
+ input_folder
+ "results/prokka/"
+ prefix
+ "/prokka_results_"
+ prefix
+ ".faa"
)
# print(command_line_hmmscan)
# Use -E 1 for next time running HMMscan or leave the fix down there
# In case hmmscan returns an error - Added only because it stopped in half
# if os.path.exists(input_folder + 'results/hmmscan/' + prefix + '_hmmscan.tbl'):
# continue
try:
subprocess.call(command_line_hmmscan, shell=True)
# Comment line above and uncomment line below in case you want to run v.HULK without running hmmscan all over again
# True
except:
print("**Error calling HMMscan:", command_line_hmmscan)
sys.exit(1)
count_hmm += 1
# Iteration control
print("**Done with %d bins HMM searches..." % count_hmm)
## Create dictionary as ref of collumns - pVOGs
dic_vogs_headers = {}
with open("/opt/vHULK/files/VOGs_header.txt", "r") as file2:
for line2 in file2:
key = re.match("(.+)\n", line2).group(1)
dic_vogs_headers[key] = np.float32(0.0)
#
# Parse hmmscan results by gene
num_proteins_bin = 0
with open(
input_folder
+ "results/prokka/"
+ prefix
+ "/prokka_results_"
+ prefix
+ ".faa",
"r",
) as faa:
for line in faa:
if re.search("^>", line):
num_proteins_bin += 1
# Get gene name here
gene_name = re.search("^>(.*)", line).group(1)
dic_matches = {}
# Parse hmmout
with open(
input_folder + "results/hmmscan/" + prefix + "_hmmscan.tbl", "r"
) as hmmscan_out:
dic_genes_scores = {}
for line in hmmscan_out:
vog = ""
gene = ""
evalue = np.float32(0.0)
score = np.float32(0.0)
bias = np.float32(0.0)
if re.match("^VOG", line):
matches = re.match(
"^(VOG[\d\w]+)\s+-\s+([^\s]+)[^\d]+([^\s]+)\s+([^\s]+)\s+([^\s]+)",
line,
)
vog = matches[1]
gene = matches[2]
evalue = float(matches[3])
score = float(matches[4])
bias = float(matches[5])
if gene in dic_genes_scores:
dic_genes_scores[gene].append([vog, evalue, score, bias])
else:
dic_genes_scores[gene] = [[vog, evalue, score, bias]]
# Here goes the continuation
# Create a matrix by accession
dic_matrices_by_genome[prefix] = pd.DataFrame(
index=dic_genes_scores.keys(),
columns=dic_vogs_headers.keys(),
dtype=float,
)
dic_matrices_by_genome[prefix].fillna(value=np.float32(0.0), inplace=True)
# Fill in evalue values
for gene in dic_genes_scores:
for each_match in dic_genes_scores[gene]:
# print(each_match[1], gene)
# Fix for evalue values greater than 1
if each_match[1] > 1:
# print(each_match[1])
each_match[1] = 1
# print(each_match[1])
dic_matrices_by_genome[prefix][each_match[0]][gene] = np.float32(
1.0
) - np.float32(each_match[1])
print("\n**HMMscan has finished.")
# Condense matrices to array by suming up columns
list_condensed_matrices = []
list_file_names = []
for matrix in dic_matrices_by_genome:
temp = list(dic_matrices_by_genome[matrix].sum(axis=0, skipna=True))
list_file_names.append(matrix)
# Parse tag
# if re.search('^NC_.*', matrix):
# matrix = matrix.replace("NC_", "NC")
# [0]accession [1]genus [2]species
# tags = matrix.split("_")
# For Genus
# temp.append(tags[1])
# temp.append(tags[0])
# For Species
# temp.append(tag[1]+"_"+tag[2])
# temp.append(tag[0])
list_condensed_matrices.append(temp)
# Convert to array
# import numpy as np
array = np.array(list_condensed_matrices)
# print("ARRAY-SHAPE: ", len(array))
###
# Predictions
###
print("\n**Starting deeplearning predictions...")
# load models
model_genus_relu = load_model(
"/opt/vHULK/models/model_genus_total_fixed_relu_08mar_2020.h5",
custom_objects={"LeakyReLU": LeakyReLU, "ReLU": ReLU},
)
model_genus_sm = load_model(
"/opt/vHULK/models/model_genus_total_fixed_softmax_01mar_2020.h5",
custom_objects={"LeakyReLU": LeakyReLU, "ReLU": ReLU},
)
model_species_relu = load_model(
"/opt/vHULK/models/model_species_total_fixed_relu_08mar_2020.h5",
custom_objects={"LeakyReLU": LeakyReLU, "ReLU": ReLU},
)
model_species_sm = load_model(
"/opt/vHULK/models/model_species_total_fixed_softmax_01mar_2020.h5",
custom_objects={"LeakyReLU": LeakyReLU, "ReLU": ReLU},
)
with open(input_folder + "results/results.csv", "w") as file:
file.write(
"BIN/genome,pred_genus_relu,score_genus_relu,Pred_genus_softmax,score_genus_softmax,pred_species_relu,score_species_relu,pred_species_softmax,score_species_softmax,final_prediction,entropy\n"
)
for i in range(0, len(array)):
# Genus ReLu
# print(list_file_names[i])
pred_gen_relu = model_genus_relu.predict(np.array([array[i]]))
# print("Genus:ReLu")
# print(pred_gen_relu)
position_pred_gen_relu = np.argmax(pred_gen_relu)
if not pred_gen_relu.any():
name_pred_gen_relu = "None"
score_pred_gen_relu = "0"
else:
list_hosts_genus = [
line.rstrip("\n") for line in open("/opt/vHULK/files/list_hosts_genus.txt")
]
name_pred_gen_relu = list_hosts_genus[position_pred_gen_relu]
score_pred_gen_relu = str(pred_gen_relu[0][position_pred_gen_relu])
# print(list_hosts_genus[position_pred_gen_relu])
# print(position_pred_gen_relu, pred_gen_relu[0][position_pred_gen_relu])
# Genus softmax
pred_gen_sm = model_genus_sm.predict(np.array([array[i]]))
# print("Genus:Softmax")
# print(pred_gen_sm)
position_pred_gen_sm = np.argmax(pred_gen_sm)
list_hosts_genus = [
line.rstrip("\n") for line in open("/opt/vHULK/files/list_hosts_genus.txt")
]
name_pred_gen_sm = list_hosts_genus[position_pred_gen_sm]
score_pred_gen_sm = str(pred_gen_sm[0][position_pred_gen_sm])
# print(list_hosts_genus[position_pred_gen_sm])
# print(position_pred_gen_sm, pred_gen_sm[0][position_pred_gen_sm])
# Species Relu
pred_sp_relu = model_species_relu.predict(np.array([array[i]]))
# print("Species:ReLu")
# print(pred_sp_relu)
position_pred_sp_relu = np.argmax(pred_sp_relu)
if not pred_sp_relu.any():
name_pred_sp_relu = "None"
score_pred_sp_relu = "0"
else:
list_hosts_sp = [
line.rstrip("\n") for line in open("/opt/vHULK/files/list_hosts_species.txt")
]
# print(list_hosts_sp)
name_pred_sp_relu = list_hosts_sp[position_pred_sp_relu]
score_pred_sp_relu = str(pred_sp_relu[0][position_pred_sp_relu])
# print(list_hosts_sp[position_pred_sp_relu])
# print(position_pred_sp_relu, pred_sp_relu[0][position_pred_sp_relu])
# Species softmax
pred_sp_sm = model_species_sm.predict(np.array([array[i]]))
# print("Species:Softmax")
# print(pred_sp_sm)
position_pred_sp_sm = np.argmax(pred_sp_sm)
import numpy as np
from integrator import *
from qoi import *
import parallel as par
def simSetUp(inpt):
Sim = {}
Ndof = int(inpt['Ndof'])
Timestep = float(inpt['Timestep'])
Tf = float(inpt['Tf'])
NSim = int(inpt['NSim'])
Sim['Simulation name'] = inpt['Simulation name']
Sim['Ndof'] = Ndof
Sim['Timestep'] = Timestep
Sim['Tf'] = Tf
Sim['NSim'] = NSim
Sim['Record solution'] = (inpt['Record solution']=="True")
# MPI parallelization
nSim_, startSim_ = par.partitionSim(NSim)
Sim['nSim_'] = nSim_
Sim['startSim_'] = startSim_
if par.nProc > 1:
Sim['reconstruct Sol'] = (inpt['reconstruct Sol']=="True")
Sim['reconstruct QOI'] = (inpt['reconstruct QOI']=="True")
# Post proc
Sim['Plot'] = (inpt['Plot']=="True")
Sim['Build CDF'] = (inpt['Build CDF']=="True")
if Sim['Build CDF']:
Sim['Plot CDF'] = (inpt['Plot CDF']=="True")
Sim['Build rare paths'] = (inpt['Build rare paths']=="True")
if Sim['Build rare paths']:
Sim['Levels'] = [float(lev) for lev in inpt['Levels'].split()]
Sim['Plot rare paths'] = (inpt['Plot rare paths']=="True")
if inpt['Simulation name'] == 'KS':
# scalars for ETDRK4
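# (Explanatory note added for clarity, not part of the original source.)
# The block below follows the standard ETDRK4 setup of Kassam & Trefethen:
# L = k^2 - k^4 is the linear KS operator in Fourier space, E and E_2 are
# exp(h*L) and exp(h*L/2), and Q, f1, f2, f3 are the ETDRK4 coefficients,
# evaluated by averaging over a small complex contour (the M points in r)
# to avoid cancellation errors when h*L is small. g = -0.5j*k applies the
# nonlinear term -u*u_x in Fourier space.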
h = Timestep
Sim['Lx/pi'] = float(inpt['Lx/pi'])
k = np.transpose(np.conj(np.concatenate((np.arange(0, Ndof/2.0), np.array([0]), np.arange(-Ndof/2.0+1.0, 0))))) / (float(inpt['Lx/pi'])/2.0)
ksorted = list(abs(k))
ksorted.sort()
kalias = ksorted[int(len(ksorted)*2/3)]
indexAlias = np.argwhere(abs(k)>kalias)
L = k**2 - k**4
E = np.exp(h*L)
E_2 = np.exp(h*L/2)
M = 16
r = np.exp(1j*np.pi*(np.arange(1, M+1)-0.5) / M)
LR = h*np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Q = h*np.real(np.mean((np.exp(LR/2)-1)/LR, axis=1))
f1 = h*np.real(np.mean((-4-LR+np.exp(LR)*(4-3*LR+LR**2))/LR**3, axis=1))
f2 = h*np.real(np.mean((2+LR+np.exp(LR)*(-2+LR))/LR**3, axis=1))
f3 = h*np.real(np.mean((-4-3*LR-LR**2+np.exp(LR)*(4-LR))/LR**3, axis=1))
tmax = Tf
nmax = round(tmax/h)
g = -0.5j*k
# Necessary data for simulations
Sim['x'] = float(inpt['Lx/pi'])*np.pi*np.linspace(1,Ndof,Ndof)/Ndof
Sim['E'] = np.reshape(E,(Ndof,1))
Sim['E_2'] = np.reshape(E_2,(Ndof,1))
Sim['Q'] = np.reshape(Q,(Ndof,1))
Sim['f1'] = np.reshape(f1,(Ndof,1))
Sim['f2'] = np.reshape(f2,(Ndof,1))
Sim['f3'] = np.reshape(f3,(Ndof,1))
Sim['nmax'] = nmax
Sim['nplt'] = 1
Sim['g'] = np.reshape(g,(Ndof,1))
Sim['k'] = np.reshape(k,(Ndof,1))
Sim['indexAlias'] = indexAlias
Sim['epsilon_init'] = float(inpt['epsilon_init'])
# forward step and qoi
Sim['stepFunc'] = ksStepETDRK4
Sim['qoiFunc'] = ksqoi
# Initial conditions
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
elif ICType=='default':
x = Sim['x']
Sim['u0'] = np.cos(x/16)*(1+np.sin(x/16))
else :
print('IC type not recognized')
if inpt['Simulation name'] == 'KSFrontBack':
# scalars for ETDRK4
h = Timestep
Sim['Lx/pi'] = float(inpt['Lx/pi'])
k = np.transpose(np.conj(np.concatenate((np.arange(0, Ndof/2.0), np.array([0]), np.arange(-Ndof/2.0+1.0, 0))))) / (float(inpt['Lx/pi'])/2.0)
ksorted = list(abs(k))
ksorted.sort()
kalias = ksorted[int(len(ksorted)*2/3)]
indexAlias = np.argwhere(abs(k)>kalias)
L = k**2 - k**4
E = np.exp(h*L)
E_2 = np.exp(h*L/2)
M = 16
r = np.exp(1j*np.pi*(np.arange(1, M+1)-0.5) / M)
LR = h*np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Q = h*np.real(np.mean((np.exp(LR/2)-1)/LR, axis=1))
f1 = h*np.real(np.mean((-4-LR+np.exp(LR)*(4-3*LR+LR**2))/LR**3, axis=1))
f2 = h*np.real(np.mean((2+LR+np.exp(LR)*(-2+LR))/LR**3, axis=1))
f3 = h*np.real(np.mean((-4-3*LR-LR**2+np.exp(LR)*(4-LR))/LR**3, axis=1))
tmax = Tf
nmax = round(tmax/h)
g = -0.5j*k
Sim['x'] = float(inpt['Lx/pi'])*np.pi*np.linspace(1,Ndof,Ndof)/Ndof
Sim['E'] = np.reshape(E,(Ndof,1))
Sim['E_2'] = np.reshape(E_2,(Ndof,1))
Sim['Q'] = np.reshape(Q,(Ndof,1))
Sim['f1'] = np.reshape(f1,(Ndof,1))
Sim['f2'] = np.reshape(f2,(Ndof,1))
Sim['f3'] = np.reshape(f3,(Ndof,1))
Sim['nmax'] = nmax
Sim['nplt'] = 1
Sim['g'] = np.reshape(g,(Ndof,1))
Sim['k'] = np.reshape(k,(Ndof,1))
Sim['indexAlias'] = indexAlias
# Necessary data for simulations
beta = float(inpt['beta'])
Lback = (k**2 - k**4)/(1+beta*k**4)
Eback = np.exp(-h*Lback)
E_2back = np.exp(-h*Lback/2)
LRback = -h*np.transpose(np.repeat([Lback], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Qback = -h*np.real(np.mean((np.exp(LRback/2)-1)/LRback, axis=1))
f1back = -h*np.real(np.mean((-4-LRback+np.exp(LRback)*(4-3*LRback+LRback**2))/LRback**3, axis=1)/(1+beta*k**4))
f2back = -h*np.real(np.mean((2+LRback+np.exp(LRback)*(-2+LRback))/LRback**3, axis=1)/(1+beta*k**4))
f3back = -h*np.real(np.mean((-4-3*LRback-LRback**2+np.exp(LRback)*(4-LRback))/LRback**3, axis=1)/(1+beta*k**4))
g = -0.5j*k
Sim['Eback'] = np.reshape(Eback,(Ndof,1))
Sim['E_2back'] = np.reshape(E_2back,(Ndof,1))
Sim['Qback'] = np.reshape(Qback,(Ndof,1))
Sim['f1back'] = np.reshape(f1back,(Ndof,1))
Sim['f2back'] = np.reshape(f2back,(Ndof,1))
Sim['f3back'] = np.reshape(f3back,(Ndof,1))
Sim['beta'] = float(inpt['beta'])
# forward step and qoi
Sim['forwardStepFunc'] = ksStepETDRK4
Sim['backwardStepFunc'] = ksStepBackRegularizedETDRK4
Sim['qoiFunc'] = ksqoi
# Initial conditions
Sim['epsilon_init'] = float(inpt['epsilon_init'])
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
elif ICType=='default':
x = Sim['x']
Sim['u0'] = np.cos(x/16)*(1+np.sin(x/16))
else :
print('IC type not recognized')
# Initial conditions
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
import os.path
import random
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.dataset_utils import letterbox_image
# Random number generator, used for random data augmentation
def rand(a=0, b=1):
return np.random.rand() * (b - a) + a
# collate_fn for the DataLoader: concatenates the numpy images and labels of one batch of triplets
# With batch_size=64, images ends up with shape (192, 3, 224, 224)
def dataset_collate(batch):
images = []
labels = []
for img, label in batch:
images.append(img)
labels.append(label)
images1 = np.array(images)[:, 0, :, :, :]
images2 = np.array(images)[:, 1, :, :, :]
images3 = np.array(images)[:, 2, :, :, :]
images = np.concatenate([images1, images2, images3], 0)
labels1 = np.array(labels)[:, 0]
labels2 = np.array(labels)[:, 1]
labels3 = np.array(labels)[:, 2]
labels = np.concatenate([labels1, labels2, labels3], 0)
return images, labels
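# Usage sketch (assumed, not part of the original file): pass dataset_collate as the
# DataLoader's collate_fn, e.g.
# loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True,
#                                      collate_fn=dataset_collate)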
class DogFaceDataset(Dataset):
# input_shape (H, W, C) (224, 224, 3)
def __init__(self, input_shape, dataset_path, num_train, num_classes):
super(DogFaceDataset, self).__init__()
self.dataset_path = dataset_path
self.image_height = input_shape[0]
self.image_width = input_shape[1]
self.channel = input_shape[2]
self.paths = []
self.labels = []
self.num_train = num_train
self.num_classes = num_classes
self.load_dataset()
def __len__(self):
return self.num_train
# Read entries from cls_train.txt to obtain image paths and labels
def load_dataset(self):
for path in self.dataset_path:
# In cls_train.txt the class index comes before the ';' and the image path after it
path_split = path.split(";")
self.paths.append(path_split[1].split()[0])
self.labels.append(int(path_split[0]))
self.paths = np.array(self.paths, dtype=np.object)
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from pdb import set_trace
import cvnn.layers as complex_layers
from cvnn.montecarlo import run_montecarlo
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
def get_dataset():
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=False,
as_supervised=True,
with_info=True,
)
ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
# ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
return ds_train, ds_test
@tf.autograph.experimental.do_not_convert
def simple_random_example():
tf.random.set_seed(0)
layer = complex_layers.ComplexDropout(.2, input_shape=(2,), seed=0)
data = np.arange(10).reshape(5, 2).astype(np.float32)
data = tf.complex(data, data)
outputs = layer(data, training=True)
expected_out = np.array([[0. + 0.j, 0. + 0.j],
[0. + 0.j, 3.75 + 3.75j],
[5. + 5.j, 6.25 + 6.25j],
[7.5 + 7.5j, 8.75 + 8.75j],
[10. + 10.j, 11.25 + 11.25j]])
assert np.all(data == layer(data, training=False))
assert np.all(outputs == expected_out)
tf.random.set_seed(0)
layer = tf.keras.layers.Dropout(.2, input_shape=(2,), seed=0)
real_outputs = layer(tf.math.real(data), training=True)
assert np.all(real_outputs == tf.math.real(outputs))
def get_real_mnist_model():
in1 = tf.keras.layers.Input(shape=(28, 28, 1))
flat = tf.keras.layers.Flatten(input_shape=(28, 28, 1))(in1)
dense = tf.keras.layers.Dense(128, activation='cart_relu')(flat)
# drop = complex_layers.ComplexDropout(rate=0.5)(dense)
drop = tf.keras.layers.Dropout(0.5)(dense)
out = tf.keras.layers.Dense(10, activation='softmax_real_with_abs', kernel_initializer="ComplexGlorotUniform")(drop)
real_model = tf.keras.Model(in1, out, name="tf_rvnn")
real_model.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=['accuracy'],
)
real_intermediate_model = tf.keras.Model(in1, drop)
return real_model, real_intermediate_model
def get_complex_mnist_model():
inputs = complex_layers.complex_input(shape=(28, 28, 1), dtype=np.float32)
flat = complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)(inputs)
dense = complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32)(flat)
drop = complex_layers.ComplexDropout(rate=0.5)(dense)
out = complex_layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32)(drop)
complex_model = tf.keras.Model(inputs, out, name="rvnn")
complex_model.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=['accuracy'],
)
complex_intermediate_model = tf.keras.Model(inputs, drop)
return complex_model, complex_intermediate_model
def dropout():
ds_train, ds_test = get_dataset()
train_images, train_labels = convert_to_numpy(ds_train)
test_images, test_labels = convert_to_numpy(ds_test)
img, label = next(iter(ds_test))
tf.random.set_seed(0)
complex_model, complex_intermediate_model = get_complex_mnist_model()
tf.random.set_seed(0)
real_model, real_intermediate_model = get_real_mnist_model()
c_before_train_eval = complex_intermediate_model(img, training=False)
r_before_train_eval = real_intermediate_model(img, training=False)
assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal after drop with training=False"
assert np.all(real_model.layers[2].get_weights()[0] == complex_model.layers[2].get_weights()[
0]), f"Output layer weights are not equal before any call"
assert np.all(real_model.layers[-1].get_weights()[0] == complex_model.layers[-1].get_weights()[
0]), f"Output layer weights are not equal before any call"
c_before_train_eval = complex_model(img, training=False)
r_before_train_eval = real_model(img, training=False)
assert np.all(r_before_train_eval == c_before_train_eval)
# Authors: <NAME>
# License: BSD 3 Clause
"""
PyMF Convex Hull Non-negative Matrix Factorization [1]
CHNMF(NMF) : Class for Convex-hull NMF
quickhull : Function for finding the convex hull in 2D
[1] <NAME>, <NAME>, and <NAME>. Convex Non-Negative Matrix
Factorization in the Wild. ICDM 2009.
"""
import numpy as np
from itertools import combinations
from pymf.dist import vq
from pymf.pca import PCA
from pymf.aa import AA
__all__ = ["CHNMF"]
def quickhull(sample):
""" Find data points on the convex hull of a supplied data set
Args:
sample: data points as column vectors n x d
n - the number of samples
d - data dimension (should be two)
Returns:
a k x d matrix containing the convex hull data points
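Example:
a minimal usage sketch (2-D points assumed):
    pts = np.random.rand(100, 2)
    hull = quickhull(pts)  # rows of pts that lie on the convex hull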
"""
link = lambda a, b: np.concatenate((a, b[1:]))
edge = lambda a, b: np.concatenate(([a], [b]))
def dome(sample, base):
h, t = base
dists = np.dot(sample - h, np.dot(((0, -1), (1, 0)), (t - h)))
outer = np.repeat(sample, dists > 0, axis=0)
if len(outer):
pivot = sample[np.argmax(dists)]
return link(dome(outer, edge(h, pivot)),
dome(outer, edge(pivot, t)))
else:
return base
if len(sample) > 2:
axis = sample[:, 0]
base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)
"""
The classes defined here are for querying the `ATNF pulsar catalogue
<http://www.atnf.csiro.au/people/pulsar/psrcat/>`_ and viewing the resulting
information.
"""
from __future__ import print_function, division
import warnings
from collections import OrderedDict
import re
import six
from six.moves import cPickle as pickle
from six import string_types
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BarycentricTrueEcliptic, Galactic
import astropy.units as aunits
from astropy.constants import c, GM_sun
from astropy.table import Table
from pandas import DataFrame, Series
from copy import deepcopy
from .config import ATNF_BASE_URL, PSR_ALL, PSR_ALL_PARS, PSR_TYPE, PSR_ASSOC_TYPE, PSR_BINARY_TYPE
from .utils import condition, age_pdot, B_field_pdot
class QueryATNF(object):
"""
A class to generate a query of the
`ATNF pulsar catalogue <http://www.atnf.csiro.au/people/pulsar/psrcat/>`_.
By default, this class will download and cache the latest version of the
catalogue database file. The catalogue can be queried for specific pulsar
parameters and for specific named pulsars. Conditions on the parameter can
be specified. The results will be stored as a :class:`pandas.DataFrame`,
but can also be accessed as an :class:`astropy.table.Table`.
Args:
params (str, :obj:`list`): a list of strings with the
pulsar `parameters
<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html?type=expert#par_list>`_
to query. The parameter names are case insensitive. If this is not
given, then all parameters will be returned by default.
condition (str): a string with logical conditions for the returned
parameters. The allowed format of the condition string is given
`here
<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#condition>`_.
Defaults to None.
psrtype (str, :obj:`list`): a list of strings, or single string, of
conditions on the `type
<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#psr_types>`_
of pulsars to return (logical AND will be used for any listed
types). Defaults to None.
assoc (:obj:`list`, str): a condition on the associations of pulsars to
return (logical AND will be used for any listed associations).
Defaults to None.
bincomp (str, :obj:`list`): a list of strings, or single string, of
conditions on the
`binary
<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#bincomp_type>`_
companion types of pulsars to return (logical AND will be used for
any listed associations). Defaults to None.
exactmatch (bool): a boolean stating whether associations and types
given as the condition should be an exact match. Defaults to False.
sort_attr (str): the (case insensitive) parameter name on which to
sort the returned pulsars. Defaults to ``JName``.
sort_ord (str): the order of the sorting, can be either ``asc`` or
``desc``. Defaults to ascending.
psrs (:obj:`list`): a list of pulsar names for which to get the
requested parameters. Defaults to None.
circular_boundary (:obj:`list`, tuple): a list containing three entries
defining the centre (in right ascension and declination), and
radius of a circle in which to search for and return pulsars. The
first entry is the centre point right ascension as a string in
format 'hh:mm:ss', the second entry is the centre point declination
as a string in format 'dd:mm:ss', and the final entry is the
circle's radius in degrees. This condition will only be applied if
viewing the results as an :class:`astropy.table.Table`.
Alternatively, `coord1`, `coord2`, and `radius` can be used.
coord1 (str): a string containing a right ascension in the format
('hh:mm:ss') that centres a circular boundary in which to search
for pulsars (requires coord2 and radius to be set).
coord2 (str): a string containing a declination in the format
('dd:mm:ss') that centres a circular boundary in which to search
for pulsars (requires coord1 and radius to be set).
radius (float): the radius (in degrees) of a circular boundary in which
to search for pulsars (requires coord1 and coord2 to be set).
include_errs (bool): Set if wanting parameter errors to be returned.
Defaults to True.
include_refs (bool): Set if wanting to include references tags in the
output tables. Defaults to False.
adsref (bool): Set if wanting to use an :class:`ads.search.SearchQuery`
to get reference information. Defaults to False.
loadfromdb (str): Load a pulsar database file from a given path rather
than using the ATNF Pulsar Catalogue database. Defaults to None.
loadquery (str): load an instance of :class:`~psrqpy.search.QueryATNF`
from the given file, rather than performing a new query. This was
`loadfromfile` in earlier versions, which still works but has been
deprecated. Defaults to None.
cache (bool): Cache the catalogue database file for future use. This is
ignored if `loadfromdb` is given. Defaults to True.
checkupdate (bool): If True then check whether a cached catalogue file
has an update available, and re-download if there is an update.
Defaults to False.
frompandas (:class:`pandas.DataFrame`): create a new
:class:`psrqpy.QueryATNF` object from an existing
:class:`pandas.DataFrame`.
fromtable (:class:`astropy.table.Table`): create a new
:class:`psrqpy.QueryATNF` object from an existing
:class:`astropy.table.Table`.
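Example:
A minimal usage sketch (assumes the catalogue file can be downloaded or is
already cached)::

    from psrqpy import QueryATNF

    # get the name, period and period derivative of every pulsar
    query = QueryATNF(params=['JNAME', 'P0', 'P1'])
    table = query.table  # results as an astropy Table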
"""
def __init__(self, params=None, condition=None, psrtype=None, assoc=None,
bincomp=None, exactmatch=False, sort_attr='jname',
sort_order='asc', psrs=None, include_errs=True,
include_refs=False, adsref=False, loadfromfile=None,
loadquery=None, loadfromdb=None, cache=True,
checkupdate=False, circular_boundary=None, coord1=None,
coord2=None, radius=0., frompandas=None, fromtable=None):
if loadfromfile is not None and loadquery is None:
loadquery = loadfromfile
if loadquery:
self.load(loadquery)
return
self.__dataframe = DataFrame()
self.include_errs = include_errs
self._include_refs = include_refs
self._savefile = None # file to save class to
self._loadfile = None # file class loaded from
self.condition = condition
self.exactmatch = exactmatch
self.psrs = psrs
self._sort_order = sort_order
self.sort_key = sort_attr.upper()
self._useads = adsref
# conditions for finding pulsars within a circular boundary
self._coord1 = coord1
self._coord2 = coord2
self._radius = radius
if isinstance(circular_boundary, (list, tuple)):
if len(circular_boundary) != 3:
raise ValueError("Circular boundary must contain three values")
self._coord1 = circular_boundary[0]
self._coord2 = circular_boundary[1]
self._radius = circular_boundary[2]
elif self._coord1 is None or self._coord2 is None or self._radius == 0.:
# if any are not set then we can't define a boundary
self._coord1 = self._coord2 = ''
self._radius = 0.
else:
if (not isinstance(self._coord1, string_types)
or not isinstance(self._coord2, string_types)):
raise ValueError("Circular boundary centre coordinates must "
"be strings")
if not isinstance(self._radius, float) and not isinstance(self._radius, int):
raise ValueError("Circular boundary radius must be a float or "
"int")
self._coord = None
if self._coord1 and self._coord2 and self._radius != 0.:
if params is None:
params = []
# set centre coordinate as an astropy SkyCoord
self._coord = SkyCoord(self._coord1, self._coord2,
unit=(aunits.hourangle, aunits.deg))
# set conditions
condparse = self.parse_conditions(psrtype=psrtype, assoc=assoc,
bincomp=bincomp)
if len(condparse) > 0:
if self.condition is None:
self.condition = condparse
else:
self._condition += ' && {}'.format(condparse)
self.query_params = params
self._refs = None # set of pulsar references
self._pulsars = None # gets set to a Pulsars object by get_pulsars()
# store passed pandas DataFrame
if isinstance(frompandas, DataFrame):
self.__dataframe = frompandas.copy()
return
# store passed astropy Table
if isinstance(fromtable, Table):
self.__dataframe = fromtable.to_pandas()
return
# download and cache (if requested) the database file
try:
_ = self.get_catalogue(path_to_db=loadfromdb, cache=cache,
update=checkupdate)
except IOError:
raise IOError("Could not get catalogue database file")
# perform requested sorting
_ = self.sort(inplace=True)
def get_references(self, useads=False, cache=True):
"""
Get a dictionary of short reference tags keyed to the full reference
string. If requested also get a dictionary of reference tags keyed to
NASA ADS URLs for the given reference. This uses the function
:func:`psrqpy.utils.get_references`.
Args:
useads (bool): Set this to True to get the NASA ADS reference
URLs. Defaults to False.
cache (bool): The flag sets whether or not to use a pre-cached
database of references. Defaults to True.
"""
from .utils import get_references
self._refs = None
self._adsrefs = None
useadst = useads or self._useads
if useadst:
self._refs, self._adsrefs = get_references(useadst,
cache=cache)
else:
self._refs = get_references(False, cache=cache)
def parse_ref(self, refs, useads=False):
"""
This function takes a short format reference string from the ATNF
Pulsar Catalogue and returns the full format reference. It can
also return a NASA ADS URL if requested and present.
Args:
refs (str, array_like): a single short reference string, or
an array of reference strings.
useads (bool): Set whether or not to also return a NASA ADS
reference URL if present.
Returns:
array_like: a single full reference string, or an array of full
reference strings. If NASA ADS URLs are requested, each return
value will be a tuple containing the reference string and URL.
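Example:
A usage sketch (the reference tag shown is illustrative)::

    fullref = query.parse_ref('hlk+04')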
"""
useadst = useads or self._useads
if self._refs is None:
self.get_references(useads=useadst)
elif self._adsrefs is None and useadst:
self.get_references(useads=useadst)
singleref = False
if isinstance(refs, string_types):
singleref = True
refs = [refs]
# check refs is an iterable object
if not hasattr(refs, '__iter__'):
raise ValueError("Reference tags must be a string or array like")
refstrs = []
for ref in refs:
if isinstance(ref, string_types):
if ref in self._refs:
if useadst:
if ref in self._adsrefs:
refstrs.append((self._refs[ref], self._adsrefs[ref]))
else:
refstrs.append((self._refs[ref], None))
else:
refstrs.append(self._refs[ref])
else:
if useadst:
refstrs.append((None, None))
else:
refstrs.append(None)
else:
if useadst:
refstrs.append((None, None))
else:
refstrs.append(None)
# just return a single value if only one input
if singleref:
return refstrs[0]
return refstrs
def get_catalogue(self, path_to_db=None, cache=True, update=False,
overwrite=True):
"""
Call the :func:`psrqpy.utils.get_catalogue` function to download the
ATNF Pulsar Catalogue, or load a given catalogue path.
Args:
path_to_db (str): if the path to a local version of the database
file is given then that will be read in rather than attempting to
download the file (defaults to None).
cache (bool): cache the downloaded ATNF Pulsar Catalogue file. Defaults
to True. This is ignored if `path_to_db` is given.
update (bool): if True the ATNF Pulsar Catalogue will be
re-downloaded and cached if there has been a change compared to the
currently cached version. This is ignored if `path_to_db` is given.
overwrite (bool): if True the returned catalogue will overwrite the
catalogue currently contained within the :class:`~psrqpy.QueryATNF`
class. If False then a new :class:`~psrqpy.QueryATNF` copy of the
catalogue will be returned.
Returns:
:class:`psrqpy.QueryATNF`: a table containing the catalogue.
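Example:
A sketch of loading a local copy of the database (the path shown is an
assumption)::

    query = QueryATNF()
    query.get_catalogue(path_to_db='psrcat.db', cache=False)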
"""
from .utils import get_catalogue
try:
dbtable = get_catalogue(path_to_db=path_to_db, cache=cache,
update=update, pandas=True)
except Exception as e:
raise RuntimeError("Problem getting catalogue: {}".format(str(e)))
if not overwrite:
newcatalogue = QueryATNF(params=self.query_params,
condition=self.condition,
exactmatch=self.exactmatch,
sort_attr=self._sort_attr,
sort_order=self._sort_order,
psrs=self.psrs,
include_errs=self._include_errs,
include_refs=self._include_refs,
adsref=self._useads, cache=False,
coord1=self._coord1, coord2=self._coord2,
radius=self._radius,
frompandas=dbtable)
return newcatalogue
# update current catalogue
self.__dataframe = DataFrame(dbtable)
self.__dataframe.version = dbtable.version
self._dbfile = path_to_db
self._checkupdate = update
self._cache = cache
# calculate derived parameters
self.set_derived()
self.parse_types()
return self
@property
def columns(self):
"""
Return the table column names.
"""
return self.__dataframe.columns
def update(self, column, name=None, overwrite=False):
"""
Update a column in the internal :class:`pandas.DataFrame` table using
:meth:`pandas.DataFrame.update`. If the column does not exist, it will
be added to the table.
Args:
column (:class:`pandas.Series`): a named column of values.
name (str): the name of the column (required if `column` is not a
:class:`pandas.Series`, or to overwrite the current column
name)
overwrite (bool): set whether to overwrite non-NA values or not if
the column already exists. Defaults to False, so non-NA values
will not be overwritten.
"""
# get column name to update/add
if name is not None:
# use input `name` by default
colname = name
else:
try:
colname = column.name
except AttributeError:
colname = None
if colname is not None:
if colname in self.columns:
if not isinstance(column, Series):
column = Series(column, name=colname)
if column.dtype == self.__dataframe[colname].dtype:
self.__dataframe.update(column, overwrite=overwrite)
else:
raise ValueError("Could not update table with "
"supplied column")
else:
try:
self.catalogue[colname] = column
except Exception as e:
raise ValueError("Could not add supplied columns to "
"table: {}".format(str(e)))
else:
raise ValueError("No column name given")
@property
def sort_key(self):
return self._sort_attr
@sort_key.setter
def sort_key(self, value):
"""
Set the parameter to sort on.
"""
if not isinstance(value, string_types):
raise ValueError("Sort parameter must be a string")
self._sort_attr = value
def sort(self, sort_attr='JNAME', sort_order='asc', inplace=False):
"""
Sort the generated catalogue :class:`~pandas.DataFrame` on a given
attribute and in either ascending or descending order.
Args:
sort_attr (str): The parameter on which to perform the sorting of
the query output. Defaults to 'JNAME'.
sort_order (str): Set to 'asc' to sort the parameter values in
ascending order, or 'desc' to sort in descending order.
Defaults to ascending.
inplace (bool): If True, and sorting the class' internal
:class:`~pandas.DataFrame`, then the sorting will be done
in place without returning a copy of the table, otherwise
a sorted copy of the table will be returned.
Returns:
:class:`~pandas.DataFrame`: a table containing the sorted
catalogue.
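Example:
A sketch sorting the catalogue by spin frequency, highest first::

    df = query.sort(sort_attr='F0', sort_order='desc')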
"""
if sort_attr is not None:
self.sort_key = sort_attr.upper()
if self.sort_key not in self.columns:
raise KeyError("Sorting by attribute '{}' is not possible as it "
"is not in the table".format(self.sort_key))
self._sort_order = sort_order
# check sort order is either 'asc' or 'desc' (or some synonyms)
if self._sort_order.lower() in ['asc', 'ascending', 'up', '^']:
self._sort_order = 'asc'
elif self._sort_order.lower() in ['desc', 'descending', 'down', 'v']:
self._sort_order = 'desc'
else:
warnings.warn("Unrecognised sort order '{}', defaulting to "
"'ascending'".format(sort_order), UserWarning)
self._sort_order = 'asc'
sortorder = True if self._sort_order == 'asc' else False
if inplace:
# sort the stored dataframe
_ = self.__dataframe.sort_values(self.sort_key,
ascending=sortorder,
inplace=inplace)
return self.__dataframe
else:
return self.__dataframe.sort_values(self.sort_key,
ascending=sortorder)
def __getitem__(self, key):
if key not in self.pandas.columns:
raise KeyError("Key '{}' not in queried results".format(key))
# return astropy table column
return self.table[key]
def __getstate__(self):
"""
Define to allow pickling of whole object.
See, e.g., https://stackoverflow.com/a/2050357/1862861.
"""
# Pulsars() object can cause pickling issues (in Python 2.7), so have
# workaround
from .pulsar import Pulsars
if isinstance(self._pulsars, Pulsars):
del self._pulsars
self._pulsars = True
# save ATNF version information from DataFrame separately
self._atnf_version = self.catalogue.version
return self.__dict__
def __setstate__(self, d):
"""
Define to allow pickling.
"""
self.__dict__.update(d)
# restore ATNF version info to catalogue
self.__dataframe.version = self._atnf_version
# Pulsars() object can cause pickling issues (in Python 2.7), so have
# workaround
if isinstance(self._pulsars, bool):
from .pulsar import Pulsars
if self._pulsars:
# get the Pulsars() object
self._pulsars = None
_ = self.get_pulsars()
def save(self, fname):
"""
Output the :class:`~psrqpy.search.QueryATNF` instance to a pickle file
for future loading.
Args:
fname (str): the filename to output the pickle to
"""
try:
fp = open(fname, 'wb')
pickle.dump(self, fp, 2)
fp.close()
self._savefile = fname
except IOError:
raise IOError("Error outputing class to pickle file")
def load(self, fname):
"""
Load a previously saved pickle of this class.
Args:
fname (str): the filename of the pickled object
"""
try:
fp = open(fname, 'rb')
tmpdict = pickle.load(fp)
fp.close()
self.__dict__.clear() # clear current self
self.__dict__.update(tmpdict.__dict__)
self._loadfile = fname
except IOError:
raise IOError("Error reading in pickle")
def as_array(self):
"""
Returns:
:class:`~numpy.ndarray`: the output table as an array.
"""
return self.table.as_array()
@property
def psrs(self):
"""
Return the name(s) of particular pulsars asked for in the query.
"""
return self._psrs
@psrs.setter
def psrs(self, psrnames=None):
"""
Set a list of names of pulsars to be returned by the query.
Args:
psrnames (str, list): a list of names, or a single name, of pulsars
to be returned by the query.
"""
# set the pulsar name list
if psrnames is None:
self._psrs = None
else:
if isinstance(psrnames, string_types):
self._psrs = [psrnames]
elif isinstance(psrnames, list):
self._psrs = psrnames
elif isinstance(psrnames, np.ndarray):
if psrnames.dtype == np.str or psrnames.dtype == np.unicode:
self._psrs = psrnames.tolist()
else:
raise TypeError("psrnames must be a list of strings")
else:
raise TypeError("psrnames must be a list of strings")
@property
def num_pulsars(self):
"""
Return the number of pulsars found in with query
"""
return len(self)
@property
def table(self):
"""
Return a :class:`astropy.table.Table` based on the query.
"""
# convert to astropy table
thistable = Table.from_pandas(self.pandas)
# add units if known
for key in PSR_ALL_PARS:
if key in thistable.colnames:
if PSR_ALL[key]['units']:
thistable.columns[key].unit = PSR_ALL[key]['units']
if (PSR_ALL[key]['err'] and
key+'_ERR' in thistable.colnames):
thistable.columns[key+'_ERR'].unit = PSR_ALL[key]['units']
# add catalogue version to metadata
thistable.meta['version'] = self.get_version
return thistable
@property
def catalogue_table(self):
"""
Return the full catalogue as a :class:`astropy.table.Table` without
any query conditions applied.
Note: in this returned table any references will not be converted into
actual reference strings, but will still be the ATNF Pulsar Catalogue
tags.
"""
# convert catalogue to astropy table
thistable = Table.from_pandas(self.catalogue)
# add units if known
for key in PSR_ALL_PARS:
if key in thistable.colnames:
if PSR_ALL[key]['units']:
thistable.columns[key].unit = PSR_ALL[key]['units']
if (PSR_ALL[key]['err'] and
key+'_ERR' in thistable.colnames):
thistable.columns[key+'_ERR'].unit = PSR_ALL[key]['units']
# add catalogue version to metadata
thistable.meta['version'] = self.get_version
return thistable
@property
def empty(self):
"""
Return True if the :class:`pandas.DataFrame` containing the catalogue
is empty.
"""
return self.__dataframe.empty
def query_table(self, query_params=None, usecondition=True,
useseparation=True):
"""
Return an :class:`astropy.table.Table` from the query with new
parameters or conditions if given.
Args:
query_params (str, list): a parameter, or list of parameters, to
return from the query. If this is None then all parameters are
returned.
usecondition (bool, str): If True then the condition parsed to the
:class:`psrqpy.QueryATNF`: class will be used when returning
the table. If False no condition will be applied to the
returned table. If a string is given then that will be the
assumed condition string.
useseparation (bool): If True and a set of sky coordinates and
radius around which to return pulsars was set in the
:class:`psrqpy.QueryATNF`: class then only pulsars within the
given radius of the sky position will be returned. Otherwise
all pulsars will be returned.
Returns:
:class:`astropy.table.Table`: a table of the pulsar data returned
by the query.
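Example:
A sketch returning only names and periods of pulsars matching a condition
string (the condition shown is illustrative)::

    table = query.query_table(query_params=['JNAME', 'P0'],
                              usecondition='P0 < 0.033')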
"""
if not self.empty: # convert to Table if DataFrame is not empty
if query_params is None:
query_params = self.columns
elif isinstance(query_params, string_types):
query_params = [query_params]
elif not isinstance(query_params, list):
raise TypeError("query_params must be a string or list.")
# convert to numpy array
query_params = np.array(query_params)
# check parameters are in table
intab = np.array([par in self.columns for par in query_params])
if not np.all(intab):
warnings.warn("Not all request parameters '{}' were in the "
"table".format(query_params[~intab].tolist()))
if not np.any(intab):
warnings.warn("No requested parameters were in the table")
# return given the condition
expression = None
if usecondition is True and isinstance(self.condition, string_types):
expression = self.condition
elif isinstance(usecondition, string_types):
expression = usecondition
# sort table
dftable = self.sort(self.sort_key, self._sort_order)
if expression is not None:
# apply conditions
dftable = condition(dftable, expression, self._exactmatch)
# return only requested parameters and convert to table
table = Table.from_pandas(dftable[query_params[intab].tolist()])
# add units if known
for key in PSR_ALL_PARS:
if key in table.colnames:
if PSR_ALL[key]['units']:
table.columns[key].unit = PSR_ALL[key]['units']
if PSR_ALL[key]['err'] and key+'_ERR' in table.colnames:
table.columns[key+'_ERR'].unit = PSR_ALL[key]['units']
# add catalogue version to metadata
table.meta['version'] = self.get_version
table.meta['ATNF Pulsar Catalogue'] = ATNF_BASE_URL
if (useseparation and self._coord is not None and 'RAJ' in
table.colnames and 'DECJ' in table.colnames):
# apply sky coordinate constraint
catalog = SkyCoord(table['RAJ'], table['DECJ'],
unit=(aunits.hourangle, aunits.deg))
# get separations
d2d = self._coord.separation(catalog)
# find separations within the required radius
catalogmsk = d2d < self._radius*aunits.deg
table = table[catalogmsk]
return table
else:
# return an empty table
return Table()
@property
def condition(self):
"""
Return the string of logical conditions applied to the pulsars.
"""
return self._condition
@condition.setter
def condition(self, expression):
"""
Set the logical condition string to apply to queried pulsars.
Args:
expression (str): A string containing logical expressions to apply
to queried pulsars.
"""
if not isinstance(expression, string_types) and expression is not None:
raise TypeError("Condition must be a string")
self._condition = expression
@property
def include_errs(self):
"""
Return a boolean stating whether errors are to be included.
"""
return self._include_errs
@include_errs.setter
def include_errs(self, inclerr):
"""
Set whether to include errors with queried pulsars.
Args:
inclerr (bool): Set to True to include errors.
"""
if isinstance(inclerr, (bool, int)):
self._include_errs = bool(inclerr)
else:
TypeError("Flag must be boolean")
@property
def exactmatch(self):
"""
Return the boolean stating whether certain conditions should apply an
exact match.
"""
return self._exactmatch
@exactmatch.setter
def exactmatch(self, match):
"""
Set whether to apply an exact match criterion for certain conditions.
Args:
match (bool): A boolean stating whether or not to apply an exact
match.
"""
if not isinstance(match, bool):
if isinstance(match, int):
if match != 0 and match != 1:
raise TypeError("Exact match requires boolean")
else:
raise TypeError("Exact match requires boolean")
self._exactmatch = bool(match)
@property
def query_params(self):
"""
Return the parameters required for the query.
"""
return self._query_params
@query_params.setter
def query_params(self, params):
"""
Set the parameters with which to query from the catalogue.
Args:
params (list, str): A list of parameter names to query from the
catalogue.
"""
self._query_params = None
if isinstance(params, list):
if len(params) == 0:
print('No query parameters have been specified')
for p in params:
if not isinstance(p, string_types):
raise TypeError("Non-string value '{}' found in params "
"list".format(p))
self._query_params = [p.upper() for p in params]
elif isinstance(params, string_types):
# make sure parameter is all upper case
self._query_params = [params.upper()]
elif params is not None:
raise TypeError("'params' must be a list or string")
# remove any duplicate
if self._query_params is not None:
self._query_params = list(set(self._query_params))
for p in list(self._query_params):
if p not in PSR_ALL_PARS:
warnings.warn("Parameter '{}' not recognised.".format(p),
UserWarning)
@property
def catalogue(self):
"""
Return the entire stored :class:`~pandas.DataFrame` catalogue
without any sorting or conditions applied.
"""
return self.__dataframe
@property
def dataframe(self):
"""
Return the query table as a :class:`pandas.DataFrame`.
"""
return self.pandas
@property
def pandas(self):
"""
Return the query table as a :class:`pandas.DataFrame`.
"""
# get only required parameters and sort
dftable = self.sort(self.sort_key, self._sort_order)
if (self._coord is not None and 'RAJD' in dftable.columns
and 'DECJD' in dftable.columns):
# apply sky coordinate constraint
catalog = SkyCoord(dftable['RAJD'], dftable['DECJD'],
unit=(aunits.deg, aunits.deg))
# get separations
d2d = self._coord.separation(catalog)
# find separations within the required radius
catalogmsk = d2d < self._radius*aunits.deg
dftable = dftable[catalogmsk]
if self._condition is not None:
# apply condition
dftable = condition(dftable, self._condition, self._exactmatch)
# return only requested pulsars
if self.psrs is not None:
jnames = np.zeros(len(dftable), dtype=np.bool)
if 'JNAME' in dftable.columns:
jnames = np.array([psr in self.psrs
for psr in dftable['JNAME']])
bnames = np.zeros(len(dftable), dtype=np.bool)
if 'BNAME' in dftable.columns:
bnames = np.array([psr in self.psrs
for psr in dftable['BNAME']])
if np.any(jnames) and np.any(bnames):
allnames = jnames | bnames
elif np.any(jnames):
allnames = jnames
elif np.any(bnames):
allnames = bnames
else:
warnings.warn("No requested pulsars '{}' were "
"found.".format(self.psrs), UserWarning)
return DataFrame() # empty dataframe
dftable = dftable[allnames]
# return only the required query parameters
if isinstance(self.query_params, list):
retpars = list(self.query_params) # return parameters
for par in self.query_params:
if par in PSR_ALL_PARS:
if PSR_ALL[par]['err'] and self._include_errs:
retpars.append(par+'_ERR')
if PSR_ALL[par]['ref'] and self._include_refs:
retpars.append(par+'_REF')
retpars = list(set(retpars)) # remove duplicates
dftable = dftable[retpars]
# reset the indices to zero in the dataframe
return dftable.reset_index(drop=True)
def parse_types(self):
"""
Parse information in 'ASSOC', 'TYPE', and 'BINCOMP', as described in
`<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#psr_types>`_.
"""
self.parse_assoc() # parse the association parameter
self.parse_type() # parse the type parameter
self.parse_bincomp() # parse the binary companion parameter
def parse_assoc(self):
"""
Parse default string representing source associations, extracting (first) value
and reference. Multiple values and references currently not supported.
"""
if 'ASSOC' not in self.columns:
return
# Save original parameter in new column
ASSOCorig = self.catalogue['ASSOC'].copy()
ASSOCorig.name = 'ASSOC_ORIG'
self.update(ASSOCorig, name='ASSOC_ORIG')
ASSOCnew = self.catalogue['ASSOC'].copy()
idxassoc = ~ASSOCnew.isna()
# Set references first
if 'ASSOC_REF' not in self.columns:
ASSOCREFnew = Series(np.full(self.catalogue_len, '', dtype='U64'),
name='ASSOC_REF')
else:
ASSOCREFnew = self.catalogue['ASSOC_REF'].copy()
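# e.g. an (illustrative) entry such as 'SNR:Vela[xyz+99]' yields 'xyz+99' as the
# reference tag here and 'SNR' as the association value extracted below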
ASSOCREFnew[idxassoc] = ASSOCnew[idxassoc].apply(lambda x:
re.split(r'\]', re.split(r'\[', x)[1])[0]
if len(re.split(r'\[', x)) > 1 else np.nan)
# Set values
ASSOCnew[idxassoc] = ASSOCnew[idxassoc].apply(lambda x: re.split(r'\[|,|\(|:', x)[0])
self.update(ASSOCnew, overwrite=True)
self.update(ASSOCREFnew, overwrite=True)
def parse_type(self):
"""
Parse default string representing source type, extracting (first) value
and reference. Multiple values and references currently not supported.
"""
if 'TYPE' not in self.columns:
return
# Save original parameter in new column
TYPEorig = self.catalogue['TYPE'].copy()
TYPEorig.name = 'TYPE_ORIG'
self.update(TYPEorig, 'TYPE_ORIG')
TYPEnew = self.catalogue['TYPE'].copy()
idxtype = ~TYPEnew.isna()
# Set references first
if 'TYPE_REF' not in self.columns:
TYPEREFnew = Series(np.full(self.catalogue_len, '', dtype='U64'),
name='TYPE_REF')
else:
TYPEREFnew = self.catalogue['TYPE_REF'].copy()
TYPEREFnew[idxtype] = TYPEnew[idxtype].apply(lambda x:
re.split(r'\]', re.split(r'\[', x)[1])[0]
if len(re.split(r'\[', x)) > 1 else np.nan)
# Set values
TYPEnew[idxtype] = TYPEnew[idxtype].apply(lambda x: re.split(r'\[|,|\(|:', x)[0])
self.update(TYPEnew, overwrite=True)
self.update(TYPEREFnew, overwrite=True)
def parse_bincomp(self):
"""
Parse default string representing source companion type, extracting (first) value
and reference. Multiple values and references currently not supported.
"""
if 'BINCOMP' not in self.columns:
return
# Save original parameter in new column
BINCOMPorig = self.catalogue['BINCOMP'].copy()
BINCOMPorig.name = 'BINCOMP_ORIG'
self.update(BINCOMPorig)
BINCOMPnew = self.catalogue['BINCOMP'].copy()
idxbincomp = ~BINCOMPnew.isna()
# Set references first
if 'BINCOMP_REF' not in self.columns:
BINCOMPREFnew = Series(np.full(self.catalogue_len, '', dtype='U64'),
name='BINCOMP_REF')
else:
BINCOMPREFnew = self.catalogue['BINCOMP_REF'].copy()
BINCOMPREFnew[idxbincomp] = BINCOMPnew[idxbincomp]\
.apply(lambda x: re.split(r'\]', re.split(r'\[', x)[1])[0]
if len(re.split(r'\[', x)) > 1 else np.nan)
# Set values
BINCOMPnew[idxbincomp] = BINCOMPnew[idxbincomp]\
.apply(lambda x: re.split(r'\[|,|\(|:', x)[0])
self.update(BINCOMPnew, overwrite=True)
self.update(BINCOMPREFnew, overwrite=True)
def set_derived(self):
"""
Compute any derived parameters and add them to the class.
These calculations are based on those in the `readCatalogue.c` and
`defineParameters.c` files from the `PSRCAT`
`code <http://www.atnf.csiro.au/research/pulsar/psrcat/download.html>`_.
"""
self.define_dist() # define the DIST and DIST1 parameters
self.derived_ecliptic() # derive the ecliptic coordinates if not given
self.derived_equatorial() # derive equatorial coords from ecliptic
self.define_galactic() # define the galactic coordinates
self.derived_p0() # derive P0 from F0 if not given
self.derived_f0() # derive F0 from P0 if not given
self.derived_p1() # derive P1 from F1 if not given
self.derived_f1() # derive F1 from P1 if not given
self.derived_pb() # derive binary period from FB0
self.derived_pbdot() # derive Pbdot from FB1
self.derived_fb0() # derive orbital frequency from period
self.derived_fb1() # derive FB1 from PBDOT
self.derived_age() # characteristic age
self.derived_bsurf() # surface magnetic field
self.derived_b_lc() # magnetic field at light cylinder
self.derived_edot() # spin-down luminosity
self.derived_edotd2() # spin-down flux at Sun
self.derived_pmtot() # total proper motion
self.derived_vtrans() # transverse velocity
self.derived_p1_i() # intrinsic period derivative
self.derived_age_i() # intrinsic age
self.derived_bsurf_i() # intrinsic Bsurf
self.derived_edot_i() # intrinsic luminosity
self.derived_flux() # radio flux
self.derived_binary() # derived binary parameters
def define_dist(self):
"""
Set the `DIST` and `DIST1` parameters using other values.
"""
if 'PX' in self.columns:
PX = self.catalogue['PX']
else:
PX = np.full(self.catalogue_len, np.nan)
if 'PX_ERR' in self.columns:
PXERR = self.catalogue['PX_ERR']
else:
PXERR = np.full(self.catalogue_len, np.nan)
if 'DIST_A' in self.columns:
DIST_A = self.catalogue['DIST_A']
else:
DIST_A = np.full(self.catalogue_len, np.nan)
if 'DIST_AMN' in self.columns:
DIST_AMN = self.catalogue['DIST_AMN']
else:
DIST_AMN = np.full(self.catalogue_len, np.nan)
if 'DIST_AMX' in self.columns:
DIST_AMX = self.catalogue['DIST_AMX']
else:
DIST_AMX = np.full(self.catalogue_len, np.nan)
if 'DIST_DM' in self.columns:
DIST_DM = self.catalogue['DIST_DM']
else:
DIST_DM = np.full(self.catalogue_len, np.nan)
if 'DIST_DM1' in self.columns:
DIST_DM1 = self.catalogue['DIST_DM1']
else:
DIST_DM1 = np.full(self.catalogue_len, np.nan)
# DIST defaults to DM distance
DIST = DIST_DM.copy()
# DIST1 defaults to DM1 distance
DIST1 = DIST_DM1.copy()
ONEAU = 149597870. # AU in km (from psrcat.h)
ONEPC = 30.857e12 # 1 pc in km (from psrcat.h)
idxpx = np.isfinite(PX) & np.isfinite(PXERR)
# set distances using parallax if parallax has greater than 3 sigma significance
pxsigma = np.zeros(self.catalogue_len)
pxsigma[idxpx] = np.abs(PX[idxpx])/PXERR[idxpx]
# use DIST_A if available
idxdista = np.isfinite(DIST_A)
DIST[idxdista] = DIST_A[idxdista]
DIST1[idxdista] = DIST_A[idxdista]
# indexes of parallaxes with greater than 3 sigma significance
idxpxgt3 = (pxsigma > 3.) & ~np.isfinite(DIST_A)
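# parallax distance: DIST [kpc] = 1 / PX [mas], written out here via the AU-to-pc
# ratio and the arcsec-to-radian conversion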
DIST[idxpxgt3] = (ONEAU/ONEPC)*(60.*60.*180)/(PX[idxpxgt3]*np.pi)
DIST1[idxpxgt3] = (ONEAU/ONEPC)*(60.*60.*180)/(PX[idxpxgt3]*np.pi)
# if dist_amn and dist_amx exist and dist_dm lies within boundary
# then use dist_dm else use the closest limit to dist_dm
# if dist_dm is not defined then use (dist_amn + dist_amx)/2
idxdist = np.isfinite(DIST) & ~idxpxgt3
idxdist1 = np.isfinite(DIST1) & ~idxpxgt3
# ignore warnings from comparisons that involve NaN
with np.errstate(invalid='ignore'):
idxa = ~((DIST <= DIST_AMX) & (DIST >= DIST_AMN))
idxa1 = ~((DIST1 <= DIST_AMX) & (DIST1 >= DIST_AMN))
DIST[idxa & idxdist & (DIST >= DIST_AMX)] = DIST_AMX[idxa & idxdist &
(DIST >= DIST_AMX)]
DIST1[idxa1 & idxdist1 & (DIST1 >= DIST_AMX)] = DIST_AMX[(idxa1 & idxdist1
& (DIST1 >= DIST_AMX))]
DIST[idxa & idxdist & (DIST < DIST_AMX)] = DIST_AMN[idxa & idxdist & (DIST < DIST_AMX)]
DIST1[idxa1 & idxdist1 & (DIST1 < DIST_AMX)] = DIST_AMN[(idxa1 & idxdist1
& (DIST1 < DIST_AMX))]
idxdist = (~np.isfinite(DIST) & ~idxpxgt3 &
np.isfinite(DIST_AMN) & np.isfinite(DIST_AMX))
idxdist1 = (~np.isfinite(DIST1) & ~idxpxgt3 &
np.isfinite(DIST_AMN) & np.isfinite(DIST_AMX))
DIST[idxdist] = 0.5*(DIST_AMN[idxdist] + DIST_AMX[idxdist])
DIST1[idxdist1] = 0.5*(DIST_AMN[idxdist1] + DIST_AMX[idxdist1])
self.update(DIST, name='DIST')
self.update(DIST1, name='DIST1')
def derived_equatorial(self):
"""
Calculate equatorial coordinates if only ecliptic coordinates are
given. Unlike `psrcat` this function does not currently convert
errors on ecliptic coordinates into equivalent errors on equatorial
coordinates.
"""
reqpar = ['ELONG', 'ELAT']
if not np.all([p in self.columns for p in reqpar]):
return
ELONG = self.catalogue['ELONG']
ELAT = self.catalogue['ELAT']
RAJDnew = np.full(self.catalogue_len, np.nan)
DECJDnew = np.full(self.catalogue_len, np.nan)
RAJnew = np.full(self.catalogue_len, '', dtype='U32')
DECJnew = np.full(self.catalogue_len, '', dtype='U32')
idx = np.isfinite(ELONG) & np.isfinite(ELAT)
# get sky coordinates
sc = BarycentricTrueEcliptic(ELONG.values[idx]*aunits.deg,
ELAT.values[idx]*aunits.deg
).transform_to(ICRS())
RAJDnew[idx] = sc.ra.value
DECJDnew[idx] = sc.dec.value
RAJnew[idx] = sc.ra.to('hourangle').to_string(sep=':', pad=True)
DECJnew[idx] = sc.dec.to_string(sep=':', pad=True, alwayssign=True)
self.update(RAJDnew, name='RAJD')
self.update(DECJDnew, name='DECJD')
self.update(RAJnew, name='RAJ')
self.update(DECJnew, name='DECJ')
# set references
if 'ELONG_REF' in self.columns:
RAJREFnew = np.full(self.catalogue_len, '', dtype='U32')
DECJREFnew = np.full(self.catalogue_len, '', dtype='U32')
ELONGREF = self.catalogue['ELONG_REF']
DECJREFnew[idx] = ELONGREF[idx]
RAJREFnew[idx] = ELONGREF[idx]
self.update(DECJREFnew, name='RAJ_REF')
self.update(RAJREFnew, name='DECJ_REF')
# get PMRA and PMDEC if not given
reqpar = ['PMELONG', 'PMELAT']
if np.all([p in self.columns for p in reqpar]):
PMELONG = self.catalogue['PMELONG']
PMELAT = self.catalogue['PMELAT']
PMRAnew = np.full(self.catalogue_len, np.nan)
PMDECnew = np.full(self.catalogue_len, np.nan)
idx = idx & np.isfinite(PMELONG) & np.isfinite(PMELAT)
sc = BarycentricTrueEcliptic(
ELONG[idx].values*aunits.deg,
ELAT[idx].values*aunits.deg,
pm_lon_coslat=PMELONG[idx].values*aunits.mas/aunits.yr,
pm_lat=PMELAT[idx].values*aunits.mas/aunits.yr
).transform_to(ICRS())
PMRAnew[idx] = sc.pm_ra_cosdec.value
PMDECnew[idx] = sc.pm_dec.value
self.update(PMRAnew, name='PMRA')
self.update(PMDECnew, name='PMDEC')
def derived_ecliptic(self):
"""
Calculate the ecliptic coordinates, and proper motions, from the
right ascension and declination if they are not already given.
The ecliptic used here is the astropy's `BarycentricTrueEcliptic
<http://docs.astropy.org/en/stable/api/astropy.coordinates.BarycentricTrueEcliptic.html>`_,
which may not exactly match that used in `psrcat`.
"""
reqpar = ['RAJD', 'DECJD']
if not np.all([p in self.columns for p in reqpar]):
return
RAJD = self.catalogue['RAJD']
DECJD = self.catalogue['DECJD']
ELONGnew = np.full(self.catalogue_len, np.nan)
ELATnew = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(RAJD) & np.isfinite(DECJD)
# get sky coordinates
sc = SkyCoord(RAJD[idx].values*aunits.deg,
DECJD[idx].values*aunits.deg)
ELONGnew[idx] = sc.barycentrictrueecliptic.lon.value
ELATnew[idx] = sc.barycentrictrueecliptic.lat.value
self.update(ELONGnew, name='ELONG')
self.update(ELATnew, name='ELAT')
# get references
refpar = ['RAJ_REF', 'DECJ_REF']
if np.all([p in self.columns for p in refpar]):
RAJREF = self.catalogue['RAJ_REF']
DECJREF = self.catalogue['DECJ_REF']
ELONGREFnew = np.full(self.catalogue_len, '', dtype='U32')
ELATREFnew = np.full(self.catalogue_len, '', dtype='U32')
ELONGREFnew[idx] = RAJREF[idx]
ELATREFnew[idx] = DECJREF[idx]
self.update(ELONGREFnew, name='ELONG_REF')
self.update(ELATREFnew, name='ELAT_REF')
# get PMELONG and PMELAT if not given
reqpar = ['PMRA', 'PMDEC']
if np.all([p in self.columns for p in reqpar]):
PMELONGnew = np.full(self.catalogue_len, np.nan)
PMELATnew = np.full(self.catalogue_len, np.nan)
PMRA = self.catalogue['PMRA']
PMDEC = self.catalogue['PMDEC']
idx = idx & np.isfinite(PMRA) & np.isfinite(PMDEC)
sc = ICRS(
RAJD[idx].values*aunits.deg,
DECJD[idx].values*aunits.deg,
pm_ra_cosdec=PMRA[idx].values*aunits.mas/aunits.yr,
pm_dec=PMDEC[idx].values*aunits.mas/aunits.yr
).transform_to(BarycentricTrueEcliptic())
PMELONGnew[idx] = sc.pm_lon_coslat.value
PMELATnew[idx] = sc.pm_lat.value
self.update(PMELONGnew, name='PMELONG')
self.update(PMELATnew, name='PMELAT')
def define_galactic(self):
"""
Calculate the galactic longitude, latitude and position.
.. note::
The cartesian galactic coordinates returned by this function *do
not* match those returned by the ATNF Pulsar Catalogue and the
``psrcat`` software. They are defined using the conventions in the
:class:`astropy.coordinates.Galactocentric` class. This uses a
Galactic centre distance of 8.3 kpc compared to 8.5 kpc in
``psrcat`` and rotated 90 degrees anticlockwise compared to
``psrcat``.
The Galactic coordinate proper motions returned by this function
*do not* match those returned by the ATNF Pulsar Catalogue and the
``psrcat`` software. The values returned here convert the observed
proper motions in right ascension and declination (or ecliptic
longitude and latitude) into equivalent values in the Galactic
coordinate system (via the :class:`astropy.coordinates.Galactic`
class). However, the values returned by the ATNF Pulsar Catalogue
and the ``psrcat`` software are in the Galactic coordinate system,
but additionally have the local solar system velocity and Galactic
rotation of the pulsar removed from them as described in Section 3
of `<NAME> & Anderson (1993) <https://ui.adsabs.harvard.edu/?#abs/1993MNRAS.261..113H>`_.
"""
galpars = ['GL', 'GB', 'ZZ', 'XX', 'YY', 'DMSINB']
if np.all([p in self.columns for p in galpars]):
return
reqpars = ['RAJD', 'DECJD']
if not np.all([p in self.columns for p in reqpars]):
return
# get distance if required
if 'DIST' not in self.columns:
self.define_dist()
if 'DIST' not in self.columns:
return
RAJD = self.catalogue['RAJD'].values.copy()
DECJD = self.catalogue['DECJD'].values.copy()
DIST = self.catalogue['DIST'].values.copy()
GL = np.full(self.catalogue_len, np.nan)
GB = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(RAJD) & np.isfinite(DECJD) & np.isfinite(DIST)
# get sky coordinates
sc = SkyCoord(RAJD[idx]*aunits.deg, DECJD[idx]*aunits.deg,
DIST[idx]*aunits.kpc)
GL[idx] = sc.galactic.l.value
GB[idx] = sc.galactic.b.value
# set galactic longitude and latitude
self.update(GL, name='GL')
self.update(GB, name='GB')
XX = np.full(self.catalogue_len, np.nan)
YY = np.full(self.catalogue_len, np.nan)
ZZ = np.full(self.catalogue_len, np.nan)
# set galactocentric cartesian position (these seem to have a
# different orientation (rotated 90 deg anticlockwise) to that
# defined in the ATNF catalogue, and using a slightly different
# distance to the galactic centre 8.3 kpc in astropy and 8.5 in psrcat)
XX[idx] = sc.galactocentric.cartesian.x.value
YY[idx] = sc.galactocentric.cartesian.y.value
ZZ[idx] = sc.galactocentric.cartesian.z.value
self.update(XX, name='XX')
self.update(YY, name='YY')
self.update(ZZ, name='ZZ')
# set DMSINB
if 'DM' in self.columns:
DM = self.catalogue['DM']
GB = self.catalogue['GB']
DMSINB = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(GB) & np.isfinite(DM)
DMSINB[idx] = DM[idx]*np.sin(np.deg2rad(GB[idx]))
self.update(DMSINB, name='DMSINB')
# galactic proper motion (in the Local Standard of Rest)
if not np.all([p in self.columns for p in ['PMB', 'PML']]):
reqpar = ['PMRA', 'PMDEC']
if np.all([p in self.columns for p in reqpar]):
PMRA = self.catalogue['PMRA'].values.copy()
PMDEC = self.catalogue['PMDEC'].values.copy()
PMB = np.full(self.catalogue_len, np.nan)
PML = np.full(self.catalogue_len, np.nan)
idx = (np.isfinite(PMRA) & np.isfinite(PMDEC) &
np.isfinite(RAJD) & np.isfinite(DECJD) &
np.isfinite(DIST))
sc = ICRS(RAJD[idx]*aunits.deg, DECJD[idx]*aunits.deg,
distance=DIST[idx]*aunits.kpc,
pm_ra_cosdec=PMRA[idx]*aunits.mas/aunits.yr,
pm_dec=PMDEC[idx]*aunits.mas/aunits.yr
).transform_to(Galactic())
PMB[idx] = sc.pm_b.value
PML[idx] = sc.pm_l_cosb.value
self.update(PMB, name='PMB')
self.update(PML, name='PML')
def derived_binary(self):
"""
Calculate derived binary system parameters.
"""
MASS_PSR = 1.35 # canonical pulsar mass (solar masses)
# derive mass function
reqpars = ['A1', 'PB']
if np.all([p in self.columns for p in reqpars]):
A1 = self.catalogue['A1'].values.copy()*c.value # convert to m
PB = self.catalogue['PB'].values.copy()*86400. # convert to sec
idx = np.isfinite(A1) & np.isfinite(PB)
MASSFN = np.full(self.catalogue_len, np.nan)
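# binary mass function f(M) = 4*pi^2*(a*sin(i))^3 / (G*Pb^2) in solar masses,
# with A1 = a*sin(i) in metres and PB in seconds (converted above)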
MASSFN[idx] = (4.*np.pi**2/GM_sun.value)*A1[idx]**3/(PB[idx]**2)
self.update(MASSFN, name='MASSFN')
# derive minimum, median and 90% UL for mass
MINMASS = np.full(self.catalogue_len, np.nan)
MEDMASS = np.full(self.catalogue_len, np.nan)
UPRMASS = np.full(self.catalogue_len, np.nan)
from scipy.optimize import newton
def solfunc(m2, sini, mf, m1):
return (m1 + m2)**2 - (m2*sini)**3/mf
SINI_MIN = 1.0 # inclination for minimum mass
SINI_MED = 0.866025403 # inclination of 60deg for median mass
SINI_90 = 0.438371146 # inclination for 90% UL mass
for i, mf in enumerate(MASSFN):
if ~np.isfinite(mf):
continue
try:
MINMASS[i] = newton(solfunc, MASS_PSR,
args=(SINI_MIN, mf, MASS_PSR),
maxiter=1000)
except RuntimeError:
MINMASS[i] = np.nan
try:
MEDMASS[i] = newton(solfunc, MASS_PSR,
args=(SINI_MED, mf, MASS_PSR),
maxiter=1000)
except RuntimeError:
MEDMASS[i] = np.nan
try:
UPRMASS[i] = newton(solfunc, MASS_PSR,
args=(SINI_90, mf, MASS_PSR),
maxiter=1000)
except RuntimeError:
UPRMASS[i] = np.nan
self.update(MINMASS, name='MINMASS')
self.update(MEDMASS, name='MEDMASS')
self.update(UPRMASS, name='UPRMASS')
# add uncertainty on mass function
reqpars = ['A1_ERR', 'PB_ERR']
if np.all([p in self.columns for p in reqpars]):
# convert to metres
A1ERR = self.catalogue['A1_ERR'].values.copy()*c.value
# convert to seconds
PBERR = self.catalogue['PB_ERR'].values.copy()*86400.
idx = (np.isfinite(MASSFN) & np.isfinite(A1ERR) &
np.isfinite(PBERR))
MASSFN_ERR = np.full(self.catalogue_len, np.nan)
MASSFN_ERR[idx] = (
MASSFN[idx] * np.sqrt((3.*A1ERR[idx]/A1[idx])**2 +
(2.*PBERR[idx]/PB[idx])**2)
)
self.update(MASSFN_ERR, name='MASSFN_ERR')
# derive eccentricity from EPS1 and EPS2
reqpars = ['EPS1', 'EPS2']
if np.all([p in self.columns for p in reqpars]):
EPS1 = self.catalogue['EPS1'].values.copy()
EPS2 = self.catalogue['EPS2'].values.copy()
ECCnew = np.full(self.catalogue_len, np.nan)
OMnew = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(EPS1) & np.isfinite(EPS2)
# set eccentricities
ECCnew[idx] = np.sqrt(EPS1[idx]**2+EPS2[idx]**2)
self.update(ECCnew, name='ECC')
# set angle of periastron
idxn = idx & (ECCnew != 0.)
OMnew[idxn] = np.arctan2(EPS1[idxn],
EPS2[idxn])*180./np.pi
OMnew = np.mod(OMnew+360., 360.) # make sure angles are positive
self.update(OMnew, name='OM')
# set errors
reqpars = ['EPS1_ERR', 'EPS2_ERR']
if np.all([p in self.columns for p in reqpars]):
EPS1ERR = self.catalogue['EPS1_ERR'].values.copy()
EPS2ERR = self.catalogue['EPS2_ERR'].values.copy()
ECCERRnew = np.full(self.catalogue_len, np.nan)
OMERRnew = np.full(self.catalogue_len, np.nan)
idxn = idx & (np.isfinite(EPS1ERR) & np.isfinite(EPS2ERR) &
(ECCnew != 0.))
OMERRnew[idxn] = (
np.sqrt((EPS2[idxn]*EPS1ERR[idxn])**2
+ (EPS1[idxn]*EPS2ERR[idxn])**2) /
(ECCnew[idxn])**2
)*180.0/np.pi
self.update(OMERRnew, name='OM_ERR')
ECCERRnew[idxn] = (
np.sqrt((EPS1[idxn]*EPS1ERR[idxn])**2
+ (EPS2[idxn]*EPS2ERR[idxn])**2) / ECCnew[idxn]
)
self.update(ECCERRnew, name='ECC_ERR')
# derive EPS1 and EPS2 from ECC and OM
reqpars = ['ECC', 'OM']
if np.all([p in self.columns for p in reqpars]):
ECC = self.catalogue['ECC'].values.copy()
OM = self.catalogue['OM'].values.copy()
EPS1new = np.full(self.catalogue_len, np.nan)
EPS2new = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(ECC) & np.isfinite(OM)
EPS1new[idx] = ECC[idx] * np.sin(np.deg2rad(OM[idx]))
EPS2new[idx] = ECC[idx] * np.cos(np.deg2rad(OM[idx]))
self.update(EPS1new, name='EPS1')
self.update(EPS2new, name='EPS2')
# set errors
reqpars = ['ECC_ERR', 'OM_ERR']
if np.all([p in self.columns for p in reqpars]):
ECCERR = self.catalogue['ECC_ERR'].values.copy()
OMERR = self.catalogue['OM_ERR'].values.copy()
EPS1ERRnew = np.full(self.catalogue_len, np.nan)
EPS2ERRnew = np.full(self.catalogue_len, np.nan)
idxn = idx & np.isfinite(ECCERR) & np.isfinite(OMERR)
EPS1ERRnew[idxn] = (
np.abs(EPS1new[idxn]) *
np.sqrt((ECCERR[idxn]/ECC[idxn])**2 +
(np.abs(np.cos(np.deg2rad(OM[idxn]))) * np.deg2rad(OMERR[idxn]) /
np.abs(np.sin(np.deg2rad(OM[idxn]))))**2)
)
EPS2ERRnew[idxn] = (
np.abs(EPS2new[idxn]) *
np.sqrt((ECCERR[idxn]/ECC[idxn])**2 +
(np.abs(np.sin(np.deg2rad(OM[idxn]))) * np.deg2rad(OMERR[idxn]) /
np.abs(np.cos(np.deg2rad(OM[idxn]))))**2)
)
self.update(EPS1ERRnew, name='EPS1_ERR')
self.update(EPS2ERRnew, name='EPS2_ERR')
# derive MINOMDOT
reqpars = ['ECC', 'PB', 'MINMASS']
if np.all([p in self.columns for p in reqpars]):
MINMASS = self.catalogue['MINMASS'].values.copy()
PB = self.catalogue['PB'].values.copy()*86400.
ECC = self.catalogue['ECC'].values.copy()
MINOMDOT = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(MINMASS) & np.isfinite(PB) & np.isfinite(ECC)
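# general-relativistic periastron advance:
# omdot = 3*(2*pi/Pb)**(5/3) * (T_sun*(m1+m2))**(2/3) / (1 - e**2),
# where T_sun = G*M_sun/c**3 ~ 4.925e-6 s; converted below from rad/s to deg/yr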
MINOMDOT[idx] = (3.*(2.*np.pi/PB[idx])**(5./3.) *
((MASS_PSR+MINMASS[idx]) *
4.925490946e-6)**(2./3.) /
(1.-ECC[idx]**2))
MINOMDOT[idx] = np.rad2deg(MINOMDOT[idx])*86400.*365.25
self.update(MINOMDOT, name='MINOMDOT')
def derived_p0(self):
"""
Calculate the period from the frequency in cases where period is not
given.
"""
if 'F0' not in self.columns:
return
F0 = self.catalogue['F0']
P0new = np.full(self.catalogue_len, np.nan)
# find indices where P0 needs to be set from F0
idx = np.isfinite(F0)
P0new[idx] = 1./F0[idx]
self.update(P0new, name='P0')
# set the references
if 'F0_REF' in self.columns:
P0REFnew = np.full(self.catalogue_len, '', dtype='U32')
F0REF = self.catalogue['F0_REF']
P0REFnew[idx] = F0REF[idx]
self.update(P0REFnew, 'P0_REF')
# set the errors
if 'F0_ERR' in self.columns:
P0ERRnew = np.full(self.catalogue_len, np.nan)
F0ERR = self.catalogue['F0_ERR']
idx = idx & np.isfinite(F0ERR)
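# first-order error propagation: sigma_P0 = sigma_F0 / F0**2 = sigma_F0 * P0**2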
P0ERRnew[idx] = F0ERR[idx]*P0new[idx]**2
self.update(P0ERRnew, 'P0_ERR')
def derived_f0(self):
"""
Calculate the frequency from the period in cases where frequency is not
given.
"""
if 'P0' not in self.columns:
return
P0 = self.catalogue['P0']
F0new = np.full(self.catalogue_len, np.nan)
# find indices where F0 needs to be set from P0
idx = np.isfinite(P0)
F0new[idx] = 1./P0[idx]
self.update(F0new, name='F0')
# set the references
if 'P0_REF' in self.columns:
F0REFnew = np.full(self.catalogue_len, '', dtype='U32')
P0REF = self.catalogue['P0_REF']
F0REFnew[idx] = P0REF[idx]
self.update(F0REFnew, name='F0_REF')
# set the errors
if 'P0_ERR' in self.columns:
F0ERRnew = np.full(self.catalogue_len, np.nan)
P0ERR = self.catalogue['P0_ERR']
idx = idx & np.isfinite(P0ERR)
F0ERRnew[idx] = P0ERR[idx]*F0new[idx]**2
self.update(F0ERRnew, name='F0_ERR')
def derived_p1(self):
"""
Calculate the period derivative from the frequency derivative in cases
where period derivative is not given.
"""
reqpars = ['P0', 'F1']
if not np.all([p in self.columns for p in reqpars]):
return
P0 = self.catalogue['P0']
F1 = self.catalogue['F1']
P1new = np.full(self.catalogue_len, np.nan)
# find indices where P0 needs to be set from F0
idx = np.isfinite(P0) & np.isfinite(F1)
P1new[idx] = -(P0[idx]**2)*F1[idx]
self.update(P1new, name='P1')
# set the references
if 'F1_REF' in self.columns:
P1REFnew = np.full(self.catalogue_len, '', dtype='U32')
F1REF = self.catalogue['F1_REF']
P1REFnew[idx] = F1REF[idx]
self.update(P1REFnew, name='P1_REF')
# set the errors
reqpars = ['F0_ERR', 'F1_ERR']
if np.all([p in self.columns for p in reqpars]):
P1ERRnew = np.full(self.catalogue_len, np.nan)
F1ERR = self.catalogue['F1_ERR']
F0ERR = self.catalogue['F0_ERR']
idx = idx & (np.isfinite(F1ERR) & np.isfinite(F0ERR))
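# P1 = -F1/F0**2, so dP1/dF1 = -1/F0**2 = -P0**2 and
# dP1/dF0 = 2*F1/F0**3 = 2*P0**3*F1; the two contributions are added
# in quadrature below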
P1ERRnew[idx] = np.sqrt(
(P0[idx]**2*F1ERR[idx])**2
+ (2.0*P0[idx]**3*F1[idx]*F0ERR[idx])**2)
self.update(P1ERRnew, name='P1_ERR')
def derived_f1(self):
"""
Calculate the frequency derivative from the period derivative in cases
where frequency derivative is not given.
"""
reqpars = ['F0', 'P1']
if not np.all([p in self.columns for p in reqpars]):
return
F0 = self.catalogue['F0']
P1 = self.catalogue['P1']
F1new = np.full(self.catalogue_len, np.nan)
# find indices where F1 needs to be set from P1
idx = np.isfinite(P1) & np.isfinite(F0)
F1new[idx] = -(F0[idx]**2)*P1[idx]
self.update(F1new, name='F1')
# set the references
if 'P1_REF' in self.columns:
F1REFnew = np.full(self.catalogue_len, '', dtype='U32')
P1REF = self.catalogue['P1_REF']
F1REFnew[idx] = P1REF[idx]
self.update(F1REFnew, name='F1_REF')
# set the errors
reqpars = ['P0_ERR', 'P1_ERR']
if np.all([p in self.columns for p in reqpars]):
F1ERRnew = np.full(self.catalogue_len, np.nan)
P1ERR = self.catalogue['P1_ERR']
P0ERR = self.catalogue['P0_ERR']
idx = idx & np.isfinite(P1ERR) & np.isfinite(P0ERR)
F1ERRnew[idx] = np.sqrt(
(F0[idx]**2*P1ERR[idx])**2
+ (2.0*F0[idx]**3*P1[idx]*P0ERR[idx])**2)
self.update(F1ERRnew, name='F1_ERR')
def derived_pb(self):
"""
Calculate binary orbital period from orbital frequency.
"""
if 'FB0' not in self.columns:
return
FB0 = self.catalogue['FB0']
PBnew = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(FB0)
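# FB0 is in Hz while PB is quoted in days, hence the 86400 s/day factor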
PBnew[idx] = 1./(FB0[idx]*86400.)
self.update(PBnew, name='PB')
# set the references
if 'FB0_REF' in self.columns:
PBREFnew = np.full(self.catalogue_len, '', dtype='U32')
FB0REF = self.catalogue['FB0_REF']
PBREFnew[idx] = FB0REF[idx]
self.update(PBREFnew, name='PB_REF')
# set the errors
if 'FB0_ERR' in self.columns:
PBERRnew = np.full(self.catalogue_len, np.nan)
FB0ERR = self.catalogue['FB0_ERR']
idx = idx & np.isfinite(FB0ERR)
PBERRnew[idx] = FB0ERR[idx]*PBnew[idx]**2*86400.
self.update(PBERRnew, name='PB_ERR')
def derived_pbdot(self):
"""
Calculate binary orbital period derivative from orbital frequency
derivative.
"""
reqpars = ['FB1', 'PB']
if not np.all([p in self.columns for p in reqpars]):
return
FB1 = self.catalogue['FB1']
PB = self.catalogue['PB']
PBDOTnew = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(PB) & np.isfinite(FB1)
PBDOTnew[idx] = -(PB[idx]**2*FB1[idx])
self.update(PBDOTnew, name='PBDOT')
# set the references
if 'FB1_REF' in self.columns:
PBDOTREFnew = np.full(self.catalogue_len, '', dtype='U32')
FB1REF = self.catalogue['FB1_REF']
PBDOTREFnew[idx] = FB1REF[idx]
self.update(PBDOTREFnew, name='PBDOT_REF')
# set the errors
reqpars = ['FB1_ERR', 'FB0_ERR']
if np.all([p in self.columns for p in reqpars]):
PBDOTERRnew = np.full(self.catalogue_len, np.nan)
FB1ERR = self.catalogue['FB1_ERR']
FB0ERR = self.catalogue['FB0_ERR']
idx = idx & np.isfinite(FB1ERR) & np.isfinite(FB0ERR)
PBDOTERRnew[idx] = np.sqrt((PB[idx]**2 * FB1ERR[idx])**2
+ (2.0 * PB[idx]**3 * FB1[idx]
* FB0ERR[idx])**2)
self.update(PBDOTERRnew, name='PBDOT_ERR')
def derived_fb0(self):
"""
Calculate orbital frequency from orbital period.
"""
if 'PB' not in self.columns:
return
PB = self.catalogue['PB']
FB0new = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(PB)
FB0new[idx] = 1./(PB[idx]*86400.)
self.update(FB0new, name='FB0')
# set the references
if 'PB_REF' in self.columns:
FB0REFnew = np.full(self.catalogue_len, '', dtype='U32')
PBREF = self.catalogue['PB_REF']
FB0REFnew[idx] = PBREF[idx]
self.update(FB0REFnew, name='FB0_REF')
# set the errors
if 'PB_ERR' in self.columns:
FB0ERRnew = np.full(self.catalogue_len, np.nan)
PBERR = self.catalogue['PB_ERR']
idx = idx & np.isfinite(PBERR)
FB0ERRnew[idx] = PBERR[idx]*(FB0new[idx]**2)*86400.
self.update(FB0ERRnew, name='FB0_ERR')
def derived_fb1(self):
"""
Calculate the orbital frequency derivative from the binary orbital
period derivative.
"""
reqpars = ['PBDOT', 'FB0']
if not np.all([p in self.columns for p in reqpars]):
return
PBDOT = self.catalogue['PBDOT']
FB0 = self.catalogue['FB0']
FB1new = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(FB0) & np.isfinite(PBDOT)
FB1new[idx] = -(FB0[idx]**2*PBDOT[idx])
self.update(FB1new, name='FB1')
# set the references
if 'PBDOT_REF' in self.columns:
FB1REFnew = np.full(self.catalogue_len, '', dtype='U32')
PBDOTREF = self.catalogue['PBDOT_REF']
FB1REFnew[idx] = PBDOTREF[idx]
self.update(FB1REFnew, name='FB1_REF')
# set the errors
reqpars = ['PBDOT_ERR', 'PB_ERR']
if np.all([p in self.columns for p in reqpars]):
FB1ERRnew = np.full(self.catalogue_len, np.nan)
PBDOTERR = self.catalogue['PBDOT_ERR']
PBERR = self.catalogue['PB_ERR']
idx = idx & np.isfinite(PBERR) & np.isfinite(PBDOTERR)
FB1ERRnew[idx] = np.sqrt(
(FB0[idx]**2 * PBDOTERR[idx])**2
+ (2.0 * FB0[idx]**3 * PBDOT[idx] *
PBERR[idx] * 86400.)**2)
self.update(FB1ERRnew, name='FB1_ERR')
def derived_p1_i(self):
"""
Calculate the intrinsic period derivative.
"""
if 'VTRANS' not in self.columns:
self.derived_vtrans()
reqpars = ['VTRANS', 'P0', 'P1', 'DIST']
if not np.all([p in self.columns for p in reqpars]):
return
# get required parameters
VTRANS = self.catalogue['VTRANS']
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
DIST = self.catalogue['DIST']
P1I = np.full(self.catalogue_len, np.nan)
idx = (np.isfinite(P1) & np.isfinite(P0) & np.isfinite(VTRANS) &
np.isfinite(DIST))
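# Shklovskii correction: P1_intrinsic = P1 - P0*VTRANS**2/(c*DIST), with
# VTRANS in km/s and DIST in kpc; the numerical factors convert to cgs
# units and fold in the overall 1e-15 rescaling applied to P1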
P1I[idx] = ((P1[idx]/1.0e-15) -
VTRANS[idx]**2 * 1.0e10 * P0[idx] /
(DIST[idx] * 3.086e6)/2.9979e10) * 1.0e-15
self.update(P1I, name='P1_I')
def derived_age(self):
"""
Calculate the characteristic age in years (see
:func:`~psrqpy.utils.characteristic_age`, with an assumed braking index
of n=3).
"""
from .utils import characteristic_age
if not np.all([p in self.columns for p in ['P0', 'P1']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
AGE = characteristic_age(P0, P1)
self.update(AGE, name='AGE')
def derived_age_i(self):
"""
Calculate the characteristic age (in years), derived from period and
intrinsic period derivative.
"""
from .utils import characteristic_age
if 'P1_I' not in self.columns:
self.derived_p1_i()
if not np.all([p in self.columns for p in ['P0', 'P1_I']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1_I = self.catalogue['P1_I']
AGEI = characteristic_age(P0, P1_I)
self.update(AGEI, name='AGE_I')
def derived_bsurf(self):
"""
Calculate the surface magnetic field strength (see
:func:`~psrqpy.utils.B_field`).
"""
from .utils import B_field
if not np.all([p in self.columns for p in ['P0', 'P1']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
BSURF = B_field(P0, P1)
self.update(BSURF, name='BSURF')
def derived_bsurf_i(self):
"""
Calculate the surface magnetic field strength, derived from period and
intrinsic period derivative.
"""
from .utils import B_field
if 'P1_I' not in self.columns:
self.derived_p1_i()
if not np.all([p in self.columns for p in ['P0', 'P1_I']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1_I = self.catalogue['P1_I']
BSURFI = B_field(P0, P1_I)
self.update(BSURFI, name='BSURF_I')
def derived_b_lc(self):
"""
Calculate the magnetic field strength at the light cylinder.
"""
if not np.all([p in self.columns for p in ['P0', 'P1']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
BLC = np.full(self.catalogue_len, np.nan)
idx = (P1 > 0.) & np.isfinite(P1) & np.isfinite(P0)
BLC[idx] = 3.0e8*np.sqrt(P1[idx])*np.abs(P0[idx])**(-5./2.)
self.update(BLC, name='B_LC')
def derived_edot(self):
"""
Calculate the spin-down luminosity.
"""
if not np.all([p in self.columns for p in ['P0', 'P1']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
EDOT = np.full(self.catalogue_len, np.nan)
idx = (P1 > 0.) & np.isfinite(P1) & np.isfinite(P0)
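# spin-down luminosity Edot = 4*pi**2*I*P1/P0**3 in erg/s, using the
# canonical neutron star moment of inertia I = 1e45 g cm^2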
EDOT[idx] = 4.0 * np.pi**2 * 1e45 * P1[idx] / P0[idx]**3
self.update(EDOT, name='EDOT')
def derived_edot_i(self):
"""
Calculate the spin-down luminosity, derived from period and intrinsic
period derivative.
"""
if 'P1_I' not in self.columns:
self.derived_p1_i()
if not np.all([p in self.columns for p in ['P0', 'P1_I']]):
return
# get period and period derivative
P0 = self.catalogue['P0']
P1_I = self.catalogue['P1_I']
EDOT_I = np.full(self.catalogue_len, np.nan)
idx = (P1_I > 0.) & np.isfinite(P1_I) & np.isfinite(P0)
EDOT_I[idx] = 4.0 * np.pi**2 * 1e45 * P1_I[idx] / P0[idx]**3
self.update(EDOT_I, name='EDOT_I')
def derived_edotd2(self):
"""
Calculate the spin-down luminosity flux at the Sun.
"""
reqpars = ['P0', 'P1', 'DIST']
if not np.all([p in self.columns for p in reqpars]):
return
# get period, period derivative and distance
P0 = self.catalogue['P0']
P1 = self.catalogue['P1']
DIST = self.catalogue['DIST']
EDOTD2 = np.full(self.catalogue_len, np.nan)
idx = (P0 > 0.) & np.isfinite(P1) & np.isfinite(P0) & np.isfinite(DIST)
EDOTD2[idx] = 4.0*np.pi**2*1e45*((P1[idx]/P0[idx]**3)/DIST[idx]**2)
self.update(EDOTD2, name='EDOTD2')
def derived_pmtot(self):
"""
Calculate the total proper motion and error.
"""
reqpars = ['PMRA', 'PMDEC', 'PMELONG', 'PMELAT']
if not np.all([p in self.columns for p in reqpars]):
return
# get PMRA and PMDEC
PMRA = self.catalogue['PMRA'].copy()
PMDEC = self.catalogue['PMDEC'].copy()
PMELONG = self.catalogue['PMELONG']
PMELAT = self.catalogue['PMELAT']
# use PM ELONG or ELAT if no RA and DEC
useelong = ~np.isfinite(PMRA) & np.isfinite(PMELONG)
useelat = ~np.isfinite(PMDEC) & np.isfinite(PMELAT)
PMRA[useelong] = PMELONG[useelong]
PMDEC[useelat] = PMELAT[useelat]
PMTOT = np.sqrt(PMRA**2+PMDEC**2)
self.update(PMTOT, name='PMTOT')
# get the error
reqpars1 = ['PMRA_ERR', 'PMDEC_ERR']
reqpars2 = ['PMELONG_ERR', 'PMELAT_ERR']
if (not np.all([p in self.columns for p in reqpars1]) and
not np.all([p in self.columns for p in reqpars2])):
return
if 'PMRA_ERR' in self.columns:
PMRA_ERR = self.catalogue['PMRA_ERR'].copy()
else:
PMRA_ERR = np.full(self.catalogue_len, np.nan)
if 'PMDEC_ERR' in self.columns:
PMDEC_ERR = self.catalogue['PMDEC_ERR'].copy()
else:
PMDEC_ERR = np.full(self.catalogue_len, np.nan)
if 'PMELONG_ERR' in self.columns:
PMELONG_ERR = self.catalogue['PMELONG_ERR'].copy()
else:
PMELONG_ERR = np.full(self.catalogue_len, np.nan)
if 'PMELAT_ERR' in self.columns:
PMELAT_ERR = self.catalogue['PMELAT_ERR'].copy()
else:
PMELAT_ERR = np.full(self.catalogue_len, np.nan)
PMLAT = np.full(self.catalogue_len, np.nan)
PMLONG = np.full(self.catalogue_len, np.nan)
PMLATERR = np.full(self.catalogue_len, np.nan)
PMLONGERR = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(PMELONG) & np.isfinite(PMELONG_ERR)
PMLONG[idx] = PMELONG[idx]
PMLONGERR[idx] = PMELONG_ERR[idx]
idx = np.isfinite(PMRA) & np.isfinite(PMRA_ERR)
PMLONG[idx] = PMRA[idx]
PMLONGERR[idx] = PMRA_ERR[idx]
idx = np.isfinite(PMELAT) & np.isfinite(PMELAT_ERR)
PMLAT[idx] = PMELAT[idx]
PMLATERR[idx] = PMELAT_ERR[idx]
idx = np.isfinite(PMDEC) & np.isfinite(PMDEC_ERR)
PMLAT[idx] = PMDEC[idx]
PMLATERR[idx] = PMDEC_ERR[idx]
PMTOTERR = np.sqrt(((PMLONG*PMLONGERR)**2+(PMLAT*PMLATERR)**2) /
(PMLONG**2 + PMLAT**2))
self.update(PMTOTERR, name='PMTOT_ERR')
def derived_vtrans(self):
"""
Calculate the transverse velocity.
"""
if 'PMTOT' not in self.columns:
self.derived_pmtot()
if not np.all([p in self.columns for p in ['PMTOT', 'DIST']]):
return
PMTOT = self.catalogue['PMTOT']
DIST = self.catalogue['DIST']
VTRANS = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(PMTOT) & np.isfinite(DIST)
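# convert proper motion (mas/yr) and distance (kpc) to km/s:
# pi/(1000*3600*180) turns mas into radians, 365.25*86400 s/yr gives
# rad/s, and 3.086e16 km/kpc scales by the distance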
VTRANS[idx] = (PMTOT[idx] * np.pi / (1000.0*3600.0*180.0*365.25 *
86400.0))*3.086e16*DIST[idx]
self.update(VTRANS, name='VTRANS')
def derived_flux(self):
"""
Calculate the spectral index between 400 and 1400 MHz and the radio
pseudo-luminosities (flux density times distance squared) at 400 and 1400 MHz.
"""
if not np.all([p in self.columns for p in ['S1400', 'S400']]):
return
S1400 = self.catalogue['S1400']
S400 = self.catalogue['S400']
SI414 = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(S1400) & np.isfinite(S400) & (S1400 > 0.) & (S400 > 0.)
fac = np.log10(400.0/1400.0)
SI414[idx] = -(np.log10(S400[idx]/S1400[idx])/fac)
self.update(SI414, name='SI414')
# need distance for flux
if 'DIST' not in self.columns:
self.define_dist()
if 'DIST' not in self.columns:
return
DIST = self.catalogue['DIST']
R_LUM = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(S400) & np.isfinite(DIST)
R_LUM[idx] = S400[idx] * DIST[idx]**2
self.update(R_LUM, name='R_LUM')
R_LUM14 = np.full(self.catalogue_len, np.nan)
idx = np.isfinite(S1400) & np.isfinite(DIST)
R_LUM14[idx] = S1400[idx] * DIST[idx]**2
self.update(R_LUM14, name='R_LUM14')
def get_pulsar(self, psr, selected=False):
"""
Return the table row for a particular pulsar for all the catalogue
parameters.
Args:
psr (str): The name of a pulsar to return.
selected (bool): If True return a table row containing only the
parameters specified by :meth:`~psrqpy.QueryATNF.query_params`,
otherwise return all parameters. Defaults to False.
Returns:
:class:`astropy.table.Table`: a table row
"""
namepars = ['PSRJ', 'PSRB', 'BNAME', 'JNAME', 'NAME']
if not np.any([p in self.columns for p in namepars]):
warnings.warn("No 'NAME' parameter in table!")
return None
# try searching for the name in each potential name-type
for namepar in namepars:
if namepar in self.columns:
names = self.catalogue[namepar]
if np.any(psr == names):
psrrow = self.catalogue_table[(psr == names).tolist()]
if selected:
return psrrow[self.query_params]
else:
return psrrow
return None
def get_ephemeris(self, psr, precision=15, selected=False):
"""
Return the table row for a particular pulsar and output it as an
ephemeris-style string output.
Args:
psr (str): The name of a pulsar to return.
precision (int): The precision (number of decimal places) at which
to output numbers. Defaults to 15.
selected (bool): If True only output the parameters specified by
:meth:`~psrqpy.QueryATNF.query_params`, otherwise output all
parameters. Defaults to False.
Returns:
str: an ephemeris
"""
psr = self.get_pulsar(psr, selected=selected)
if psr is None:
return None
ephemstr = ''
# get variables that are set
variables = []
values = []
errors = []
for par in PSR_ALL_PARS:
if par in psr.columns:
parval = psr[par]
if not parval.mask[0]:
variables.append(par)
values.append(parval[0])
if par+'_ERR' in psr.columns:
errval = psr[par+'_ERR']
if not errval.mask[0]:
errors.append(errval[0])
else:
errors.append(None)
else:
errors.append(None)
mkl = max([len(kn) for kn in variables])+2 # max key length for output alignment
vlb = precision + 10 # allow extra space for minus sign/exponents
outputstr = '{{name: <{0}}}{{value: <{1}}}\t{{error}}'.format(mkl, vlb)
for varname, varval, varerr in zip(variables, values, errors):
outputdic = {}
outputdic['name'] = varname
if isinstance(varval, float):
if varval.is_integer():
precstr = '{0:.0f}' # print out an integer
else:
precstr = '{{0:.{}f}}'.format(precision) # print out float
if abs(varval) < 1e-6 or abs(varval) > 1e6:
# print out float in scientific notation
precstr = '{{0:.{}e}}'.format(precision)
if varerr is not None:
if varerr.is_integer():
precstre = '{0:.0f}' # print out an integer
else:
precstre = '{{0:.{}f}}'.format(precision) # print out float
if varerr is not None:
if abs(varerr) < 1e-6 or abs(varerr) > 1e6:
# print out float in scientific notation
precstre = '{{0:.{}e}}'.format(precision)
outputdic['value'] = precstr.format(varval)
outputdic['error'] = precstre.format(varerr) if varerr is not None else ''
else:
outputdic['value'] = varval
if isinstance(varerr, float):
precstre = '{{0:.{}f}}'.format(precision) # print out float
if abs(varerr) < 1e-6 or abs(varerr) > 1e6:
# print out float in scientific notation
precstre = '{{0:.{}e}}'.format(precision)
outputdic['error'] = precstre.format(varerr)
else:
outputdic['error'] = ''
ephemstr += outputstr.format(**outputdic).strip()+'\n'
return ephemstr
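# A minimal usage sketch (the query object and pulsar name below are
# illustrative only):
#     query = QueryATNF(params=['F0', 'F1', 'RAJ', 'DECJ'])
#     print(query.get_ephemeris('J0437-4715'))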
def get_pulsars(self):
"""
Returns:
:class:`psrqpy.pulsar.Pulsars`: the queried pulsars returned as a
:class:`~psrqpy.pulsar.Pulsars` object, which is a dictionary of
:class:`~psrqpy.pulsar.Pulsar` objects.
"""
if not self._pulsars:
from .pulsar import Pulsar, Pulsars
self._pulsars = Pulsars()
# add pulsars one by one
qparams = deepcopy(self.query_params)
if isinstance(qparams, list):
if 'JNAME' not in qparams:
self.query_params = self.query_params + ['JNAME']
else:
self.query_params = ['JNAME']
psrtable = self.table
for row in psrtable:
P = Pulsar(row['JNAME'], query=self)
self._pulsars.add_pulsar(P)
self.query_params = qparams # revert to previous query parameters
return self._pulsars
@property
def get_version(self):
"""
Return a string with the ATNF version number, or None if not found.
Returns:
str: the ATNF version number.
"""
return self.catalogue.version
def parse_conditions(self, psrtype=None, assoc=None, bincomp=None):
"""
Parse a string of `conditions
<http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#condition>`_,
i.e., logical statements with which to apply to a catalogue query,
e.g., ``condition = 'f0 > 2.5 && assoc(GC)'``, so that they are in the
format required for the query URL.
Args:
psrtype (list, str): a list of strings, or single string, of
conditions on the
`type <http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html#psr_types>`_
of pulsars to return (logical AND will be used for any listed
types)
assoc (list, str): a list of strings, or single string, of
conditions on the associations of pulsars to return (logical
AND will be used for any listed associations)
bincomp (list, str): a list of strings, or single string, of
conditions on the
`binary companion <http://www.atnf.csiro.au/research/pulsar/psrcat/psrcat_help.html?type=normal#bincomp_type>`_
types of pulsars to return (logical AND will be used for any
listed associations)
exactmatch (bool): a boolean stating whether associations and types
given as the condition should be an exact match
Returns:
str: a string with the format required for use in
:attr:`~psrqpy.config.QUERY_URL`
"""
conditionparse = ''
# add on any extra given pulsar types
if psrtype is not None:
if isinstance(psrtype, list):
if len(psrtype) == 0:
raise Exception("No pulsar types in list")
for p in psrtype:
if not isinstance(p, string_types):
raise Exception("Non-string value '{}' found in pulsar type list"
.format(p))
self._query_psr_types = psrtype
else:
if isinstance(psrtype, string_types):
self._query_psr_types = [psrtype]
else:
raise Exception("'psrtype' must be a list or string")
for p in list(self._query_psr_types):
if p.upper() not in PSR_TYPE:
warnings.warn("Pulsar type '{}' is not recognised, no type will be required"
.format(p))
self._query_psr_types.remove(p)
else:
if len(conditionparse) == 0:
conditionparse = 'type({})'.format(p.upper())
else:
conditionparse += ' && type({})'.format(p.upper())
# add on any extra given associations
if assoc is not None:
if isinstance(assoc, list):
if len(assoc) == 0:
raise Exception("No associations in list")
for p in assoc:
if not isinstance(p, string_types):
raise Exception("Non-string value '{}' found in associations list"
.format(p))
self._query_assocs = assoc
else:
if isinstance(assoc, string_types):
self._query_assocs = [assoc]
else:
raise Exception("'assoc' must be a list or string")
for p in list(self._query_assocs):
if p.upper() not in PSR_ASSOC_TYPE:
warnings.warn("Pulsar association '{}' is not recognised, "
"no type will be required".format(p))
self._query_assocs.remove(p)
else:
if len(conditionparse) == 0:
conditionparse = 'assoc({})'.format(p.upper())
else:
conditionparse += ' && assoc({})'.format(p.upper())
# add on any extra given binary companion types
if bincomp is not None:
if isinstance(bincomp, list):
if len(bincomp) == 0:
raise Exception("No binary companion types in list")
for p in bincomp:
if not isinstance(p, string_types):
raise Exception("Non-string value '{}' found in binary "
"companions list".format(p))
self._query_bincomps = bincomp
else:
if isinstance(bincomp, string_types):
self._query_bincomps = [bincomp]
else:
raise Exception("'bincomp' must be a list or string")
for p in list(self._query_bincomps):
if p.upper() not in PSR_BINARY_TYPE:
warnings.warn("Pulsar binary companion '{}' is not recognised, "
"no type will be required".format(p))
self._query_bincomps.remove(p)
else:
if len(conditionparse) == 0:
conditionparse = 'bincomp({})'.format(p.upper())
else:
conditionparse += ' && bincomp({})'.format(p.upper())
return conditionparse
@property
def catalogue_shape(self):
"""
The shape of the entire catalogue table as a tuple containing the
number of rows and the number of columns.
"""
return self.catalogue.shape
@property
def catalogue_nrows(self):
"""
The number of rows in the entire catalogue, i.e. the number of pulsars
it contains.
"""
return self.catalogue.shape[0]
@property
def catalogue_ncols(self):
"""
The number of columns in the entire catalogue, i.e. the number of
parameters it contains.
"""
return self.catalogue.shape[1]
@property
def catalogue_len(self):
"""
The length of the entire catalogue, i.e., the number of pulsars it
contains. This should be the same as `catalogue_nrows`.
"""
return len(self.catalogue)
def __len__(self):
"""
Returns:
int: :func:`len` method returns the number of pulsars
"""
return len(self.pandas)
def __str__(self):
"""
Returns:
str: :func:`str` method returns the str method of an :class:`astropy.table.Table`.
"""
return str(self.table)
def __repr__(self):
"""
Returns:
str: :func:`repr` method returns the repr method of an :class:`astropy.table.Table`.
"""
return repr(self.table)
def ppdot(self, intrinsicpdot=False, excludeGCs=False, showtypes=[],
showGCs=False, showSNRs=False, markertypes={}, deathline=True,
deathmodel='Ip', filldeath=True, filldeathtype={}, showtau=True,
brakingidx=3, tau=None, showB=True, Bfield=None, pdotlims=None,
periodlims=None, usecondition=True, rcparams={}):
"""
Draw a lovely period vs period derivative diagram.
Args:
intrinsicpdot (bool): use the intrinsic period derivative corrected
for the `Shklovskii effect <https://en.wikibooks.org/wiki/Pulsars_and_neutron_stars/Pulsar_properties#Pulse_period>`_
rather than the observed value. Defaults to False.
excludeGCs (bool): exclude globular cluster pulsars as their period
derivatives can be contaminated by intra-cluster accelerations.
Defaults to False.
showtypes (list, str): a list of pulsar types to highlight with
markers in the plot. These can contain any of the following:
``BINARY``, ``HE``, ``NRAD``, ``RRAT``, ``XINS``, ``AXP`` or
``SGR``, or ``ALL`` to show all types. Default to showing no
types.
showGCs (bool): show markers to denote the pulsars in globular
clusters. Defaults to False.
showSNRs (bool): show markers to denote the pulsars with supernova
remnants associated with them. Defaults to False.
markertypes (dict): a dictionary of marker styles and colors keyed
to the pulsar types above
deathline (bool): draw the pulsar death line. Defaults to True.
deathmodel (str): the type of death line to draw based on the
models in :func:`psrqpy.utils.death_line`. Defaults to
``'Ip'``.
filldeath (bool): set whether to fill the pulsar graveyard under
the death line. Defaults to True.
filldeathtype (dict): a dictionary of keyword arguments for the
fill style of the pulsar graveyard.
showtau (bool): show lines for a selection of characteristic ages.
Defaults to True, and shows lines for :math:`10^5` through to
:math:`10^9` yrs with steps in powers of 10.
brakingidx (int): a braking index to use for the calculation of the
characteristic age lines. Defaults to 3 for magnetic dipole
radiation.
tau (list): a list of characteristic ages to show on the plot.
showB (bool): show lines of constant magnetic field strength.
Defaults to True, and shows lines for :math:`10^{10}` through
to :math:`10^{14}` gauss with steps in powers of 10.
Bfield (list): a list of magnetic field strengths to plot.
periodlims (array_like): the [min, max] period limits to plot with
pdotlims (array_like): the [min, max] pdot limits to plot with
usecondition (bool): if True create the P-Pdot diagram only with
pulsars that conform to the original query condition values.
Defaults to True.
rcparams (dict): a dictionary of :py:obj:`matplotlib.rcParams`
setup parameters for the plot.
Returns:
:class:`matplotlib.figure.Figure`: the figure object
"""
try:
import matplotlib as mpl
from matplotlib import pyplot as pl
except ImportError:
raise ImportError('Cannot produce P-Pdot plot as Matplotlib is '
'not available')
from .utils import death_line, label_line
# get table containing all required parameters
table = self.query_table(usecondition=usecondition,
query_params=['P0', 'P1', 'P1_I', 'ASSOC',
'BINARY', 'TYPE'])
if len(table) == 0:
print("No pulsars found, so no P-Pdot plot has been produced")
return None
if isinstance(showtypes, string_types):
nshowtypes = [showtypes]
else:
nshowtypes = showtypes
for stype in list(nshowtypes):
if 'ALL' == stype.upper():
nshowtypes = list(PSR_TYPE)
# remove radio as none are returned as this
del nshowtypes[nshowtypes.index('RADIO')]
break
elif stype.upper() not in list(PSR_TYPE):
warnings.warn('"TYPE" {} is not recognised, so will not be '
'included'.format(stype))
del nshowtypes[nshowtypes.index(stype)]
if 'SGR' == stype.upper(): # synonym for AXP
nshowtypes[nshowtypes.index(stype)] = 'AXP'
# set plot parameters
rcparams['figure.figsize'] = rcparams['figure.figsize'] if \
'figure.figsize' in rcparams else (9, 9.5)
rcparams['figure.dpi'] = rcparams['figure.dpi'] if \
'figure.dpi' in rcparams else 250
rcparams['text.usetex'] = rcparams['text.usetex'] if \
'text.usetex' in rcparams else True
rcparams['axes.linewidth'] = rcparams['axes.linewidth'] if \
'axes.linewidth' in rcparams else 0.5
rcparams['axes.grid'] = rcparams['axes.grid'] if \
'axes.grid' in rcparams else False
rcparams['font.family'] = rcparams['font.family'] if \
'font.family' in rcparams else 'sans-serif'
rcparams['font.sans-serif'] = rcparams['font.sans-serif'] if \
'font.sans-serif' in rcparams else \
'Avant Garde, Helvetica, Computer Modern Sans serif'
rcparams['font.size'] = rcparams['font.size'] if \
'font.size' in rcparams else 20
rcparams['legend.fontsize'] = rcparams['legend.fontsize'] if \
'legend.fontsize' in rcparams else 16
rcparams['legend.frameon'] = rcparams['legend.frameon'] if \
'legend.frameon' in rcparams else False
mpl.rcParams.update(rcparams)
fig, ax = pl.subplots()
# extract periods and period derivatives
periods = table['P0']
pdots = table['P1']
if intrinsicpdot: # use intrinsic period derivatives if requested
ipdotidx = np.isfinite(table['P1_I'])
pdots[ipdotidx] = table['P1_I'][ipdotidx]
# get only finite values
pidx = (np.isfinite(periods) & np.isfinite(pdots))
'''
Created on Nov 27, 2017
@author: fan
'''
import numpy as np
import scipy.stats
import pyfan.amto.array.mesh as mesh
def three_vec_grids(vara_min, vara_max, vara_grid, vara_grid_add=None,
varb_min=None, varb_max=None, varb_grid=None, varb_grid_add=None,
varc_min=None, varc_max=None, varc_grid=None, varc_grid_add=None,
gridtype='grid', tomesh=False,
return_joint=False, return_single_col=False,
seed=999):
"""Grid for VFI
Temporary code to handle the minimal school hour case; it should be
deleted in the future and combined with the method above.
"""
if (gridtype == 'grid'):
a = np.linspace(vara_min, vara_max, vara_grid)
if (varb_grid is not None):
b = np.linspace(varb_min, varb_max, varb_grid)
if (varc_grid is not None):
c = np.linspace(varc_min, varc_max, varc_grid)
if (varc_grid_add is not None):
c = np.append(c, varc_grid_add)
if (gridtype == 'rand'):
np.random.seed(seed)
a = random_vector_min_max(vara_min, vara_max, vara_grid)
if (varb_grid is not None):
b = random_vector_min_max(varb_min, varb_max, varb_grid)
if (varc_grid is not None):
c = random_vector_min_max(varc_min, varc_max, varc_grid)
if (vara_grid_add is not None):
a = np.append(a, vara_grid_add)
if (varb_grid_add is not None):
b = np.append(b, varb_grid_add)
if (varc_grid_add is not None):
c = np.append(c, varc_grid_add)
a = np.sort(a)
a = np.reshape(a, (-1, 1))
from builtins import range
from functools import reduce
import numpy as np
""" Factor Graph classes forming structure for PGMs
Basic structure is port of MATLAB code by <NAME>
Central difference: nbrs stored as references, not ids
(makes message propagation easier)
Note to self: use %pdb and %load_ext autoreload followed by %autoreload 2
"""
class Node(object):
""" Superclass for graph nodes
"""
epsilon = 10**(-4)
def __init__(self, nid):
self.enabled = True
self.nid = nid
self.nbrs = []
self.incoming = []
self.outgoing = []
self.oldoutgoing = []
def reset(self):
self.enabled = True
def disable(self):
self.enabled = False
def enable(self):
self.enabled = True
for n in self.nbrs:
# don't call enable() as it will recursively enable entire graph
n.enabled = True
def nextStep(self):
""" Used to have this line in prepMessages
but it didn't work?
"""
self.oldoutgoing = self.outgoing[:]
def normalizeMessages(self):
""" Normalize to sum to 1
"""
self.outgoing = [x / np.sum(x) for x in self.outgoing]
def receiveMessage(self, f, m):
""" Places new message into correct location in new message list
"""
if self.enabled:
i = self.nbrs.index(f)
self.incoming[i] = m
def sendMessages(self):
""" Sends all outgoing messages
"""
for i in range(0, len(self.outgoing)):
self.nbrs[i].receiveMessage(self, self.outgoing[i])
def checkConvergence(self):
""" Check if any messages have changed
"""
if self.enabled:
for i in range(0, len(self.outgoing)):
# check messages have same shape
self.oldoutgoing[i].shape = self.outgoing[i].shape
delta = np.absolute(self.outgoing[i] - self.oldoutgoing[i])
if (delta > Node.epsilon).any(): # if there has been change
return False
return True
else:
# Always return True if disabled to avoid interrupting check
return True
class VarNode(Node):
""" Variable node in factor graph
"""
def __init__(self, name, dim, nid):
super(VarNode, self).__init__(nid)
self.name = name
self.dim = dim
self.observed = -1 # only >= 0 if variable is observed
def reset(self):
super(VarNode, self).reset()
size = range(0, len(self.incoming))
self.incoming = [np.ones((self.dim,1)) for i in size]
self.outgoing = [np.ones((self.dim,1)) for i in size]
self.oldoutgoing = [np.ones((self.dim,1)) for i in size]
self.observed = -1
def condition(self, observation):
""" Condition on observing certain value
"""
self.enable()
self.observed = observation
# set messages (won't change)
for i in range(0, len(self.outgoing)):
self.outgoing[i] = np.zeros((self.dim,1))
self.outgoing[i][self.observed] = 1.
self.nextStep() # copy into oldoutgoing
def prepMessages(self):
""" Multiplies together incoming messages to make new outgoing
"""
# compute new messages if no observation has been made
if self.enabled and self.observed < 0 and len(self.nbrs) > 1:
# switch reference for old messages
self.nextStep()
for i in range(0, len(self.incoming)):
# multiply together all excluding message at current index
curr = self.incoming[:]
del curr[i]
self.outgoing[i] = reduce(np.multiply, curr)
# normalize once finished with all messages
self.normalizeMessages()
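# A small wiring sketch (illustrative only): two binary variables joined
# by a single pairwise factor. The FacNode constructor defined below
# registers itself with the variable nodes passed to it.
#     a = VarNode('a', 2, 0)
#     b = VarNode('b', 2, 1)
#     P = np.array([[0.9, 0.1],
#                   [0.1, 0.9]])
#     f = FacNode(P, 2, a, b)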
class FacNode(Node):
""" Factor node in factor graph
"""
def __init__(self, P, nid, *args):
super(FacNode, self).__init__(nid)
self.P = P
self.nbrs = list(args) # list storing refs to variable nodes
# num of edges
numNbrs = len(self.nbrs)
numDependencies = self.P.squeeze().ndim
# init messages
for i in range(0,numNbrs):
v = self.nbrs[i]
vdim = v.dim
# init for factor
self.incoming.append(np.ones((vdim,1)))
self.outgoing.append(np.ones((vdim,1)))
self.oldoutgoing.append(np.ones((vdim,1)))
# init for variable
v.nbrs.append(self)
v.incoming.append(np.ones((vdim,1)))
v.outgoing.append(np.ones((vdim,1)))
v.oldoutgoing.append(np.ones((vdim,1)))
# error check
assert (numNbrs == numDependencies), "Factor dimensions do not match size of domain."
def reset(self):
super(FacNode, self).reset()
for i in range(0, len(self.incoming)):
self.incoming[i] = np.ones((self.nbrs[i].dim,1))
self.outgoing[i] = np.ones((self.nbrs[i].dim,1))
import os
import intake_io
import numpy as np
import xarray as xr
from am_utils.parallel import run_parallel
from am_utils.utils import walk_dir
from cellpose import models
from scipy import ndimage
from skimage import filters
from skimage.feature import blob_log
from skimage.morphology import remove_small_objects
from skimage.segmentation import clear_border as sk_clear_border
from skimage.segmentation import watershed
from tqdm import tqdm
from .preprocess import rescale_intensity
from .utils import display_cellpose_results
def __get_images(dataset, do_3D, channel):
channels = [0, 0]
if 'c' in dataset.dims:
if channel is not None:
ch_names = dataset.coords['c'].data
channel = np.ravel(channel)
if len(channel) == 1:
imgs = dataset.loc[dict(c=ch_names[channel[0]])]['image'].data
else:
nuclei = dataset.loc[dict(c=ch_names[channel[0]])]['image'].data
cells = dataset.loc[dict(c=ch_names[channel[1]])]['image'].data
imgs = np.array([np.zeros_like(nuclei), cells, nuclei])
imgs = np.moveaxis(imgs, 0, -1)
channels = [2, 3]
else:
raise ValueError("The image has multiples channels. Provide channel to segment.")
else:
imgs = dataset['image'].data
# imgs = rescale_intensity(np.array(imgs))
if 'z' not in dataset.dims:
imgs = [imgs]
if do_3D:
spacing = intake_io.get_spacing(dataset)
anisotropy = spacing[0] / spacing[-1]
imgs = [imgs]
else:
anisotropy = None
return imgs, anisotropy, channels
def __reshape_output(masks, dataset, do_3D):
if 'z' not in dataset.dims or do_3D:
masks = masks[0]
else:
masks = np.array(masks)
return masks
def __combine_3D(masks, do_3D, diameter,
remove_small_mode='3D', remove_small_diam_fraction=0.5,
clear_border=False):
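# Heuristic for turning per-slice Cellpose masks into a 3D labelling: use
# the z-slice with the largest segmented area (away from the stack edges
# where possible) as seed labels, optionally clear border ROI, median
# filter the binary stack, and remove objects smaller than a fraction of
# the target diameter by volume ('3D') or area ('2D').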
if do_3D is False and len(masks.shape) > 2:
area = (masks > 0).sum(-1).sum(-1)
if len(area) > 21:
ind = np.argmax(area[10:-10]) + 10
else:
ind = np.argmax(area)
minrad = diameter / 2 * remove_small_diam_fraction
labels = masks[ind:ind + 1].copy()
if clear_border:
labels = np.expand_dims(sk_clear_border(labels.max(0)), 0)
masks = ndimage.median_filter(masks > 0, 3)
if remove_small_mode == '3D':
masks = masks * labels
minvol = 4. / 3 * np.pi * minrad ** 3
masks = remove_small_objects(masks, min_size=minvol)
elif remove_small_mode == '2D':
minarea = np.pi * minrad ** 2
labels = remove_small_objects(labels, min_size=minarea)
masks = masks * labels
else:
raise ValueError("Invalid value for 'remove_small_mode', must be '3D' or '2D'")
return masks
def segment_roi(dataset, channel=None, do_3D=False,
gpu=True, model_type='cyto', diameter=None,
remove_small_mode='3D', remove_small_diam_fraction=0.5,
clear_border=False, add_to_input=False,
show_cellpose_debug=False,
**cellpose_kwargs):
"""
Segment ROI (cells or nuclei) in one image using cellpose.
Parameters
----------
dataset : xr.Dataset
Image in the form of an xarrray dataset (read with intake_io).
channel : int, optional
Channel number to use for segmentation, starting from 0.
If the image has only one channel, this can be left out.
do_3D : bool, optional
If True, segment in the 3D mode with cellpose (computationally expensive).
If False, segment each z-layer and then label in 3D.
Default is False.
gpu : bool, optional
If True, use gpu for cellpose segmentation.
Default: True
model_type : str
Cellpose model type ('cyto' or 'nuclei')
Use 'cyto' for irregular nuclei.
Default: 'cyto'.
channels : tuple or list
The 'channels' parameter of cellpose.
Default: [0,0] (gray scale)
diameter : int, optional
Target ROI diameter in pixels.
If None, will be calculated as 12 microns converted to pixels.
Default: None.
remove_small_mode : str, optional
'2D', or '3D'.
Used to remove small ROI by volume (3D) or area (2D).
For a thin stack (as in the example data), use '2D'.
Default: 3D.
remove_small_diam_fraction : float, optional
Minimal diameter for the ROI.
Provided as a fraction of the target diameter (the `diameter` parameter of the Cellpose).
Default: 0.5.
clear_border : bool, optional
If True, will remove ROI touching image border (in xy only).
Default: False
add_to_input : bool
If True, return an xarray dataset with combined input and output.
Default: False
show_cellpose_debug : bool
If True, return flows with masks.
Default: False
cellpose_kwargs : key value
Cellpose arguments
Returns
-------
masks = np.ndarray or xr.Dataset
Segmented image or input with segmented image
"""
imgs, anisotropy, channels = __get_images(dataset, do_3D, channel)
if len(imgs) > 1:
imgs = [img for img in rescale_intensity(np.array(imgs))]
else:
imgs = [rescale_intensity(np.array(img)) for img in imgs]
model = models.Cellpose(gpu=gpu, model_type=model_type)
masks, flows, styles, diams = model.eval(imgs, anisotropy=anisotropy,
diameter=diameter, channels=channels,
do_3D=do_3D,
**cellpose_kwargs)
masks = __reshape_output(masks, dataset, do_3D)
if diameter is None:
diameter = 12 / intake_io.get_spacing(dataset)[-1]
masks = __combine_3D(masks, do_3D, diameter,
remove_small_mode=remove_small_mode,
remove_small_diam_fraction=remove_small_diam_fraction,
clear_border=clear_border)
if show_cellpose_debug:
flows = np.array([flows[i][0] for i in range(len(flows))])
if do_3D:
imgs = imgs[0]
flows = flows[0]
display_cellpose_results(imgs, masks, flows, channels, is_3d='z' in dataset.dims)
if add_to_input:
masks = __add_segmentation_to_image(dataset['image'].data, masks)
if 'c' in dataset.dims:
ch_names = list(dataset.coords['c'].data)
else:
ch_names = ['channel 0']
masks = __image_to_dataset(masks,
ch_names + ['ROI segmentation'],
dataset)
return masks
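# A minimal usage sketch (the file name is a placeholder; gpu=False keeps
# it runnable on CPU-only machines):
#     dataset = intake_io.imload('sample.tif')
#     masks = segment_roi(dataset, channel=0, gpu=False, remove_small_mode='2D')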
def __add_segmentation_to_image(img, masks):
if len(img.shape) > len(masks.shape):
nshape = (img.shape[0] + 1,) + img.shape[1:]
else:
nshape = (2,) + img.shape
new_img = np.zeros(nshape)
new_img[:-1] = img
new_img[-1] = masks
return new_img.astype(np.uint16)
def __image_to_dataset(img, channel_names, template_dataset):
coords = dict(c=channel_names)
for c in ['x', 'y', 'z']:
if c in template_dataset.dims:
coords[c] = template_dataset.coords[c]
dims = template_dataset['image'].dims
if 'c' not in dims:
dims = ('c',) + dims
dataset = xr.Dataset(data_vars=dict(image=(dims, img)),
coords=coords,
attrs=template_dataset.attrs)
return dataset
def segment_roi_batch(input_dir: str, output_dir: str, channel: int, **kwargs):
"""
Segment ROI (cells or nuclei) in all images in a given folder.
Parameters
----------
input_dir : str
Input directory
output_dir : str
Directory to save segmentation results.
Segmentation is combined with the raw data into a multi-page tiff
channel : int
Channel number to use for segmentation, starting from 0.
kwargs : key value
Arguments for `segment_image` (see below)
Attributes
---------
do_3D : bool, optional
If True, segment in the 3D mode with cellpose (computationally expensive).
If False, segment each z-layer and then label in 3D.
Default is False.
gpu : bool, optional
If True, use gpu for cellpose segmentation.
Default: True
model_type : str
Cellpose model type ('cyto' or 'nuclei')
Use 'cyto' for irregular nuclei.
Default: 'cyto'.
channels : tuple or list
The 'channels' parameter of cellpose.
Default: [0,0] (gray scale)
diameter : int, optional
Target ROI diameter in pixels.
If None, will be calculated as 12 microns converted to pixels.
Default: None.
remove_small_mode : str, optional
'2D', or '3D'.
Used to remove small ROI by volume (3D) or area (2D).
For a thin stack (as in the example data), use '2D'.
Default: 3D.
remove_small_diam_fraction : float, optional
Minimal diameter for the ROI.
Provided as a fraction of the target diameter (the `diameter` parameter of the Cellpose).
Default: 0.5.
clear_border : bool, optional
If True, will remove ROI touching image border (in xy only).
Default: False
cellpose_kwargs : key value
Cellpose arguments
"""
samples = walk_dir(input_dir)
for i, sample in enumerate(samples):
print(sample)
print(fr'Processing sample {i + 1} of {len(samples)}')
dataset = intake_io.imload(sample)
output = segment_roi(dataset, channel, add_to_input=True, **kwargs)
fn = sample[len(input_dir):].replace(os.path.splitext(sample)[-1], '.tif')
os.makedirs(os.path.dirname(output_dir + fn), exist_ok=True)
intake_io.imsave(output, output_dir + fn)
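# A usage sketch with placeholder paths:
#     segment_roi_batch('raw_images/', 'segmented/', channel=0,
#                       gpu=False, remove_small_mode='2D')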
def __filter_laplace(img, minsize_um, maxsize_um, num_sigma, spacing):
laplace = np.zeros(img.shape, dtype=np.float32)
for sigma in np.linspace(minsize_um, maxsize_um, int(num_sigma), endpoint=True):
gauss = filters.gaussian(img, sigma=sigma / spacing)
laplace = np.max(np.stack([laplace, filters.laplace(gauss)]), axis=0)
return laplace
def centers_to_markers(logblobs, img, bg_img, threshold_background):
markers = np.zeros(img.shape)
ind = np.int_(np.round_(logblobs[:, :len(img.shape)])).transpose()
markers[tuple(ind)] = 1
markers = markers * (img > bg_img * threshold_background)
markers = ndimage.label(markers)[0]
return markers
def calculate_background_image(img, roi, global_background=True,
global_background_percentile=95., background_percentile=50.):
if roi is not None and len(np.unique(roi)) > 1:
llist = np.unique(roi)[1:]
if background_percentile == 50:
bg = ndimage.median(img, roi, llist)
else:
bg = np.array([np.percentile(img[roi == lb], background_percentile)
for lb in llist])
if global_background:
bg_img = np.ones_like(img) * np.percentile(bg, global_background_percentile)
else:
bg_img = np.zeros_like(img)
for i, l in enumerate(llist):
bg_img[np.where(roi == l)] = bg[i]
else:
bg_img = np.zeros_like(img)
return bg_img
def threshold_puncta(img, bg_img, roi, minsize_um, maxsize_um, num_sigma, spacing,
segmentation_mode, threshold_segmentation,
global_background=True, global_background_percentile=95., background_percentile=50.):
if segmentation_mode == 0:
intensity_image = __filter_laplace(img, minsize_um, maxsize_um, num_sigma, spacing)
bg_img = np.ones_like(bg_img)
elif segmentation_mode == 1:
intensity_image = __filter_laplace(img, minsize_um, maxsize_um, num_sigma, spacing)
bg_img = calculate_background_image(intensity_image, roi,
global_background=global_background,
global_background_percentile=global_background_percentile,
background_percentile=background_percentile)
elif segmentation_mode == 2:
intensity_image = img
else:
raise ValueError(rf'{segmentation_mode} is invalid value for segmentation_mode, must be 0, 1, or 2')
mask = intensity_image > threshold_segmentation * bg_img
return mask
def segment_puncta(dataset, channel=None, roi=None, minsize_um=0.2, maxsize_um=2, num_sigma=5,
overlap=1, threshold_detection=0.001,
threshold_background=0, global_background=True,
global_background_percentile=95, background_percentile=50,
threshold_segmentation=50, segmentation_mode=1,
remove_out_of_roi=False, maxrad_um=None):
"""
Parameters
----------
dataset : xr.Dataset
Image in the form of an xarray dataset (read with intake_io).
channel : int, optional
Channel number to use for segmentation, starting from 0.
If the image has only one channel, this can be left out.
roi : np.ndarray, optional
Labeled segmentation masks for cells/nuclei.
Default: None
minsize_um : float
Minimal sigma for the Laplacian of Gaussian detection (microns).
Default: 0.2
maxsize_um : float
Maximal sigma for the Laplacian of Gaussian detection (microns).
Default: 2
num_sigma : int
Number of sigma values for the Laplacian of Gaussian detection.
Default: 5
overlap : float
Value between 0 and 1.
If two blobs overlap by a fraction greater than this value,
the smaller blob is eliminated.
Default: 1 (blobs are removed only if overlapping completely)
threshold_detection : float
Threshold for detecting LoG blobs.
The absolute lower bound for scale space maxima.
Local maxima smaller than thresh are ignored.
Reduce this to detect blobs with less intensities.
Default: 0.001.
threshold_background : float
Threshold used to post-filter puncta in cells with diffuse signal.
This threshold is provided relative to the median intensity inside cells.
E.g, `threshold_background` = 2 will remove all puncta with intensity lower than two background values.
Set to 0 to keep all puncta.
global_background : bool
If True, the background value is calculated globally as the `global_background_percentile` of all cells.
Default: True
global_background_percentile : float
Percentile (between 0 and 100) of cell background values to calculate the global background value.
Default: 95.
background_percentile : float
Percentile (between 0 and 100) of image intensity inside cell to calculate the background value.
Default: 50 (median).
threshold_segmentation : float
Threshold for puncta segmentation.
Used in combination with `segmentation_mode`.
For segmentation_mode 0, choose values in the order of 0.001
For segmentation_mode 1, choose values in the order of 50.
For segmentation_mode 2, choose values in the order of 3.
Reduce to detect more/larger puncta, increase to detect fewer/smaller puncta.
Default: 50 (segmentation_mode 1).
segmentation_mode : int
0, 1, or 2.
Determines the mode how `threshold_segmentation` is applied.
0: apply absolute threshold in LoG space.
1: apply threshold relative to background in LoG space.
2: apply threshold relative to the background in image intensity space.
Default: 1
remove_out_of_roi : bool
If True, remove all puncta (parts) that are not inside cells/nuclei.
Default: False.
maxrad_um : float
If not None, remove puncta with a radius larger than this value.
Default: None
Returns
-------
puncta : np.ndarray
Labeled segmentation mask for puncta
"""
# get image and spacing
spacing = np.array(intake_io.get_spacing(dataset))
if 'c' in dataset.dims:
img = dataset.loc[dict(c=dataset.coords['c'].data[channel])]['image'].data
else:
img = dataset['image'].data
# find blob centers with scale-adapted LoG
logblobs = blob_log(img,
min_sigma=minsize_um / spacing,
max_sigma=maxsize_um / spacing,
num_sigma=int(num_sigma),
overlap=overlap,
threshold=threshold_detection)
# calculate background image
bg_img = calculate_background_image(img, roi, global_background,
global_background_percentile, background_percentile)
# convert the blob centers to watershed markers, filter by background
markers = centers_to_markers(logblobs, img, bg_img, threshold_background)
# segment puncta
mask = threshold_puncta(img, bg_img, roi, minsize_um, maxsize_um, num_sigma, spacing,
segmentation_mode, threshold_segmentation, global_background=True,
global_background_percentile=global_background_percentile,
background_percentile=background_percentile)
if remove_out_of_roi and roi is not None:
mask = mask * (roi > 0)
dist = ndimage.distance_transform_edt(mask, sampling=tuple(spacing))
puncta = watershed(-dist, markers, mask=mask)
if maxrad_um is not None:
llist = | np.unique(puncta) | numpy.unique |
import pandas as pd
# import matplotlib.pyplot as plt
import cv2
import math
import numpy as np
import sys
import os
from Model.BoundaryDescriptor import get_contours_binary, calc_contour_feature, get_crop_imgs, draw_bbox
def get_mask(img, en_pix):
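# Decode a space-separated run-length encoding of (start_pixel, run_length)
# pairs into a binary mask with the same number of pixels as `img`.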
en_pix.split(' ')
rle = list(map(int, en_pix.split(' ')))
pixel, pixel_count = [], []
[pixel.append(rle[i]) if i % 2 == 0 else pixel_count.append(rle[i])
for i in range(0, len(rle))]
# print('pixel starting points:\n', pixel)
# print('pixel counting:\n', pixel_count)
rle_pixels = [list(range(pixel[i], pixel[i]+pixel_count[i]))
for i in range(0, len(pixel))]
# print('rle_pixels\n:', rle_pixels)
rle_mask_pixels = sum(rle_pixels, [])
# print('rle mask pixels:\n', rle_mask_pixels)
# cv2.imshow("img", img)
# cv2.waitKey(0)
# plt.imshow(img)
# plt.show()
img_size = img.shape[0] * img.shape[1]
mask_img = np.zeros((img_size, 1), dtype=int)
mask_img[rle_mask_pixels] = 255
l, b = img.shape[0], img.shape[1]
mask = np.reshape(mask_img, (b, l))
import numpy as np
import torch
import matplotlib.pylab
from fiery.utils.instance import predict_instance_segmentation_and_trajectories
DEFAULT_COLORMAP = matplotlib.pylab.cm.jet
def flow_to_image(flow: np.ndarray, autoscale: bool = False) -> np.ndarray:
"""
Applies colour map to flow which should be a 2 channel image tensor HxWx2. Returns a HxWx3 numpy image
Code adapted from: https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
"""
u = flow[0, :, :]
v = flow[1, :, :]
# Convert to polar coordinates
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = np.max(rad)
# Normalise flow maps
if autoscale:
u /= maxrad + np.finfo(float).eps
v /= maxrad + np.finfo(float).eps
# visualise flow with cmap
return np.uint8(compute_color(u, v) * 255)
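# A small usage sketch (random flow field, purely illustrative):
#     flow = np.random.randn(2, 64, 64).astype(np.float32)
#     rgb = flow_to_image(flow, autoscale=True)  # HxWx3 uint8 image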
def _normalise(image: np.ndarray) -> np.ndarray:
lower = np.min(image)
delta = np.max(image) - lower
if delta == 0:
delta = 1
image = (image.astype(np.float32) - lower) / delta
return image
def apply_colour_map(
image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = False
) -> np.ndarray:
"""
Applies a colour map to the given 1 or 2 channel numpy image. if 2 channel, must be 2xHxW.
Returns a HxWx3 numpy image
"""
if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):
if image.ndim == 3:
image = image[0]
# grayscale scalar image
if autoscale:
image = _normalise(image)
return cmap(image)[:, :, :3]
if image.shape[0] == 2:
# 2 dimensional UV
return flow_to_image(image, autoscale=autoscale)
if image.shape[0] == 3:
# normalise rgb channels
if autoscale:
image = _normalise(image)
return np.transpose(image, axes=[1, 2, 0])
raise Exception('Image must be 1, 2 or 3 channel to convert to colour_map (CxHxW)')
def heatmap_image(
image: np.ndarray, cmap: matplotlib.colors.LinearSegmentedColormap = DEFAULT_COLORMAP, autoscale: bool = True
) -> np.ndarray:
"""Colorize an 1 or 2 channel image with a colourmap."""
if not issubclass(image.dtype.type, np.floating):
raise ValueError(f"Expected a ndarray of float type, but got dtype {image.dtype}")
if not (image.ndim == 2 or (image.ndim == 3 and image.shape[0] in [1, 2])):
raise ValueError(f"Expected a ndarray of shape [H, W] or [1, H, W] or [2, H, W], but got shape {image.shape}")
heatmap_np = apply_colour_map(image, cmap=cmap, autoscale=autoscale)
heatmap_np = np.uint8(heatmap_np * 255)
return heatmap_np
def compute_color(u: np.ndarray, v: np.ndarray) -> np.ndarray:
assert u.shape == v.shape
[h, w] = u.shape
img = np.zeros([h, w, 3])
nan_mask = np.isnan(u) | np.isnan(v)
u[nan_mask] = 0
v[nan_mask] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
f_k = (a + 1) / 2 * (ncols - 1) + 1
k_0 = np.floor(f_k).astype(int)
k_1 = k_0 + 1
k_1[k_1 == ncols + 1] = 1
f = f_k - k_0
for i in range(0, np.size(colorwheel, 1)):
tmp = colorwheel[:, i]
col0 = tmp[k_0 - 1] / 255
col1 = tmp[k_1 - 1] / 255
col = (1 - f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1 - rad[idx] * (1 - col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = col * (1 - nan_mask)
return img
def make_color_wheel() -> np.ndarray:
"""
Create colour wheel.
Code adapted from https://github.com/liruoteng/FlowNet/blob/master/models/flownet/scripts/flowlib.py
"""
red_yellow = 15
yellow_green = 6
green_cyan = 4
cyan_blue = 11
blue_magenta = 13
magenta_red = 6
ncols = red_yellow + yellow_green + green_cyan + cyan_blue + blue_magenta + magenta_red
colorwheel = np.zeros([ncols, 3])
col = 0
# red_yellow
colorwheel[0:red_yellow, 0] = 255
colorwheel[0:red_yellow, 1] = np.transpose(np.floor(255 * np.arange(0, red_yellow) / red_yellow))
col += red_yellow
# yellow_green
colorwheel[col: col + yellow_green, 0] = 255 - np.transpose(
np.floor(255 * np.arange(0, yellow_green) / yellow_green)
)
colorwheel[col: col + yellow_green, 1] = 255
col += yellow_green
# green_cyan
colorwheel[col: col + green_cyan, 1] = 255
colorwheel[col: col + green_cyan, 2] = np.transpose(np.floor(255 * np.arange(0, green_cyan) / green_cyan))
col += green_cyan
# cyan_blue
colorwheel[col: col + cyan_blue, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, cyan_blue) / cyan_blue))
colorwheel[col: col + cyan_blue, 2] = 255
col += cyan_blue
# blue_magenta
colorwheel[col: col + blue_magenta, 2] = 255
colorwheel[col: col + blue_magenta, 0] = np.transpose(np.floor(255 * np.arange(0, blue_magenta) / blue_magenta))
col += +blue_magenta
# magenta_red
colorwheel[col: col + magenta_red, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, magenta_red) / magenta_red))
colorwheel[col: col + magenta_red, 0] = 255
return colorwheel
def make_contour(img, colour=[0, 0, 0], double_line=False):
h, w = img.shape[:2]
out = img.copy()
# Vertical lines
out[np.arange(h), np.repeat(0, h)] = colour
import sys
from firebase_admin import credentials, firestore, storage
from datetime import datetime
from flask import Flask, request
import optparse
import firebase_admin
import cv2
import numpy as np
import json
# ======================Khai Bao=================================#
app = Flask(__name__)
classes = None
class_ids = []
cred = credentials.Certificate('./Credentials.json')
firebase_admin.initialize_app(cred, {
'storageBucket': "objectdetection-python.appspot.com"
})
firestore.client()
bucket = storage.bucket()
with open("models_config_label/yolov3.txt", 'r') as f:
classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
# ==========================///Khai Bao///=============================#
# ======================Utils Function=================================#
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
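# NOTE: the i[0] indexing assumes getUnconnectedOutLayers() returns Nx1
# arrays (older OpenCV releases); on newer OpenCV versions it returns a
# flat array, in which case layer_names[i - 1] may be needed instead.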
def draw_prediction(index, img, class_id, confidence, x, y, x_plus_w, y_plus_h):
# label = str(classes[class_id])
color = COLORS[class_id]
# cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
print(str(classes[class_id]) + " sub rect: " +
str(x) + ", " + str(y) + ", " + str(x_plus_w) + ", " + str(y_plus_h))
cv2.imwrite("./img_detected/big-object-detection.jpg", img)
cv2.imwrite("./img_detected/object-detection-" + str(index) + ".jpg", img[y: y_plus_h, x: x_plus_w])
return "./img_detected/object-detection-" + str(index) + ".jpg"
# ======================///Utils Function///=================================#
# ======================Flask Function=================================#
@app.route("/")
def index():
return "Hello world!!!"
@app.route('/image_upload', methods=['GET', 'POST'])
def detect():
path_to_upload = []
dt = datetime.now()
file = request.files['image']
filename = file.filename
pathStored = "./img_store/" + str(dt.microsecond) + filename
file.save(pathStored)
#
image = cv2.imread(pathStored)
# image = cv2.imread("/Users/leclev1/Desktop/MU
# -loan-vi-Alexis-Sanchez-Sao-qua-ta-chieu-menh-Mourinho-sanchez3-1526877991-226-width660height397.jpg")
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
Width = image.shape[1]
Height = image.shape[0]
print("Width Height " + str(Width) + ", " + str(Height))
# scale = 0.00392
scale = 0.00784
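# 0.00392 is roughly 1/255 (standard pixel normalisation); the value used
# here, 0.00784, is roughly 2/255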
net = cv2.dnn.readNet("models_config_label/yolov3.weights", "models_config_label/yolov3.cfg")
blob = cv2.dnn.blobFromImage(image, scale, (608, 608), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(get_output_layers(net))
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
#
# Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
# Python implementation: bufr_read_tempf
#
#
# Description: read and print radiosonde data from TEMP BUFR messages.
# If available this version also lists the position information from the WMO list
# (now OSCAR/Surface) appended to the reports by ECMWF
#
# Author: <NAME>
#
# Please note that TEMP reports can be encoded in various ways in BUFR.
# Therefore the code below might not work directly for other types of TEMP
# messages than the one used in the example. It is advised to use bufr_dump to
# understand the structure of the messages.
#
from __future__ import print_function
import sys
import traceback
import numpy as np
from eccodes import *
INPUT = "../../data/bufr/PraticaTemp.bufr"
VERBOSE = 1 # verbose error reporting
def example():
# open BUFR file
f = open(INPUT, "rb")
llstdonly = 1
cnt = 0
# loop over the messages in the file
while 1:
# get handle for message
bufr = codes_bufr_new_from_file(f)
if bufr is None:
break
cnt += 1
# desc = codes_get_array(bufr, 'unexpandedDescriptors')
# if all(desc != 309056): # descent reports
# codes_release(bufr)
# continue # Skip other templates
# we need to instruct ecCodes to expand all the descriptors
# i.e. unpack the data section
codes_set(bufr, "unpack", 1)
# get header information from the message
try:
sid = codes_get(bufr, "aircraftRegistrationNumberOrOtherIdentification")
except Exception:
sid = "UNKNOWN"
statid = "00000 "
try:
block = codes_get(bufr, "blockNumber")
stnum = codes_get(bufr, "stationNumber")
if (block > 0) and (block < 100): # or block != CODES_MISSING_LONG
statid = str.format("%.2i%.3i " % (block, stnum))
except Exception:
statid = "00000 "
if statid == "00000 ":
statid = sid[0:8]
# subtype = codes_get(bufr,'rdbSubtype')
sondetype = codes_get(bufr, "radiosondeType")
slat = codes_get_array(bufr, "latitude")
slon = codes_get_array(bufr, "longitude")
try:
htg = codes_get(bufr, "heightOfStationGroundAboveMeanSeaLevel")
except Exception:
htg = -999.0
try:
htp = codes_get(bufr, "heightOfBarometerAboveMeanSeaLevel")
except Exception:
htp = -999.0
year = codes_get(bufr, "year")
month = codes_get(bufr, "month")
day = codes_get(bufr, "day")
hour = codes_get(bufr, "hour")
minute = codes_get(bufr, "minute")
try:
second = codes_get(bufr, "second")
except Exception:
second = 0.0
date = str.format("%i%.2i%.2i" % (year, month, day))
time = str.format("%.2i%.2i%.2i" % (hour, minute, second))
try:
windsp = codes_get_array(bufr, "windSpeed")
except Exception:
codes_release(bufr)
continue
print(
"Ob: %7i %s %s %s %7.3f %8.3f %6.1f %6.1f %3i %4i"
% (
cnt,
statid,
date,
time,
slat[0],
slon[0],
htg,
htp,
sondetype,
len(windsp),
)
)
try:
rsnumber = codes_get(bufr, "radiosondeSerialNumber")
rssoftware = codes_get(bufr, "softwareVersionNumber")
balloonwt = codes_get(bufr, "weightOfBalloon")
print("RS number/software/balloonwt ", rsnumber, rssoftware, balloonwt)
except Exception:
rsnumber = 0
try:
htec = codes_get(
bufr, "heightOfStation"
) # Height from WMO list (appended by ECMWF)
print("WMO list lat, lon, ht: %7.3f %8.3f %6.1f" % (slat[1], slon[1], htec))
except Exception:
htec = 0
# get all the timePeriods
dtime = codes_get_array(bufr, "timePeriod")
try:
pressure = codes_get_array(bufr, "pressure")
except Exception:
codes_release(bufr)
continue
vsSignif = codes_get_array(bufr, "extendedVerticalSoundingSignificance")
try:
geopoth = codes_get_array(bufr, "nonCoordinateGeopotentialHeight")
except Exception:
codes_release(bufr)
continue
dlat = codes_get_array(bufr, "latitudeDisplacement")
dlon = codes_get_array(bufr, "longitudeDisplacement")
airt = codes_get_array(bufr, "airTemperature")
dewt = codes_get_array(bufr, "dewpointTemperature")
windd = codes_get_array(bufr, "windDirection")
dtime = | np.where(dtime != CODES_MISSING_LONG, dtime, np.nan) | numpy.where |
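# A minimal, standalone sketch (not part of the ECMWF example) of masking missing values in
# the remaining per-level arrays the same way dtime is handled above, so numpy statistics
# ignore them. `missing` is whatever sentinel the decoder uses (e.g. CODES_MISSING_LONG or
# CODES_MISSING_DOUBLE from eccodes); the helper itself is hypothetical.
import numpy as np

def mask_missing(values, missing):
    """Return a float array with the sentinel `missing` replaced by NaN."""
    values = np.asarray(values, dtype=float)
    return np.where(values == missing, np.nan, values)

# e.g. airt_masked = mask_missing(airt, CODES_MISSING_DOUBLE)   # Kelvin, NaN where missing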
# Copyright (c) 2019, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
from typing import List
import math
import pandas as pd
import numpy as np
from datetime import timedelta
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import *
# from pyspark.sql.functions import pandas_udf,PandasUDFType
from operator import attrgetter
from pyspark.sql.types import StructType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata, DataDescriptor, ModuleMetadata
from cerebralcortex.core.plotting.basic_plots import BasicPlots
from cerebralcortex.core.plotting.stress_plots import StressStreamPlots
class DataStream(DataFrame):
def __init__(self,
data: object = None,
metadata: Metadata = None
):
"""
DataStream object contains pyspark dataframe and metadata linked to it.
Args:
data (DataFrame): pyspark dataframe
metadata (Metadata): metadata of data
"""
self._data = data
self._metadata = metadata
self._basic_plots = BasicPlots()
self._stress_plots = StressStreamPlots()
if isinstance(data, DataFrame):
super(self.__class__, self).__init__(data._jdf, data.sql_ctx)
if self._metadata is not None and not isinstance(self.metadata, list) and len(self.metadata.data_descriptor) == 0 and data is not None:
self.metadata = self._gen_metadata()
# !!!! Disable some of dataframe operations !!!
def write(self):
raise NotImplementedError
def writeStream(self):
raise NotImplementedError
def get_metadata(self, version: int = None) -> Metadata:
"""
get stream metadata
Args:
version (int): version of a stream
Returns:
Metadata: single version of a stream
Raises:
Exception: if specified version is not available for the stream
"""
for md in self._metadata:
if md.version == version:
return md
else:
raise Exception("Version '" + str(version) + "' is not available for this stream.")
return None
@property
def metadata(self):
"""
return stream metadata
Returns:
Metadata:
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
set stream metadata
Args:
metadata (Metadata):
"""
self._metadata = metadata
@property
def data(self):
"""
get stream data
Returns (DataFrame):
"""
# raise Exception("Cannot access data. Please use DataStream object to perform all the operations.")
return self._data
@data.setter
def data(self, value):
"""
set stream data
Args:
value (DataFrame):
"""
self._data = value
# !!!! STAT METHODS !!!
def compute_average(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute average of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): average will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="avg", columnName=colmnName)
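# Example (hypothetical stream): per-minute averages of every data column
#   avg_ds = ds.compute_average(windowDuration=60)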
def compute_sqrt(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute square root of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): square root will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="sqrt", columnName=colmnName)
def compute_sum(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute sum of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): sum will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="sum", columnName=colmnName)
def compute_variance(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute variance of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): variance will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="variance", columnName=colmnName)
def compute_stddev(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute standard deviation of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): standard deviation will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="stddev", columnName=colmnName)
def compute_min(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute min of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): min value will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="min", columnName=colmnName)
def compute_max(self, windowDuration: int = None, slideDuration:int=None, startTime=None, colmnName: str = None) -> object:
"""
Window data and compute max of a windowed data of a single or all columns
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
colmnName (str): max will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
return self._compute_stats(windowDuration=windowDuration, slideDuration=slideDuration,startTime=startTime, methodName="max", columnName=colmnName)
def _compute_stats(self, windowDuration: int = None, slideDuration:int=None, startTime=None, methodName: str = None, columnName: List[str] = []) -> object:
"""
Compute stats on pyspark dataframe
Args:
windowDuration (int): duration of a window in seconds. If it is not set then stats will be computed for the whole data in a column(s)
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
methodName (str): pyspark stat method name
columnName (str): the requested stat will be computed for all the columns if columnName param is not provided (for all windows)
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
exprs = self._get_column_names(columnName=columnName, methodName=methodName)
if slideDuration:
slideDuration = str(slideDuration) + " seconds"
if windowDuration:
windowDuration = str(windowDuration) + " seconds"
win = F.window("timestamp", windowDuration=windowDuration, slideDuration=slideDuration, startTime=startTime)
result = self._data.groupBy(['user','version', win]).agg(exprs)
else:
result = self._data.groupBy(['user','version']).agg(exprs)
result = result.withColumn("timestamp",result.window.start)
# to get local time - agg/window won't return localtimestamp col
offset = (self._data.first().timestamp - self._data.first().localtime).total_seconds()
result = result.withColumn("localtime", result.window.start+F.expr("INTERVAL "+str(offset)+" SECONDS"))
result = self._update_column_names(result)
return DataStream(data=result, metadata=Metadata())
# !!!! WINDOWING METHODS !!!
def window(self, windowDuration: int = 60, groupByColumnName: List[str] = [], columnName: List[str] = [],
slideDuration: int = None, startTime=None, preserve_ts=False):
"""
Window data into fixed length chunks. If no columnName is provided then the windowing will be performed on all the columns.
Args:
windowDuration (int): duration of a window in seconds
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
columnName List[str]: column names on which windowing should be performed. Windowing will be performed on all columns if none is provided
slideDuration (int): slide duration of a window
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
preserve_ts (bool): setting this to True will return timestamps of corresponding to each windowed value
Returns:
DataStream: this will return a new datastream object with blank metadata
Note:
This windowing method will use collect_list to return values for each window. collect_list is not optimized.
"""
windowDuration = str(windowDuration) + " seconds"
if slideDuration is not None:
slideDuration = str(slideDuration) + " seconds"
exprs = self._get_column_names(columnName=columnName, methodName="collect_list", preserve_ts=preserve_ts)
win = F.window("timestamp", windowDuration=windowDuration, slideDuration=slideDuration, startTime=startTime)
if len(groupByColumnName) > 0:
groupByColumnName.append("user")
groupByColumnName.append("version")
groupByColumnName.append(win)
windowed_data = self._data.groupBy(groupByColumnName).agg(exprs)
else:
windowed_data = self._data.groupBy(['user', 'version', win]).agg(exprs)
data = windowed_data
data = self._update_column_names(data)
return DataStream(data=data, metadata=Metadata())
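# Example (hypothetical stream): 60-second tumbling windows over all data columns
#   windowed_ds = ds.window(windowDuration=60)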
def _update_column_names(self, data):
columns = []
for column in data.columns:
if "(" in column:
m = re.search(r'\((.*?)\)', column)
columns.append(m.group(1))
else:
columns.append(column)
return data.toDF(*columns)
def map_stream(self, window_ds):
"""
Map/join a stream to a windowed stream
Args:
window_ds (Datastream): windowed datastream object
Returns:
Datastream: joined/mapped stream
"""
window_ds = window_ds.data.drop("version", "user")
df = window_ds.join(self.data, self.data.timestamp.between(F.col("window.start"), F.col("window.end")))
return DataStream(data=df, metadata=Metadata())
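# Example (hypothetical stream): attach each raw sample to the 60-second window it falls in
#   joined_ds = ds.map_stream(ds.window(windowDuration=60))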
def filter_user(self, user_ids: List):
"""
filter data to get only selective users' data
Args:
user_ids (List[str]): list of users' UUIDs
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
if not isinstance(user_ids, list):
user_ids = [user_ids]
data = self._data.where(self._data["user"].isin(user_ids))
return DataStream(data=data, metadata=Metadata())
def filter_version(self, version: List):
"""
filter data to get only selective users' data
Args:
version (List[str]): list of stream versions
Returns:
DataStream: this will return a new datastream object with blank metadata
Todo:
Metadata version should be return with the data
"""
if not isinstance(version, list):
version = [version]
data = self._data.where(self._data["version"].isin(version))
return DataStream(data=data, metadata=Metadata())
def compute_magnitude(self, col_names=[], magnitude_col_name="magnitude"):
if len(col_names)<1:
raise Exception("col_names param is missing.")
tmp = ""
for col_name in col_names:
tmp += 'F.col("'+col_name+'")*F.col("'+col_name+'")+'
tmp = tmp.rstrip("+")
data = self._data.withColumn(magnitude_col_name, F.sqrt(eval(tmp)))
return DataStream(data=data, metadata=Metadata())
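# Example (hypothetical column names):
#   mag_ds = ds.compute_magnitude(col_names=["accelerometer_x", "accelerometer_y", "accelerometer_z"])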
def interpolate(self, freq=16, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None):
"""
Interpolate values according to different methods. This method internally uses pandas interpolation.
Args:
freq (int): Frequency of the signal
method (str): default ‘linear’
- ‘linear’: Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes.
- ‘time’: Works on daily and higher resolution data to interpolate given length of interval.
- ‘index’, ‘values’: use the actual numerical values of the index.
- ‘pad’: Fill in NaNs using existing values.
- ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘spline’, ‘barycentric’, ‘polynomial’: Passed to scipy.interpolate.interp1d. These methods use the numerical values of the index. Both ‘polynomial’ and ‘spline’ require that you also specify an order (int), e.g. df.interpolate(method='polynomial', order=5).
- ‘krogh’, ‘piecewise_polynomial’, ‘spline’, ‘pchip’, ‘akima’: Wrappers around the SciPy interpolation methods of similar names. See Notes.
- ‘from_derivatives’: Refers to scipy.interpolate.BPoly.from_derivatives which replaces ‘piecewise_polynomial’ interpolation method in scipy 0.18.
axis {0 or ‘index’, 1 or ‘columns’, None}: default None. Axis to interpolate along.
limit (int): optional. Maximum number of consecutive NaNs to fill. Must be greater than 0.
inplace (bool): default False. Update the data in place if possible.
limit_direction {‘forward’, ‘backward’, ‘both’}: default ‘forward’. If limit is specified, consecutive NaNs will be filled in this direction.
limit_area {None, ‘inside’, ‘outside’}: default None. If limit is specified, consecutive NaNs will be filled with this restriction.
- None: No fill restriction.
- ‘inside’: Only fill NaNs surrounded by valid values (interpolate).
- ‘outside’: Only fill NaNs outside valid values (extrapolate).
downcast optional, ‘infer’ or None: defaults to None
**kwargs: Keyword arguments to pass on to the interpolating function.
Returns DataStream: interpolated data
"""
schema = self._data.schema
sample_freq = 1000/freq
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def interpolate_data(pdf):
pdf.set_index("timestamp", inplace=True)
pdf = pdf.resample(str(sample_freq)+"ms").bfill(limit=1).interpolate(method=method, axis=axis, limit=limit,inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast)
pdf.ffill(inplace=True)
pdf.reset_index(drop=False, inplace=True)
pdf.sort_index(axis=1, inplace=True)
return pdf
data = self._data.groupby(["user", "version"]).apply(interpolate_data)
return DataStream(data=data,metadata=Metadata())
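# Example (hypothetical stream): resample to 16 Hz and fill gaps linearly
#   ds_16hz = ds.interpolate(freq=16, method="linear")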
def complementary_filter(self, freq:int=16, accelerometer_x:str="accelerometer_x",accelerometer_y:str="accelerometer_y",accelerometer_z:str="accelerometer_z", gyroscope_x:str="gyroscope_x", gyroscope_y:str="gyroscope_y", gyroscope_z:str="gyroscope_z"):
"""
Compute complementary filter on gyro and accel data.
Args:
freq (int): frequency of accel/gryo. Assumption is that frequency is equal for both gyro and accel.
accelerometer_x (str): name of the column
accelerometer_y (str): name of the column
accelerometer_z (str): name of the column
gyroscope_x (str): name of the column
gyroscope_y (str): name of the column
gyroscope_z (str): name of the column
"""
dt = 1.0 / freq # 1/16.0;
M_PI = math.pi
hpf = 0.90  # high-pass (gyro) weight of the complementary filter
lpf = 0.10  # low-pass (accelerometer) weight of the complementary filter
window = Window.partitionBy(self._data['user']).orderBy(self._data['timestamp'])
data = self._data.withColumn("thetaX_accel",
((F.atan2(-F.col(accelerometer_z), F.col(accelerometer_y)) * 180 / M_PI)) * lpf) \
.withColumn("roll",
(F.lag("thetaX_accel").over(window) + F.col(gyroscope_x) * dt) * hpf + F.col("thetaX_accel")).drop("thetaX_accel") \
.withColumn("thetaY_accel",
((F.atan2(-F.col(accelerometer_x), F.col(accelerometer_z)) * 180 / M_PI)) * lpf) \
.withColumn("pitch",
(F.lag("thetaY_accel").over(window) + F.col(gyroscope_y) * dt) * hpf + F.col("thetaY_accel")).drop("thetaY_accel")\
.withColumn("thetaZ_accel",
((F.atan2(-F.col(accelerometer_y), F.col(accelerometer_x)) * 180 / M_PI)) * lpf) \
.withColumn("yaw",
(F.lag("thetaZ_accel").over(window) + F.col(gyroscope_z) * dt) * hpf + F.col("thetaZ_accel")).drop("thetaZ_accel")
return DataStream(data=data.dropna(),metadata=Metadata())
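# Complementary-filter idea: angle ~= hpf * (previous angle + gyro * dt) + lpf * accelerometer angle
# Example (hypothetical 16 Hz accel/gyro stream using the default column names):
#   fused_ds = ds.complementary_filter(freq=16)
#   fused_ds.select("timestamp", "roll", "pitch", "yaw").show(5)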
def compute(self, udfName, windowDuration: int = None, slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
Run an algorithm. This method supports running an udf method on windowed data
Args:
udfName: Name of the algorithm
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream: this will return a new datastream object with blank metadata
"""
if slideDuration:
slideDuration = str(slideDuration) + " seconds"
if 'custom_window' in self._data.columns:
data = self._data.groupby('user', 'custom_window').apply(udfName)
else:
groupbycols = ["user", "version"]
if windowDuration:
windowDuration = str(windowDuration) + " seconds"
win = F.window("timestamp", windowDuration=windowDuration, slideDuration=slideDuration, startTime=startTime)
groupbycols.append(win)
if len(groupByColumnName) > 0:
groupbycols.extend(groupByColumnName)
data = self._data.groupBy(groupbycols).apply(udfName)
return DataStream(data=data, metadata=Metadata())
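# Example (hypothetical GROUPED_MAP pandas_udf): apply it to 60-second windows per user/version
#   result_ds = ds.compute(my_grouped_map_udf, windowDuration=60)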
# def compute(self, udfName, timeInterval=None):
# if 'custom_window' in self._data.columns:
# data = self._data.groupby('user', 'custom_window').apply(udfName)
# else:
# data = self._data.groupby('user','version').apply(udfName)
# return DataStream(data=data, metadata=Metadata())
# def run_algorithm(self, udfName, columnNames: List[str] = [], windowDuration: int = 60, slideDuration: int = None,
# groupByColumnName: List[str] = [], startTime=None):
# """
# Run an algorithm. This method supports running an udf method on windowed data
#
# Args:
# udfName: Name of the algorithm
# columnName List[str]: column names on which windowing should be performed. Windowing will be performed on all columns if none is provided
# windowDuration (int): duration of a window in seconds
# slideDuration (int): slide duration of a window
# groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
# startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
# Returns:
# DataStream: this will return a new datastream object with blank metadata
#
# """
# windowDuration = str(windowDuration) + " seconds"
# groupbycols = ["user", "version"]
#
# win = F.window("timestamp", windowDuration=windowDuration, slideDuration=slideDuration, startTime=startTime)
#
# if len(groupByColumnName) > 0:
# groupbycols.extend(groupByColumnName)
#
# groupbycols.append(win)
#
# # if len(columnNames) == 0:
# # raise ValueError("columnNames list cannot be empty.")
#
# # tmp = ""
# # for col in columnNames:
# # tmp += "collect_list({}{}{}){}".format('"', col, '"', ",")
#
# # tmp = "{}{}{}{}".format("udfName", "(", tmp.rstrip(","), ")")
# merged_column = self._data.groupBy(groupbycols).apply(udfName)
#
# # cols = merged_column.schema.fields
# # new_cols = ["timestamp"]
# # for col in cols:
# # if col.name == "merged_column":
# # for cl in col.dataType.names:
# # new_cols.append("merged_column." + cl)
# # else:
# # new_cols.append(col.name)
# #
# # merged_column = merged_column.withColumn("timestamp", merged_column.window.start)
# #
# # data = merged_column.select(new_cols)
#
# return DataStream(data=merged_column, metadata=Metadata())
def _get_column_names(self, columnName: List[str], methodName: str, preserve_ts: bool = False):
"""
Get data column names and build expression for pyspark aggregate method
Args:
columnName(List[str]): get all column names expression if columnName is empty
methodName (str): name of the method that should be applied on the column
Todo:
update non-data column names
Returns:
dict: {columnName: methodName}
"""
columns = self._data.columns
black_list_column = ["timestamp", "localtime", "user", "version"]
if "localtime" not in columns:
black_list_column.pop(1)
elif preserve_ts:
black_list_column.pop(1)
if preserve_ts:
black_list_column.pop(0)
if columnName:
if isinstance(columnName, str):
columns = [columnName]
elif isinstance(columnName, list):
columns = columnName
else:
columns = list(set(columns) - set(black_list_column))
exprs = {x: methodName for x in columns}
return exprs
############################### PLOTS ###############################
def _sort_values(self, pdf):
if "timestamp" in pdf.columns:
return pdf.sort_values('timestamp')
return pdf
def plot(self, y_axis_column=None):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._basic_plots.timeseries(pdf, y_axis_column=y_axis_column)
def plot_hist(self, x_axis_column=None):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._basic_plots.hist(pdf, x_axis_column=x_axis_column)
def plot_gps_cords(self, zoom=5):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
return self._basic_plots.plot_gps_cords(pdf, zoom=zoom)
def plot_stress_pie(self, x_axis_column="stresser_main"):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._stress_plots.plot_pie(pdf, x_axis_column)
def plot_stress_gantt(self):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._stress_plots.plot_gantt(pdf)
def plot_stress_sankey(self, cat_cols=["stresser_main", "stresser_sub"], value_cols='density',
title="Stressers' Sankey Diagram"):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._stress_plots.plot_sankey(df=pdf, cat_cols=cat_cols, value_cols=value_cols, title=title)
def plot_stress_bar(self, x_axis_column="stresser_main"):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._stress_plots.plot_bar(pdf, x_axis_column=x_axis_column)
def plot_stress_comparison(self, x_axis_column="stresser_main", usr_id=None, compare_with="all"):
pdf = self._data.toPandas()
pdf = self._sort_values(pdf)
self._stress_plots.plot_comparison(pdf, x_axis_column=x_axis_column, usr_id=usr_id, compare_with=compare_with)
#############################################################################
# Wrapper for PySpark Methods #
#############################################################################
def alias(self, alias):
"""
Returns a new DataStream with an alias set.
Args:
alias: string, an alias name to be set for the datastream.
Returns:
object: DataStream object
Examples:
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
"""
data = self._data.alias(alias)
return DataStream(data=data, metadata=Metadata())
def agg(self, *exprs):
"""
Aggregate on the entire DataStream without groups
Args:
*exprs:
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.agg({"age": "max"}).collect()
>>> # Below example shows how to use pyspark functions in add method
>>> from pyspark.sql import functions as F
>>> ds.agg(F.min(ds.age)).collect()
"""
data = self._data.agg(*exprs)
return DataStream(data=data, metadata=Metadata())
def approxQuantile(self,col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a DataStream.
The result of this algorithm has the following deterministic bound: If the DataStream has N elements and if we request the quantile at probability p up to error err, then the algorithm will return a sample x from the DataStream so that the exact rank of x is close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna algorithm (with some speed optimizations). The algorithm was first presented in [[http://dx.doi.org/10.1145/375663.375670 Space-efficient Online Computation of Quantile Summaries]] by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation. For columns only containing null values, an empty list is returned.
Args:
col (str[list]): Can be a single column name, or a list of names for multiple columns.
probabilities: a list of quantile probabilities Each number must belong to [0, 1]. For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError: The relative target precision to achieve (>= 0). If set to zero, the exact quantiles are computed, which could be very expensive. Note that values greater than 1 are accepted but give the same result as 1.
Returns:
the approximate quantiles at the given probabilities. If the input col is a string, the output is a list of floats. If the input col is a list or tuple of strings, the output is also a list, but each element in it is a list of floats, i.e., the output is a list of list of floats.
"""
return self._data.approxQuantile(col=col,probabilities=probabilities,relativeError=relativeError)
# def between(self, column_name, lowerBound, upperBound):
# """
# A boolean expression that is evaluated to true if the value of this expression is between the given columns.
# Args:
# column_name (str): name of the column
# lowerBound:
# upperBound:
# Examples:
# >>> ds.select(ds.timestamp, ds.between("column-name",2, 4)).show()
# Returns:
# DataStream object
# """
#
# data = self._data[column_name].between(lowerBound=lowerBound, upperBound=upperBound)
# return DataStream(data=data, metadata=Metadata())
# def cast(self, dataType, columnName):
# """
# Convert the column into type dataType (int, string, double, float).
# Args:
# dataType (str): new dataType of the column
# columnName (str): name of the column
# Examples:
# >>> ds.select(ds.col_name.cast("string").alias('col_name')).collect()
# Returns:
# DataStream object
# """
#
# data = self._data[columnName].cast(dataType=dataType)
# return DataStream(data=data, metadata=Metadata())
def colRegex(self,colName):
"""
Selects column based on the column name specified as a regex and returns it as Column.
Args:
colName (str): column name specified as a regex.
Returns:
DataStream:
Examples:
>>> ds.colRegex("colName")
"""
return DataStream(data=self._data.colRegex(colName=colName), metadata=Metadata())
def collect(self):
"""
Collect all the data to master node and return list of rows
Returns:
List: all the rows of the DataStream
Examples:
>>> ds.collect()
"""
return self._data.collect()
def crossJoin(self, other):
"""
Returns the cartesian product with another DataStream
Args:
other: Right side of the cartesian product.
Returns:
DataStream object with joined streams
Examples:
>>> ds.crossJoin(ds2.select("col_name")).collect()
"""
data = self._data.crossJoin(other=other)
return DataStream(data=data, metadata=Metadata())
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency table. The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero pair frequencies will be returned. The first column of each row will be the distinct values of col1 and the column names will be the distinct values of col2. The name of the first column will be $col1_$col2. Pairs that have no occurrences will have zero as their counts.
Args:
col1 (str): The name of the first column. Distinct items will make the first item of each row.
col2 (str): The name of the second column. Distinct items will make the column names of the DataStream.
Returns:
DataStream object
Examples:
>>> ds.crosstab("col_1", "col_2")
"""
data = self._data.crosstab(col1=col1, col2=col2)
return DataStream(data=data, metadata=Metadata())
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataStream as a double value. Currently only supports the Pearson Correlation Coefficient.
Args:
col1 (str): The name of the first column
col2 (str): The name of the second column
method (str): The correlation method. Currently only supports “pearson”
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.corr("cal1", "col2", "pearson").collect()
"""
data = self._data.corr(col1, col2, method)
return DataStream(data=data, metadata=Metadata())
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a double value.
Args:
col1 (str): The name of the first column
col2 (str): The name of the second column
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.cov("cal1", "col2", "pearson").collect()
"""
data = self._data.cov(col1, col2)
return DataStream(data=data, metadata=Metadata())
def count(self):
"""
Returns the number of rows in this DataStream.
Examples:
>>> ds.count()
"""
return self._data.count()
def distinct(self):
"""
Returns a new DataStream containing the distinct rows in this DataStream.
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.distinct().count()
"""
data = self._data.distinct()
return DataStream(data=data, metadata=Metadata())
def drop(self, *cols):
"""
Returns a new Datastream that drops the specified column. This is a no-op if schema doesn’t contain the given column name(s).
Args:
*cols: a string name of the column to drop, or a Column to drop, or a list of string name of the columns to drop.
Returns:
Datastream:
Examples:
>>> ds.drop('col_name')
"""
data = self._data.drop(*cols)
return DataStream(data=data, metadata=Metadata())
def describe(self, *cols):
"""
Computes basic statistics for numeric and string columns. This includes count, mean, stddev, min, and max. If no columns are given, this function computes statistics for all numerical or string columns.
Args:
*cols:
Examples:
>>> ds.describe(['col_name']).show()
>>> ds.describe().show()
"""
return self._data.describe(*cols)
def dropDuplicates(self, subset=None):
"""
Return a new DataStream with duplicate rows removed, optionally only considering certain columns.
Args:
subset: optional list of column names to consider.
Returns:
Datastream:
Examples:
>>> ds.dropDuplicates().show()
>>> # Example on how to use it with params
>>> ds.dropDuplicates(['col_name1', 'col_name2']).show()
"""
data = self._data.dropDuplicates(subset=subset)
return DataStream(data=data, metadata=Metadata())
def dropna(self, how='any', thresh=None, subset=None):
"""
Returns a new DataStream omitting rows with null values.
Args:
how: ‘any’ or ‘all’. If ‘any’, drop a row if it contains any nulls. If ‘all’, drop a row only if all its values are null.
thresh: int, default None If specified, drop rows that have less than thresh non-null values. This overwrites the how parameter.
subset: optional list of column names to consider.
Returns:
Datastream:
Examples:
>>> ds.dropna()
"""
data = self._data.dropna(how=how, thresh=thresh, subset=subset)
return DataStream(data=data, metadata=Metadata())
def explain(self, extended=False):
"""
Prints the (logical and physical) plans to the console for debugging purposes.
Args:
extended: boolean, default False. If False, prints only the physical plan.
Examples:
>>> ds.explain()
"""
self._data.explain(extended=extended)
def exceptAll(self, other):
"""
Return a new DataStream containing rows in this DataStream but not in another DataStream while preserving duplicates.
Args:
other: other DataStream object
Returns:
Datastream:
Examples:
>>> ds1.exceptAll(ds2).show()
"""
data = self._data.exceptAll(other=other._data)
return DataStream(data=data, metadata=Metadata())
def fillna(self, value, subset=None):
"""
Replace null values
Args:
value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then subset is ignored and value must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string.
subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if value is a string, and subset contains a non-string column, then the non-string column is simply ignored.
Returns:
Datastream:
Examples:
>>> ds.fill(50).show()
>>> ds.fill({'col1': 50, 'col2': 'unknown'}).show()
"""
data = self._data.fillna(value=value, subset=subset)
return DataStream(data=data, metadata=Metadata())
def repartition(self, numPartitions, *cols):
"""
Returns a new DataStream partitioned by the given partitioning expressions. The resulting DataStream is hash partitioned.
numPartitions can be an int to specify the target number of partitions or a Column. If it is a Column, it will be used as the first partitioning column. If not specified, the default number of partitions is used.
Args:
numPartitions: int (target number of partitions) or Column (used as the first partitioning column)
*cols: additional partitioning columns
Returns:
DataStream: repartitioned DataStream object
"""
data = self._data.repartition(numPartitions,*cols)
return DataStream(data=data, metadata=Metadata())
def filter(self, condition):
"""
Filters rows using the given condition
Args:
condition: a Column of types.BooleanType or a string of SQL expression.
Returns:
DataStream: this will return a new datastream object with blank metadata
Examples:
>>> ds.filter("age > 3")
>>> df.filter(df.age > 3)
"""
data = self._data.filter(condition)
return DataStream(data=data, metadata=Metadata())
def foreach(self, f):
"""
Applies the f function to all Row of DataStream. This is a shorthand for df.rdd.foreach()
Args:
f: function
Returns:
DataStream object
Examples:
>>> def f(person):
... print(person.name)
>>> ds.foreach(f)
"""
data = self._data.foreach(f)
return DataStream(data=data, metadata=Metadata())
def first(self):
"""
Returns the first row as a Row.
Returns:
First row of a DataStream
Examples:
>>> ds.first()
"""
return self._data.first()
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in “http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou”.
Returns:
DataStream:
Examples:
>>> ds.freqItems("col-name")
"""
data = self._data.freqItems(cols=cols,support=support)
return DataStream(data=data, metadata=Metadata())
def groupby(self, *cols):
"""
Groups the DataFrame using the specified columns, so we can run aggregation on them.
Args:
*cols: list of columns to group by. Each element should be a column name (string) or an expression (Column)
Returns:
DataStream: wrapping the resulting grouped data, ready for an aggregation call
"""
data = self._data.groupby(*cols)
return DataStream(data=data, metadata=Metadata())
def head(self, n=None):
"""
Returns the first n rows.
Args:
n (int): default 1. Number of rows to return.
Returns:
If n is greater than 1, return a list of Row. If n is 1, return a single Row.
Notes:
This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver’s memory.
Examples:
>>> ds.head(5)
"""
return self._data.head(n=n)
def intersect(self, other):
"""
Return a new DataFrame containing rows only in both this frame and another frame. This is equivalent to INTERSECT in SQL.
Args:
other (int): DataStream object
Returns:
DataStream: rows present in both streams
Examples:
>>> ds.intersect(other=ds2)
"""
data = self._data.intersect(other=other._data)
return DataStream(data=data, metadata=Metadata())
def intersectAll(self, other):
"""
Return a new DataFrame containing rows in both this dataframe and other dataframe while preserving duplicates.
Args:
other (int): DataStream object
Returns:
DataStream: rows present in both streams, preserving duplicates
Examples:
>>> ds.intersectAll(ds2).show()
"""
data = self._data.intersectAll(other=other._data)
return DataStream(data=data, metadata=Metadata())
def join(self, other, on=None, how=None):
"""
Joins with another DataStream, using the given join expression.
Args:
other (DataStream): Right side of the join
on – a string for the join column name, a list of column names, a join expression (Column), or a list of Columns. If on is a string or a list of strings indicating the name of the join column(s), the column(s) must exist on both sides, and this performs an equi-join.
how (str) – str, default inner. Must be one of: inner, cross, outer, full, full_outer, left, left_outer, right, right_outer, left_semi, and left_anti.
Examples:
>>> ds.join(ds2, 'user', 'outer').show()
Returns:
DataStream object with joined streams
"""
data = self._data.join(other=other._data, on=on, how=how)
return DataStream(data=data, metadata=Metadata())
def limit(self, num):
"""
Limits the result count to the number specified.
Args:
num (int): maximum number of rows to return
Returns:
Datastream:
"""
data = self._data.limit(num=num)
return DataStream(data=data, metadata=Metadata())
def orderBy(self, *cols):
"""
order by column name
Args:
*cols: column name(s) or Column expression(s) to order by
Returns:
Datastream:
"""
data = self._data.orderBy(*cols)
return DataStream(data=data, metadata=Metadata())
def printSchema(self):
"""
Prints out the schema in the tree format.
Examples:
>>> ds.printSchema()
"""
self._data.printSchema()
def replace(self, to_replace, value, subset=None):
"""
Returns a new DataStream replacing a value with another value. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. The value can also be None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with {42: -1, 42.0: 1}) an arbitrary replacement will be used.
Args:
to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then value is ignored or can be omitted, and to_replace must be a mapping between a value and a replacement.
value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If value is a list, value should be of the same length and type as to_replace. If value is a scalar and to_replace is a sequence, then value is used as a replacement for each item in to_replace.
subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if value is a string, and subset contains a non-string column, then the non-string column is simply ignored.
Returns:
Datastream:
Examples:
>>> ds.replace(10, 20).show()
>>> ds.replace('some-str', None).show()
>>> ds.replace(['old_val1', 'new_val1'], ['old_val2', 'new_val2'], 'col_name').show()
"""
data = self._data.replace(to_replace, value, subset)
return DataStream(data=data, metadata=Metadata())
def select(self, *cols):
"""
Projects a set of expressions and returns a new DataStream
Args:
cols(str): list of column names (string) or expressions (Column). If one of the column names is ‘*’, that column is expanded to include all columns in the current DataStream
Returns:
DataStream: this will return a new datastream object with selected columns
Examples:
>>> ds.select('*')
>>> ds.select('name', 'age')
>>> ds.select(ds.name, (ds.age + 10).alias('age'))
"""
data = self._data.select(*cols)
return DataStream(data=data, metadata=Metadata())
def selectExpr(self, *expr):
"""
This is a variant of select() that accepts SQL expressions. Projects a set of expressions and returns a new DataStream
Args:
expr(str):
Returns:
DataStream: this will return a new datastream object with selected columns
Examples:
>>> ds.selectExpr("age * 2")
"""
data = self._data.selectExpr(*expr)
return DataStream(data=data, metadata=Metadata())
def sort(self, *cols, **kwargs):
"""
Returns a new DataStream sorted by the specified column(s).
Args:
cols: list of Column or column names to sort by.
ascending: boolean or list of boolean (default True). Sort ascending vs. descending. Specify list for multiple sort orders. If a list is specified, length of the list must equal length of the cols.
Returns:
object: DataStream object
Examples:
>>> ds.sort("col_name", ascending=False)
"""
data = self._data.sort(*cols, **kwargs)
return DataStream(data=data, metadata=Metadata())
def summary(self, *statistics):
"""
Computes specified statistics for numeric and string columns. Available statistics are: - count - mean - stddev - min - max - arbitrary approximate percentiles specified as a percentage (e.g., 75%). If no statistics are given, this function computes count, mean, stddev, min, approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
Args:
*statistics:
Examples:
>>> ds.summary().show()
>>> ds.summary("count", "min", "25%", "75%", "max").show()
>>> # To do a summary for specific columns first select them:
>>> ds.select("col1", "col2").summary("count").show()
"""
self._data.summary(*statistics).show(truncate=False)
def take(self,num):
"""
Returns the first num rows as a list of Row.
Returns:
Row(list): row(s) of a DataStream
Examples:
>>> ds.take()
"""
return self._data.take(num=num)
def toPandas(self):
"""
This method converts pyspark dataframe into pandas dataframe.
Notes:
This method will collect all the data on master node to convert pyspark dataframe into pandas dataframe.
After converting to pandas dataframe datastream objects helper methods will not be accessible.
Returns:
pandas.DataFrame: all of the stream's data collected to the driver as a pandas dataframe
Examples:
>>> CC = CerebralCortex("/directory/path/of/configs/")
>>> ds = CC.get_stream("STREAM-NAME")
>>> new_ds = ds.toPandas()
>>> new_ds.head()
"""
pdf = self._data.toPandas()
return pdf
def union(self, other):
"""
Return a new Datastream containing union of rows in this and another frame.
This is equivalent to UNION ALL in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by distinct().
Also as standard in SQL, this function resolves columns by position (not by name).
Args:
other(DataStream):
Returns:
Datastream:
Examples:
>>> ds.union(ds2).collect()
"""
data = self._data.union(other=other._data)
return DataStream(data=data, metadata=Metadata())
def unionByName(self, other):
"""
Returns a new Datastream containing union of rows in this and another frame.
This is different from both UNION ALL and UNION DISTINCT in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by distinct().
The difference between this function and union() is that this function resolves columns by name (not by position):
Args:
other(DataStream):
Returns:
Datastream:
Examples:
>>> ds.unionByName(ds2).show()
"""
data = self._data.unionByName(other=other._data)
return DataStream(data=data, metadata=Metadata())
def where(self, condition):
"""
where() is an alias for filter().
Args:
condition:
Returns:
Datastream:
Examples:
>>> ds.filter("age > 3").collect()
"""
data = self._data.where(condition)
return DataStream(data=data, metadata=Metadata())
def withColumnRenamed(self, existing, new):
"""
Returns a new DataStream by renaming an existing column. This is a no-op if schema doesn’t contain the given column name.
Args:
existing (str): string, name of the existing column to rename.
new (str): string, new name of the column.
Examples:
>>> ds.withColumnRenamed('col_name', 'new_col_name')
Returns:
DataStream object with new column name(s)
"""
data = self._data.withColumnRenamed(existing=existing, new=new)
return DataStream(data=data, metadata=Metadata())
def withColumn(self, colName, col):
"""
Returns a new DataStream by adding a column or replacing the existing column that has the same name. The column expression must be an expression over this DataStream; attempting to add a column from some other datastream will raise an error.
Args:
colName (str): name of the new column.
col: a Column expression for the new column.
Examples:
>>> ds.withColumn('col_name', ds.col_name + 2)
"""
data = self._data.withColumn(colName=colName, col=col)
return DataStream(data=data, metadata=Metadata())
# !!!! END Wrapper for PySpark Methods !!!
##################### !!!!!!!!!!!!!!!! FEATURE COMPUTATION UDFS !!!!!!!!!!!!!!!!!!! ################################
### COMPUTE FFT FEATURES
def compute_fourier_features(self, exclude_col_names: list = [], feature_names = ["fft_centroid", 'fft_spread', 'spectral_entropy', 'spectral_entropy_old', 'fft_flux',
'spectral_folloff'], windowDuration: int = None, slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
Transforms data from time domain to frequency domain.
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_folloff
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
eps = 0.00000001
exclude_col_names = list(exclude_col_names) + ["timestamp", "localtime", "user", "version"]
data = self._data.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def stSpectralCentroidAndSpread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
# Centroid:
C = (NUM / DEN)
# Spread:
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def stSpectralFlux(X, Xprev):
"""
Computes the spectral flux feature of the current frame
ARGUMENTS:
X: the abs(fft) of the current frame
Xprev: the abs(fft) of the previous frame
"""
# compute the spectral flux as the sum of square distances:
sumX = np.sum(X + eps)
sumPrevX = np.sum(Xprev + eps)
F = np.sum((X / sumX - Xprev / sumPrevX) ** 2)
return F
def stSpectralRollOff(X, c, fs):
"""Computes spectral roll-off"""
totalEnergy = np.sum(X ** 2)
fftLength = len(X)
Thres = c * totalEnergy
# Find the spectral rolloff as the frequency position where the respective spectral energy is equal to c*totalEnergy
CumSum = np.cumsum(X ** 2) + eps
[a, ] = np.nonzero(CumSum > Thres)
if len(a) > 0:
mC = np.float64(a[0]) / (float(fftLength))
else:
mC = 0.0
return (mC)
def stSpectralEntropy(X, numOfShortBlocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = np.sum(X ** 2) # total spectral energy
subWinLength = int(np.floor(L / numOfShortBlocks)) # length of sub-frame
if L != subWinLength * numOfShortBlocks:
X = X[0:subWinLength * numOfShortBlocks]
subWindows = X.reshape(subWinLength, numOfShortBlocks,
order='F').copy() # define sub-frames (using matrix reshape)
s = np.sum(subWindows ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -np.sum(s * np.log2(s + eps)) # compute spectral entropy
return En
def spectral_entropy(data, sampling_freq, bands=None):
psd = np.abs(np.fft.rfft(data)) ** 2
psd /= np.sum(psd) # psd as a pdf (normalised to one)
if bands is None:
power_per_band = psd[psd > 0]
else:
freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
bands = np.asarray(bands)
freq_limits_low = np.concatenate([[0.0], bands])
freq_limits_up = np.concatenate([bands, [np.inf]])
power_per_band = [np.sum(psd[ | np.bitwise_and(freqs >= low, freqs < up) | numpy.bitwise_and |
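# A minimal, standalone sketch of the spectral centroid/spread computation used above, on a
# plain 1-D numpy window sampled at `fs` Hz. It is not wired into the Spark pipeline; it only
# illustrates the maths of stSpectralCentroidAndSpread, and the helper name is hypothetical.
import numpy as np

def fft_centroid_spread(x, fs, eps=1e-8):
    X = np.abs(np.fft.rfft(x))
    ind = np.arange(1, len(X) + 1) * (fs / (2.0 * len(X)))
    Xt = X / (X.max() + eps)
    den = np.sum(Xt) + eps
    centroid = np.sum(ind * Xt) / den
    spread = np.sqrt(np.sum(((ind - centroid) ** 2) * Xt) / den)
    # normalise both to [0, 1] by the Nyquist frequency, as the code above does
    return centroid / (fs / 2.0), spread / (fs / 2.0)

# e.g. c, s = fft_centroid_spread(np.random.randn(256), fs=16.0)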
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import re
import tensorflow as tf
import numpy as np
from PIL import Image
from tqdm import tqdm
from os import listdir
from os.path import join, isdir
# from sklearn.metrics import \
# precision_score, recall_score, f1_score, accuracy_score
from models import utils
class Test(object):
def __init__(self,
cfg,
multi_gpu=False,
version=None,
load_last_ckp=True,
during_training=False,
epoch_train=None,
step_train=None,
model_arch_info=None):
# Config
self.cfg = cfg
self.multi_gpu = multi_gpu
self.version = version
self.load_last_ckp = load_last_ckp
self.during_training = during_training
self.epoch_train = epoch_train
self.step_train = step_train
self.append_info = self.info[0]
# Use encode transfer learning
if self.cfg.TRANSFER_LEARNING == 'encode':
self.tl_encode = True
else:
self.tl_encode = False
# Get paths for testing
self.checkpoint_path, self.test_log_path, self.test_image_path = \
self._get_paths()
# Save config
utils.save_config_log(self.test_log_path, self.cfg, model_arch_info)
# Load data
self.x_test, self.y_test, self.imgs_test = self._load_data()
@property
def info(self):
"""Get information of the class."""
return '', 'Single-object'
def _get_ckp_idx(self):
"""Get checkpoint index."""
if self.load_last_ckp:
ckp_indices = []
for f_name in listdir(join(self.cfg.CHECKPOINT_PATH, self.version)):
m = re.match(r'.*-(\d*).meta', f_name)
if m:
ckp_indices.append(int(m.group(1)))
return max(ckp_indices)
else:
return self.cfg.TEST_CKP_IDX
def _get_paths(self):
"""Get paths for testing."""
if self.during_training:
# Get log path
test_log_path_ = join(
self.cfg.TEST_LOG_PATH, self.version) + self.append_info
test_log_path = test_log_path_
i_append_info = 0
if self.epoch_train == 0:
while isdir(test_log_path):
i_append_info += 1
test_log_path = test_log_path_ + '({})'.format(i_append_info)
# Path for saving images
if self.epoch_train == 'end':
test_image_path = join(test_log_path, 'images')
else:
test_image_path = join(
join(test_log_path, 'images'),
'epoch-{}_batch-{}'.format(self.epoch_train, self.step_train))
checkpoint_path = None
else:
ckp_idx = self._get_ckp_idx()
# Get checkpoint path
checkpoint_path = join(
self.cfg.CHECKPOINT_PATH,
'{}/models.ckpt-{}'.format(self.version, ckp_idx))
# Get log path, append information if the directory exists.
test_log_path_ = join(
self.cfg.TEST_LOG_PATH,
'{}-{}'.format(self.version, ckp_idx)) + self.append_info
test_log_path = test_log_path_
i_append_info = 0
while isdir(test_log_path):
i_append_info += 1
test_log_path = test_log_path_ + '({})'.format(i_append_info)
# Path for saving images
test_image_path = join(test_log_path, 'images')
# Check directory of paths
utils.check_dir([test_log_path, test_image_path])
return checkpoint_path, test_log_path, test_image_path
def _load_data(self):
utils.thick_line()
print('Loading data...')
utils.thin_line()
if self.cfg.DATABASE_MODE is not None:
preprocessed_path_ = join(
'../data/{}'.format(self.cfg.DATABASE_MODE), self.cfg.DATABASE_NAME)
else:
preprocessed_path_ = join(self.cfg.DPP_DATA_PATH, self.cfg.DATABASE_NAME)
x = utils.load_pkls(
preprocessed_path_, 'x_test' + self.append_info,
tl=self.tl_encode, add_n_batch=1)
y = utils.load_pkls(
preprocessed_path_, 'y_test' + self.append_info)
imgs = utils.load_pkls(
preprocessed_path_, 'imgs_test' + self.append_info)
utils.thin_line()
print('Data info:')
utils.thin_line()
print('x_test: {}\ny_test: {}\nimgs_test: {}'.format(
x.shape,
y.shape,
imgs.shape))
return x, y, imgs
def _get_tensors(self, loaded_graph):
"""Get inputs, labels, loss, and accuracy tensor from <loaded_graph>."""
with loaded_graph.as_default():
utils.thin_line()
print('Loading graph and tensors...')
inputs_ = loaded_graph.get_tensor_by_name('inputs:0')
labels_ = loaded_graph.get_tensor_by_name('labels:0')
input_imgs_ = loaded_graph.get_tensor_by_name('input_imgs:0')
is_training = loaded_graph.get_tensor_by_name('is_training:0')
if self.multi_gpu:
accuracy_ = loaded_graph.get_tensor_by_name('total_acc:0')
loss_ = loaded_graph.get_tensor_by_name('total_loss:0')
clf_preds_ = loaded_graph.get_tensor_by_name('total_clf_preds:0')
if self.cfg.TEST_WITH_REC:
clf_loss_ = loaded_graph.get_tensor_by_name('total_clf_loss:0')
rec_loss_ = loaded_graph.get_tensor_by_name('total_rec_loss:0')
rec_imgs_ = loaded_graph.get_tensor_by_name('total_rec_imgs:0')
return inputs_, labels_, input_imgs_, is_training, \
loss_, accuracy_, clf_loss_, clf_preds_, rec_loss_, rec_imgs_
else:
return inputs_, labels_, input_imgs_, is_training, \
clf_preds_, loss_, accuracy_
else:
accuracy_ = loaded_graph.get_tensor_by_name('accuracy:0')
loss_ = loaded_graph.get_tensor_by_name('loss:0')
clf_preds_ = loaded_graph.get_tensor_by_name('clf_preds:0')
if self.cfg.TEST_WITH_REC:
clf_loss_ = loaded_graph.get_tensor_by_name('clf_loss:0')
rec_loss_ = loaded_graph.get_tensor_by_name('rec_loss:0')
rec_imgs_ = loaded_graph.get_tensor_by_name('rec_imgs:0')
return inputs_, labels_, input_imgs_, is_training, \
loss_, accuracy_, clf_loss_, clf_preds_, rec_loss_, rec_imgs_
else:
return inputs_, labels_, input_imgs_, is_training, \
clf_preds_, loss_, accuracy_
def _get_top_n_accuracy(self, preds_vec):
"""Get top N accuracy."""
accuracy_top_n_list = []
for top_n in self.cfg.TOP_N_LIST:
accuracy_top_n = []
for pred_vec, y_true in zip(preds_vec, self.y_test):
y_pred_idx_top_n = np.argsort(pred_vec)[-top_n:]
y_true_idx = np.argmax(y_true)
if y_true_idx in y_pred_idx_top_n:
accuracy_top_n.append(1)
else:
accuracy_top_n.append(0)
accuracy_top_n = np.mean(accuracy_top_n)
accuracy_top_n_list.append(accuracy_top_n)
assert len(accuracy_top_n_list) == len(self.cfg.TOP_N_LIST)
return accuracy_top_n_list
def _get_preds_int(self, preds_vec):
"""Get integer predictions."""
utils.thin_line()
print('Converting prediction vectors to ints...')
preds = np.argmax(np.array(preds_vec), axis=1)
# Save preds
if self.cfg.SAVE_TEST_PRED:
if self.during_training and (self.epoch_train != 'end'):
utils.save_test_pred_is_training(
self.test_log_path, self.epoch_train, self.step_train,
self.y_test, preds, preds_vec, save_num=20, pred_is_int=True)
else:
utils.save_test_pred(self.test_log_path, self.y_test,
preds, preds_vec, pred_is_int=True)
return preds
def _save_images(self,
sess,
rec_imgs,
inputs,
labels,
is_training,
x,
y,
imgs,
step=None):
"""Save reconstructed images."""
rec_imgs_ = sess.run(
rec_imgs, feed_dict={inputs: x, labels: y, is_training: False})
utils.save_imgs(
real_imgs=imgs,
rec_imgs=rec_imgs_,
img_path=self.test_image_path,
database_name=self.cfg.DATABASE_NAME,
max_img_in_col=self.cfg.MAX_IMAGE_IN_COL,
step=step,
silent=True,
test_flag=True)
def _eval_on_batches(self,
sess,
inputs,
labels,
input_imgs,
is_training,
preds,
loss,
acc,
clf_loss,
rec_loss,
rec_imgs):
"""Calculate losses and accuracies of full train set."""
pred_all = []
loss_all = []
acc_all = []
clf_loss_all = []
rec_loss_all = []
step = 0
batch_generator = utils.get_batches(
x=self.x_test,
y=self.y_test,
imgs=self.imgs_test,
batch_size=self.cfg.TEST_BATCH_SIZE,
keep_last=True)
if len(self.x_test) % self.cfg.TEST_BATCH_SIZE == 0:
n_batch = (len(self.x_test) // self.cfg.TEST_BATCH_SIZE)
else:
n_batch = (len(self.x_test) // self.cfg.TEST_BATCH_SIZE) + 1
if self.cfg.TEST_WITH_REC:
for _ in tqdm(range(n_batch), total=n_batch,
ncols=100, unit=' batch'):
step += 1
x_batch, y_batch, imgs_batch = next(batch_generator)
len_batch = len(x_batch)
if len_batch == self.cfg.TEST_BATCH_SIZE:
pred_i, loss_i, clf_loss_i, rec_loss_i, acc_i = \
sess.run([preds, loss, clf_loss, rec_loss, acc],
feed_dict={inputs: x_batch,
labels: y_batch,
input_imgs: imgs_batch,
is_training: False})
loss_all.append(loss_i)
clf_loss_all.append(clf_loss_i)
rec_loss_all.append(rec_loss_i)
acc_all.append(acc_i)
# Save reconstruct images
if self.cfg.TEST_SAVE_IMAGE_STEP:
if step % self.cfg.TEST_SAVE_IMAGE_STEP == 0:
self._save_images(sess, rec_imgs, inputs, labels, is_training,
x_batch, y_batch, imgs_batch, step=step)
else:
# The last batch which has less examples
for i in range(self.cfg.TEST_BATCH_SIZE - len_batch):
x_batch = np.append(x_batch, np.expand_dims(
np.zeros_like(x_batch[0]), axis=0), axis=0)
assert len(x_batch) == self.cfg.TEST_BATCH_SIZE
pred_i = sess.run(preds, feed_dict={inputs: x_batch,
is_training: False})
pred_i = pred_i[:len_batch]
pred_all.extend(list(pred_i))
clf_loss_ = sum(clf_loss_all) / len(clf_loss_all)
rec_loss_ = sum(rec_loss_all) / len(rec_loss_all)
else:
for _ in tqdm(range(n_batch), total=n_batch,
ncols=100, unit=' batches'):
x_batch, y_batch, imgs_batch = next(batch_generator)
len_batch = len(x_batch)
if len_batch == self.cfg.TEST_BATCH_SIZE:
pred_i, loss_i, acc_i = \
sess.run([preds, loss, acc],
feed_dict={inputs: x_batch,
labels: y_batch,
input_imgs: imgs_batch,
is_training: False})
loss_all.append(loss_i)
acc_all.append(acc_i)
else:
# The last batch which has less examples
for i in range(self.cfg.TEST_BATCH_SIZE - len_batch):
x_batch = np.append(x_batch, np.expand_dims(
np.zeros_like(x_batch[0]), axis=0), axis=0)
assert len(x_batch) == self.cfg.TEST_BATCH_SIZE
pred_i = sess.run(preds, feed_dict={inputs: x_batch,
is_training: False})
pred_i = pred_i[:len_batch]
pred_all.extend(list(pred_i))
clf_loss_, rec_loss_ = None, None
loss_ = sum(loss_all) / len(loss_all)
acc_ = sum(acc_all) / len(acc_all)
assert len(pred_all) == len(self.x_test), (len(pred_all), len(self.x_test))
preds_vec = np.array(pred_all)
return preds_vec, loss_, clf_loss_, rec_loss_, acc_
def tester(self, sess, inputs, labels, input_imgs,
is_training, clf_preds, rec_imgs, start_time,
loss=None, acc=None, clf_loss=None, rec_loss=None):
utils.thin_line()
print('Calculating loss and accuracy of test set...')
# Get losses and accuracies
clf_preds_vec_test, loss_test, clf_loss_test, rec_loss_test, acc_test = \
self._eval_on_batches(
sess, inputs, labels, input_imgs, is_training, clf_preds,
loss, acc, clf_loss, rec_loss, rec_imgs)
# Get integer predictions
_ = self._get_preds_int(preds_vec=clf_preds_vec_test)
# Get top N accuracy
if self.cfg.TOP_N_LIST is not None:
acc_top_n_list = self._get_top_n_accuracy(clf_preds_vec_test)
else:
acc_top_n_list = None
# Print losses and accuracy
utils.thin_line()
print('Test Loss: {:.4f}'.format(loss_test))
if self.cfg.TEST_WITH_REC:
print('Test Classifier Loss: {:.4f}'.format(clf_loss_test))
print('Test Reconstruction Loss: {:.4f}'.format(rec_loss_test))
print('Test Accuracy: {:.4f}%'.format(acc_test * 100))
if self.cfg.TOP_N_LIST is not None:
utils.thin_line()
for i, top_n in enumerate(self.cfg.TOP_N_LIST):
print('Top_{} Test Accuracy: {:.4f}%'.format(
top_n, acc_top_n_list[i] * 100))
# Save test log
if self.during_training and (self.epoch_train != 'end'):
utils.save_test_log_is_training(
self.test_log_path, self.epoch_train, self.step_train,
loss_test, acc_test, clf_loss_test, rec_loss_test,
self.cfg.TEST_WITH_REC, self.cfg.TOP_N_LIST, acc_top_n_list)
else:
utils.save_test_log(
self.test_log_path, loss_test, acc_test, clf_loss_test,
rec_loss_test, self.cfg.TEST_WITH_REC,
self.cfg.TOP_N_LIST, acc_top_n_list)
utils.thin_line()
print('Testing finished! Using time: {:.2f}'
.format(time.time() - start_time))
utils.thick_line()
def test(self):
"""Test models."""
start_time = time.time()
tf.reset_default_graph()
loaded_graph = tf.Graph()
utils.thick_line()
print('Testing on {} test set...'.format(self.info[1]))
with tf.Session(graph=loaded_graph) as sess:
# Load saved models
loader = tf.train.import_meta_graph(self.checkpoint_path + '.meta')
loader.restore(sess, self.checkpoint_path)
# Get Tensors from loaded models
if self.cfg.TEST_WITH_REC:
inputs, labels, input_imgs, is_training, \
loss, acc, clf_loss, clf_preds, rec_loss, rec_imgs = \
self._get_tensors(loaded_graph)
else:
inputs, labels, input_imgs, is_training, clf_preds, loss, acc = \
self._get_tensors(loaded_graph)
clf_loss, rec_loss, rec_imgs = None, None, None
self.tester(sess, inputs, labels, input_imgs, is_training,
clf_preds, rec_imgs, start_time, loss=loss, acc=acc,
clf_loss=clf_loss, rec_loss=rec_loss)
class TestMultiObjects(Test):
def __init__(self,
cfg,
multi_gpu=False,
version=None,
load_last_ckp=True,
during_training=False,
epoch_train=None,
step_train=None,
model_arch_info=None):
super(TestMultiObjects, self).__init__(cfg,
multi_gpu=multi_gpu,
version=version,
load_last_ckp=load_last_ckp,
during_training=during_training,
epoch_train=epoch_train,
step_train=step_train,
model_arch_info=model_arch_info)
@property
def info(self):
"""Get information of the class."""
return '_multi_obj', 'Multi-object'
def _get_tensors(self, loaded_graph):
"""Get inputs, labels, loss, and accuracy tensor from <loaded_graph>."""
with loaded_graph.as_default():
utils.thin_line()
print('Loading graph and tensors...')
inputs_ = loaded_graph.get_tensor_by_name('inputs:0')
labels_ = loaded_graph.get_tensor_by_name('labels:0')
input_imgs_ = loaded_graph.get_tensor_by_name('input_imgs:0')
is_training = loaded_graph.get_tensor_by_name('is_training:0')
if self.multi_gpu:
clf_preds_ = loaded_graph.get_tensor_by_name('total_clf_preds:0')
if self.cfg.TEST_WITH_REC:
rec_imgs_ = loaded_graph.get_tensor_by_name('total_rec_imgs:0')
return inputs_, labels_, input_imgs_, \
is_training, clf_preds_, rec_imgs_
else:
return inputs_, labels_, input_imgs_, is_training, clf_preds_
else:
clf_preds_ = loaded_graph.get_tensor_by_name('clf_preds:0')
if self.cfg.TEST_WITH_REC:
rec_imgs_ = loaded_graph.get_tensor_by_name('rec_imgs:0')
return inputs_, labels_, input_imgs_, \
is_training, clf_preds_, rec_imgs_
else:
return inputs_, labels_, input_imgs_, is_training, clf_preds_
def _get_preds_vector(self,
sess,
inputs,
preds,
is_training):
"""Get prediction vectors of full train set."""
utils.thin_line()
print('Getting prediction vectors...')
pred_all = []
_batch_generator = utils.get_batches(
self.x_test, batch_size=self.cfg.TEST_BATCH_SIZE, keep_last=True)
if len(self.x_test) % self.cfg.TEST_BATCH_SIZE == 0:
n_batch = (len(self.x_test) // self.cfg.TEST_BATCH_SIZE)
else:
n_batch = (len(self.x_test) // self.cfg.TEST_BATCH_SIZE) + 1
for _ in tqdm(range(n_batch), total=n_batch,
ncols=100, unit=' batch'):
x_batch = next(_batch_generator)
# The last batch which has less examples
len_batch = len(x_batch)
if len_batch != self.cfg.TEST_BATCH_SIZE:
for i in range(self.cfg.TEST_BATCH_SIZE - len_batch):
x_batch = np.append(x_batch, np.expand_dims(
np.zeros_like(x_batch[0]), axis=0), axis=0)
assert len(x_batch) == self.cfg.TEST_BATCH_SIZE
pred_i = sess.run(preds, feed_dict={inputs: x_batch, is_training: False})
if len_batch != self.cfg.TEST_BATCH_SIZE:
pred_i = pred_i[:len_batch]
pred_all.extend(list(pred_i))
assert len(pred_all) == len(self.x_test), (len(pred_all), len(self.x_test))
return np.array(pred_all)
def _get_preds_binary(self, preds_vec):
"""Get binary predictions.
-> [0, 0, 1, ..., 0, 1, 0] as labels
"""
utils.thin_line()
print('Converting prediction vectors to binaries...')
preds = np.array(preds_vec)
if self.cfg.MOD_PRED_MODE == 'top_n':
for pred_i in preds:
pos_idx = np.argsort(pred_i)[-self.cfg.MOD_PRED_MAX_NUM:]
neg_idx = np.argsort(pred_i)[:-self.cfg.MOD_PRED_MAX_NUM]
pred_i[pos_idx] = 1
pred_i[neg_idx] = 0
elif self.cfg.MOD_PRED_MODE == 'length_rate':
for pred_i in preds:
pred_i_copy = pred_i.copy()
max_ = pred_i.max()
pred_i[pred_i < (max_ * self.cfg.MOD_PRED_THRESHOLD)] = 0
pred_i[pred_i >= (max_ * self.cfg.MOD_PRED_THRESHOLD)] = 1
if np.sum(pred_i) > self.cfg.MOD_PRED_MAX_NUM:
pos_idx = np.argsort(pred_i_copy)[-self.cfg.MOD_PRED_MAX_NUM:]
neg_idx = np.argsort(pred_i_copy)[:-self.cfg.MOD_PRED_MAX_NUM]
pred_i[pos_idx] = 1
pred_i[neg_idx] = 0
else:
raise ValueError(
'Wrong Mode Name! Find {}!'.format(self.cfg.MOD_PRED_MODE))
if self.cfg.SAVE_TEST_PRED:
if self.during_training and (self.epoch_train != 'end'):
utils.save_test_pred_is_training(
self.test_log_path, self.epoch_train, self.step_train,
self.y_test, preds, preds_vec, save_num=20)
else:
utils.save_test_pred(self.test_log_path, self.y_test,
preds, preds_vec)
return np.array(preds, dtype=int)
def _get_multi_obj_scores(self, preds, preds_vec):
"""Get evaluation scores for multi-objects detection."""
utils.thin_line()
print('Calculating evaluation scores for {} detection...'.format(
self.info[1]))
def _f_beta_score(p, r, beta):
if p + r == 0:
return 0.
else:
return ((1 + (beta ** 2)) * p * r) / ((beta ** 2) * p + r)
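# _f_beta_score implements F_beta = (1 + beta^2) * P * R / (beta^2 * P + R):
# beta = 1 gives the usual F1 score, beta = 0.5 weights precision more heavily,
# and beta = 2 weights recall more heavily.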
# Calculate scores manually
precision = []
recall = []
accuracy = []
f1score = []
f05score = []
f2score = []
for pred_vec, y_true in zip(preds, self.y_test):
# true positive
tp = np.sum(np.multiply(y_true, pred_vec))
# false positive
fp = np.sum(np.logical_and(np.equal(y_true, 0), np.equal(pred_vec, 1)))
# false negative
fn = np.sum(np.logical_and(np.equal(y_true, 1), np.equal(pred_vec, 0)))
# true negative
tn = np.sum(np.logical_and(np.equal(y_true, 0), np.equal(pred_vec, 0)))
precision_ = tp / (tp + fp)
accuracy_ = (tp + tn) / (tp + fp + tn + fn)
recall_ = tp / (tp + fn)
precision.append(precision_)
accuracy.append(accuracy_)
recall.append(recall_)
f1score.append(_f_beta_score(precision_, recall_, 1.))
f05score.append(_f_beta_score(precision_, recall_, 0.5))
f2score.append(_f_beta_score(precision_, recall_, 2.))
precision = np.mean(precision)
recall = np.mean(recall)
accuracy = np.mean(accuracy)
f1score = np.mean(f1score)
f05score = np.mean(f05score)
f2score = np.mean(f2score)
# true positive
tp = np.sum(np.multiply(preds, self.y_test))
print('TRUE POSITIVE: ', tp)
# false positive
fp = np.sum(np.logical_and(np.equal(self.y_test, 0), np.equal(preds, 1)))
print('FALSE POSITIVE: ', fp)
# false negative
fn = np.sum(np.logical_and(np.equal(self.y_test, 1), np.equal(preds, 0)))
print('FALSE NEGATIVE: ', fn)
# true negative
tn = np.sum(np.logical_and(np.equal(self.y_test, 0), np.equal(preds, 0)))
print('TRUE NEGATIVE: ', tn)
# Calculate scores by using scikit-learn tools
# precision = precision_score(self.y_test, preds, average='samples')
# recall = recall_score(self.y_test, preds, average='samples')
# accuracy = accuracy_score(self.y_test, preds)
# f1score = f1_score(self.y_test, preds, average='samples')
# Top_N
precision_top_n_list = []
if self.cfg.TOP_N_LIST is not None:
for top_n in self.cfg.TOP_N_LIST:
precision_top_n = []
for pred_vec, y_true in zip(preds_vec, self.y_test):
y_pred_idx_top_n = np.argsort(pred_vec)[-top_n:]
y_true_idx = []
for i_, y_i in enumerate(y_true):
if y_i == 1:
y_true_idx.append(i_)
tp_top_n = 0
fp_top_n = 0
for y_idx in y_true_idx:
if y_idx in y_pred_idx_top_n:
tp_top_n += 1
else:
fp_top_n += 1
assert tp_top_n + fp_top_n == len(y_true_idx)
precision_top_n_ = tp_top_n / (tp_top_n + fp_top_n)
precision_top_n.append(precision_top_n_)
precision_top_n = np.mean(precision_top_n)
precision_top_n_list.append(precision_top_n)
assert len(precision_top_n_list) == len(self.cfg.TOP_N_LIST)
# Print evaluation information
utils.print_multi_obj_eval(
precision, recall, accuracy,
f1score, f05score, f2score,
self.cfg.TOP_N_LIST, precision_top_n_list)
# Save evaluation scores of multi-objects detection.
if self.during_training and (self.epoch_train != 'end'):
utils.save_multi_obj_scores_is_training(
self.test_log_path, self.epoch_train, self.step_train,
precision, recall, accuracy, f1score, f05score, f2score,
self.cfg.TOP_N_LIST, precision_top_n_list)
else:
utils.save_multi_obj_scores(
self.test_log_path, precision, recall,
accuracy, f1score, f05score, f2score,
self.cfg.TOP_N_LIST, precision_top_n_list)
def _save_images_mo(self,
sess,
rec_imgs,
inputs,
labels,
is_training,
preds_binary,
preds_vector):
"""Save reconstructed images."""
utils.thin_line()
print('Getting reconstruction images...')
if len(self.y_test) > self.cfg.MAX_IMAGE_IN_COL ** 2:
n_test_img = self.cfg.MAX_IMAGE_IN_COL ** 2
test_img_idx = np.random.choice(len(self.y_test), n_test_img, replace=False)
else:
test_img_idx = list(range(len(self.y_test)))
rec_imgs_ = []
preds_vec_ = []
if self.cfg.LABEL_FOR_TEST == 'pred':
label_for_img = preds_binary
elif self.cfg.LABEL_FOR_TEST == 'real':
label_for_img = self.y_test
else:
raise ValueError('Wrong LABEL_FOR_TEST Name!')
for x, y_hat, pred_ in tqdm(zip(self.x_test[test_img_idx],
label_for_img[test_img_idx],
preds_vector[test_img_idx]),
total=len(test_img_idx),
ncols=100, unit=' image'):
# Get new x and y_hat list in which each y contain single object
# [0, 1, 0, 1, 0] -> [[0, 1, 0, 0, 0],
# [0, 0, 0, 1, 0]]
x_new = []
y_hat_new = []
preds_vec_new = []
for i, y_i in enumerate(y_hat):
if y_i == 1:
y_hat_new_i = np.zeros_like(y_hat)
y_hat_new_i[i] = 1
assert y_hat_new_i[i] == y_hat[i]
x_new.append(x)
y_hat_new.append(y_hat_new_i)
preds_vec_new.append(pred_[i])
preds_vec_.append(preds_vec_new)
# Filling x and y tensor to batch size for testing
# [[0, 1, 0, 0, 0],
# [0, 0, 0, 1, 0]] -> [[0, 1, 0, 0, 0],
# [0, 0, 0, 1, 0],
# ...
# [0, 0, 0, 0, 0]]
n_y = len(y_hat_new)
assert n_y == int( | np.sum(y_hat) | numpy.sum |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 10:12:43 2019
@author: amandaash
"""
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
"""
t1 = np.arange(0,1,0.001)
t2 = np.arange(1, 1000, 0.1)
def reduced_B_funcn(t):
m = np.tanh(m/t)
return(m)
def reduced_B(t):
f = np.tanh(m/t) - m
return f
roots_below = []
for temp in t1:
roots1 = opt.newton(reduced_B(temp), 8.5)
roots_below.append(roots1)
"""
t = np.arange(0,0.999,0.001)
roots_t1 = []
for temp in t:
def reduced_B(m):
f = np.tanh(m/temp) - m
return f
roots_t1.append(opt.newton(reduced_B, 1))
print(temp, opt.newton(reduced_B, 1))
def reduced_B_05(m):
f = np.tanh(m/0.5) - m
return f
print(opt.bisect(reduced_B_05, 0.8,1.3))
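# The root of tanh(m/t) - m = 0 is the reduced magnetisation of the mean-field model;
# a non-zero solution exists only for t < 1, which is why the temperature grid above
# stops just below t = 1.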
#plt.vlines(0, 10, x = roots_t1)
plt.plot(t, roots_t1, '.')
plt.xlabel('t')
plt.ylabel('m(t)')
plt.savefig('mvt.pdf')
plt.show()
m = np.arange(0,10,0.01)
def reduced_B(m,t):
f = | np.tanh(m/t) | numpy.tanh |
"""Test dephasing."""
import numpy as np
from toqito.channels import dephasing
from toqito.channel_ops import apply_channel
def test_dephasing_completely_dephasing():
"""The completely dephasing channel kills everything off diagonal."""
test_input_mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
expected_res = np.array([[1, 0, 0, 0], [0, 6, 0, 0], [0, 0, 11, 0], [0, 0, 0, 16]])
res = apply_channel(test_input_mat, dephasing(4))
bool_mat = np.isclose(expected_res, res)
np.testing.assert_equal(np.all(bool_mat), True)
def test_dephasing_partially_dephasing():
"""The partially dephasing channel for `p = 0.5`."""
test_input_mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
expected_res = np.array([[17.5, 0, 0, 0], [0, 20, 0, 0], [0, 0, 22.5, 0], [0, 0, 0, 25]])
res = apply_channel(test_input_mat, dephasing(4, 0.5))
bool_mat = | np.isclose(expected_res, res) | numpy.isclose |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 8 11:10:46 2021
@author: arash
"""
import pandas as pd
import numpy as np
from sklearn.metrics import pairwise_distances
def AugmentDic():
expert=pd.read_csv('expert.csv').dropna().reset_index(drop=True)  # reset the index so the positional argmin/loc lookups below stay aligned
for a in expert.columns[1:]:
expert[a]=expert[a]/max(expert[a])
A=expert.to_numpy()
P=pairwise_distances(A[:,1:])
D=dict()
for ind, row in expert.iterrows():
P[ind,ind]=10000
a=np.argmin(P[ind,:])
D[row['name']]=expert.loc[a,'name']
return D
def Augment(by_user, D, user):
lis=list(by_user.keys())
for k in lis:
X=pd.DataFrame(by_user[k])
new_id=np.random.randint(3000,7000)
u=user[user['id']==k].copy()  # copy to avoid SettingWithCopyWarning when overwriting the id
u['id']=new_id
user=user.append(u)
idx=np.random.randint(0, len(X), size=10)
Y=X.copy()
for idx2 in idx:
try:
tempD=D[Y.loc[idx2,'activity']]
Y.loc[idx2,'activity']= | np.random.choice(tempD) | numpy.random.choice |
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by <NAME> (<EMAIL>) 21/04/2021, 17:37. Copyright (c) <NAME>
from multiprocessing.dummy import Pool
from typing import List, Tuple, Union
from warnings import warn
import numpy as np
from astropy.units import Quantity, pix, kpc
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
from tqdm import tqdm
from ..exceptions import NoRegionsError, NoProductAvailableError, XGAFitError, ModelNotAssociatedError, \
ParameterNotAssociatedError
from ..imagetools.profile import radial_brightness
from ..samples.extended import ClusterSample
from ..sources import GalaxyCluster
from ..utils import NUM_CORES
from ..xspec.fakeit import cluster_cr_conv
from ..xspec.fit import single_temp_apec
def _stack_setup_checks(sources: ClusterSample, scale_radius: str = "r200", lo_en: Quantity = Quantity(0.5, 'keV'),
hi_en: Quantity = Quantity(2.0, 'keV'), psf_corr: bool = False, psf_model: str = "ELLBETA",
psf_bins: int = 4, psf_algo: str = "rl", psf_iter: int = 15):
"""
Internal function that was originally split off from radial data stack. This performs checks to make sure passed
in values are valid for all types of stacking available in this part of XGA.
:param ClusterSample sources: The source objects that will contribute to the stacked brightness profile.
:param str scale_radius: The over-density radius to scale the cluster radii by, all GalaxyCluster objects must
have an entry for this radius.
:param Quantity lo_en: The lower energy limit of the data that goes into the stacked profiles.
:param Quantity hi_en: The upper energy limit of the data that goes into the stacked profiles.
:param bool psf_corr: If True, PSF corrected ratemaps will be used to make the brightness profile stack.
:param str psf_model: If PSF corrected, the PSF model used.
:param int psf_bins: If PSF corrected, the number of bins per side.
:param str psf_algo: If PSF corrected, the algorithm used.
:param int psf_iter: If PSF corrected, the number of algorithm iterations.
"""
# Checking that all the sources are GalaxyClusters
if not all([isinstance(s, GalaxyCluster) for s in sources]):
raise TypeError("Currently only GalaxyCluster source objects may be analysed in this way.")
# Checking that every single GalaxyCluster object was supplied with the scale radius chosen by the user
if scale_radius.lower() == "r200":
rad_check = [s.r200 is not None for s in sources]
elif scale_radius.lower() == "r500":
rad_check = [s.r500 is not None for s in sources]
elif scale_radius.lower() == "r2500":
rad_check = [s.r2500 is not None for s in sources]
else:
raise ValueError("{0} is not an acceptable overdensity radius, please use r200, r500, or "
"r2500.".format(scale_radius))
if not all(rad_check):
raise NoRegionsError("Some GalaxyCluster objects are missing the {} region".format(scale_radius))
if psf_corr:
psf_key = "bound_{l}-{u}_{m}_{n}_{a}{i}".format(l=lo_en.value, u=hi_en.value, m=psf_model, n=psf_bins,
a=psf_algo, i=psf_iter)
psf_corr_avail = [len(source.get_products("combined_ratemap", extra_key=psf_key)) != 0 for source in sources]
if False in psf_corr_avail:
raise NoProductAvailableError("At least one source does not have PSF corrected "
"image products available.")
def _create_stack(sb: np.ndarray, sources: ClusterSample, scale_radius: str, lo_en: Quantity, hi_en: Quantity,
custom_temps: Quantity, sim_met: Union[float, List] = 0.3, abund_table: str = 'angr') \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, List]:
"""
Internal function that was originally split off from radial data stack. Takes the surface brightness profiles
that have been generated for radii as a fraction of the scale radius. It then calculates the scaling factors and
combines them into a single stacked profile.
:param np.ndarray sb: The surface brightness data output for all the sources.
:param ClusterSample sources: The source objects that will contribute to the stacked brightness profile.
:param str scale_radius: The overdensity radius to scale the cluster radii by, all GalaxyCluster objects must
have an entry for this radius.
:param Quantity lo_en: The lower energy limit of the data that goes into the stacked profiles.
:param Quantity hi_en: The upper energy limit of the data that goes into the stacked profiles.
:param Quantity custom_temps: Temperatures at which to calculate conversion factors for each cluster
in sources, they will overwrite any temperatures measured by XGA. A single temperature can be passed to be used
for all clusters in sources. If None, appropriate temperatures will be retrieved from the source objects.
:param float/List sim_met: The metallicity(s) to use when calculating the conversion factor. Pass a
single float to use the same value for all sources, or pass a list to use a different value for each.
:param str abund_table: The abundance table to use for the temperature fit and conversion factor calculation.
:return: The average profile, all scaled profiles, the covariance matrix, normalised covariance, and names
of successful profiles.
:rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, List]
"""
# Now, we have all the brightness values at common radii (in units of R200 so scaled properly), now we have
# to weight the SB values so they are directly comparable. This accounts for redshift, nH, and sort-of for
# the temperature of each cluster.
# Calculate all the conversion factors
if custom_temps is not None:
# I'm not going to give the user the ability to choose the specifics of the spectra that are
# being used to calculate conversion factors - I'll use standard settings. This function will
# also make sure that they've actually been generated.
cluster_cr_conv(sources, scale_radius, custom_temps, sim_met=sim_met, abund_table=abund_table)
else:
# Use a simple single_temp_apec to fit said spectra, but only if we haven't had custom temperatures
# passed in
single_temp_apec(sources, scale_radius, abund_table=abund_table)
temp_temps = []
for src in sources:
try:
# A temporary temperature variable
temp_temp = src.get_temperature(scale_radius, "constant*tbabs*apec")[0]
except (ModelNotAssociatedError, ParameterNotAssociatedError):
warn("{s}'s temperature fit is not valid, so I am defaulting to a temperature of "
"3keV".format(s=src.name))
temp_temp = Quantity(3, 'keV')
temp_temps.append(temp_temp.value)
temps = Quantity(temp_temps, 'keV')
cluster_cr_conv(sources, scale_radius, sim_temp=temps, sim_met=sim_met, abund_table=abund_table)
combined_factors = []
# Now to generate a combined conversion factor from count rate to luminosity
for source in sources:
combined_factors.append(source.combined_lum_conv_factor(scale_radius, lo_en, hi_en).value)
# Check for NaN values in the brightness profiles we've retrieved - very bad if they exist
no_nan = np.where(~np.isnan(sb.sum(axis=1)))[0]
# Selects only those clusters that don't have nans in their brightness profiles
combined_factors = np.array(combined_factors)[no_nan]
# Multiplies each cluster profile by the matching conversion factor to go from countrate to luminosity
luminosity = (sb[no_nan, :].T * combined_factors).T
# Finds the highest value in the profile of each cluster
max_lums = np.max(luminosity, axis=1)
# Finds the mean of the maximum values and calculates scaling factors so that the maximum
# value in each profile is now equal to the average
scale_factors = max_lums.mean() / max_lums
# Applied the rescaling factors
scaled_luminosity = (luminosity.T * scale_factors).T
# Calculates normalised and the usual covariance matrices
norm_cov = np.corrcoef(scaled_luminosity, rowvar=False)
cov = np.cov(scaled_luminosity, rowvar=False)
average_profile = np.mean(scaled_luminosity, axis=0)
stack_names = []
for src_ind, src in enumerate(sources):
if src_ind not in no_nan:
warn("A NaN value was detected in {}'s brightness profile, and as such it has been excluded from the "
"stack.".format(src.name))
else:
stack_names.append(src.name)
return average_profile, scaled_luminosity, cov, norm_cov, stack_names
def _view_stack(results: Tuple, scale_radius: str, radii: np.ndarray, figsize: Tuple):
"""
Internal function to plot the results of a stack function.
:param Tuple results: The results tuple from a stack function, this is what will be plotted.
:param str scale_radius: The overdensity radius to scale the cluster radii by, all GalaxyCluster objects must
have an entry for this radius.
:param ndarray radii: The radii (in units of scale_radius) at which to measure and stack surface brightness.
:param tuple figsize: The desired figure size for the plot.
"""
# Gets the average profile from the results
av_prof = results[0]
# Gets the individual scaled profiles from results
all_prof = results[1]
# The covariance matrix
cov = results[3]
# The normalised covariance matrix
norm_cov = results[4]
# Finds the standard deviations by diagonalising the covariance matrix and taking the sqrt
sd = np.sqrt(np.diagonal(cov))
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=figsize)
ax[0, 0].set_title("Average Profile")
ax[0, 0].set_xlabel("Radius [{}]".format(scale_radius))
ax[0, 1].set_title("All Profiles")
ax[0, 1].set_xlabel("Radius [{}]".format(scale_radius))
ax[0, 0].plot(radii, av_prof, color="black", label="Average Profile")
ax[0, 0].errorbar(radii, av_prof, fmt="kx", yerr=sd, capsize=2)
for i in range(0, all_prof.shape[0]):
ax[0, 1].plot(radii, all_prof[i, :])
ax[0, 0].set_xscale("log")
ax[0, 0].set_yscale("log")
ax[0, 1].set_xscale("log")
ax[0, 1].set_yscale("log")
ax[0, 0].xaxis.set_major_formatter(ScalarFormatter())
ax[0, 1].xaxis.set_major_formatter(ScalarFormatter())
ax[1, 0].set_title("Covariance Matrix")
ax[1, 0].tick_params(axis='both', direction='in', which='both', top=False, right=False)
ax[1, 0].xaxis.set_ticklabels([])
ax[1, 0].yaxis.set_ticklabels([])
im = ax[1, 0].imshow(cov, cmap="gnuplot2", origin="lower")
fig.colorbar(im, ax=ax[1, 0])
ax[1, 1].set_title("Normalised Covariance Matrix")
ax[1, 1].tick_params(axis='both', direction='in', which='both', top=False, right=False)
ax[1, 1].xaxis.set_ticklabels([])
ax[1, 1].yaxis.set_ticklabels([])
im = ax[1, 1].imshow(norm_cov, cmap="gnuplot2", origin="lower")
fig.colorbar(im, ax=ax[1, 1])
fig.tight_layout()
plt.show()
def radial_data_stack(sources: ClusterSample, scale_radius: str = "r200", use_peak: bool = True,
pix_step: int = 1, radii: np.ndarray = np.linspace(0.01, 1, 20), min_snr: float = 0.0,
lo_en: Quantity = Quantity(0.5, 'keV'), hi_en: Quantity = Quantity(2.0, 'keV'),
custom_temps: Quantity = None, sim_met: Union[float, List] = 0.3,
abund_table: str = 'angr', psf_corr: bool = False, psf_model: str = "ELLBETA",
psf_bins: int = 4, psf_algo: str = "rl", psf_iter: int = 15, num_cores: int = NUM_CORES) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, list]:
"""
Creates and scales radial brightness profiles for a set of galaxy clusters so that they can be combined
and compared, like for like. This particular function does not fit models, and outputs a mean brightness
profile, as well as the scaled stack data and covariance matrices. This is based on the method in
https://doi.org/10.1093/mnras/stv1366, though modified to work with profiles rather than 2D images.
:param ClusterSample sources: The source objects that will contribute to the stacked brightness profile.
:param str scale_radius: The overdensity radius to scale the cluster radii by, all GalaxyCluster objects must
have an entry for this radius.
:param bool use_peak: Controls whether the peak position is used as the centre of the brightness profile
for each GalaxyCluster object.
:param int pix_step: The width (in pixels) of each annular bin for the individual profiles, default is 1.
:param ndarray radii: The radii (in units of scale_radius) at which to measure and stack surface brightness.
:param int/float min_snr: The minimum allowed signal to noise for individual cluster profiles. Default is
0, which disables automatic rebinning.
:param Quantity lo_en: The lower energy limit of the data that goes into the stacked profiles.
:param Quantity hi_en: The upper energy limit of the data that goes into the stacked profiles.
:param Quantity custom_temps: Temperatures at which to calculate conversion factors for each cluster
in sources, they will overwrite any temperatures measured by XGA. A single temperature can be passed to be
used for all clusters in sources. If None, appropriate temperatures will be retrieved from the source objects.
:param float/List sim_met: The metallicity(s) to use when calculating the conversion factor. Pass a
single float to use the same value for all sources, or pass a list to use a different value for each.
:param str abund_table: The abundance table to use for the temperature fit and conversion factor calculation.
:param bool psf_corr: If True, PSF corrected ratemaps will be used to make the brightness profile stack.
:param str psf_model: If PSF corrected, the PSF model used.
:param int psf_bins: If PSF corrected, the number of bins per side.
:param str psf_algo: If PSF corrected, the algorithm used.
:param int psf_iter: If PSF corrected, the number of algorithm iterations.
:param int num_cores: The number of cores to use when calculating the brightness profiles, the default is 90%
of available cores.
:return: This function returns the average profile, the scaled brightness profiles with the cluster
changing along the y direction and the bin changing along the x direction, an array of the radii at which the
brightness was measured (in units of scale_radius), and finally the covariance matrix and normalised
covariance matrix. I also return a list of source names that WERE included in the stack.
:rtype: Tuple[ndarray, ndarray, ndarray, ndarray, ndarray, list]
"""
def construct_profile(src_obj: GalaxyCluster, src_id: int, lower: Quantity, upper: Quantity) \
-> Tuple[Quantity, int]:
"""
Constructs a brightness profile for the given galaxy cluster, and interpolates to find values
at the requested radii in units of scale_radius.
:param GalaxyCluster src_obj: The GalaxyCluster to construct a profile for.
:param int src_id: An identifier that enables the constructed profile to be placed
correctly in the results array.
:param Quantity lower: The lower energy limit to use.
:param Quantity upper: The higher energy limit to use.
:return: The scaled profile, the cluster identifier, and the original generated
surface brightness profile.
:rtype: Tuple[Quantity, int]
"""
# The storage key is different based on whether the user wishes to generate profiles from PSF corrected
# ratemaps or not.
if not psf_corr:
storage_key = "bound_{l}-{u}".format(l=lower.value, u=upper.value)
else:
storage_key = "bound_{l}-{u}_{m}_{n}_{a}{i}".format(l=lower.value, u=upper.value, m=psf_model,
n=psf_bins, a=psf_algo, i=psf_iter)
# Retrieving the relevant ratemap object, as well as masks
rt = [r[-1] for r in src_obj.get_products("combined_ratemap", just_obj=False) if storage_key in r][0]
# The user can choose to use the original user passed coordinates, or the X-ray centroid
if use_peak:
central_coord = src_obj.peak
else:
central_coord = src_obj.ra_dec
# We obviously want to remove point sources from the profiles we make, so get the mask that removes
# interlopers
int_mask = src_obj.get_interloper_mask()
# Tells the source object to give us the requested scale radius in units of kpc
rad = src_obj.get_radius(scale_radius, kpc)
# This fetches any profiles that might have already been generated to our required specifications
prof_prods = src_obj.get_products("combined_brightness_profile")
if len(prof_prods) == 1:
matching_profs = [p for p in list(prof_prods[0].values()) if p.check_match(rt, central_coord, pix_step,
min_snr, rad)]
else:
matching_profs = []
# This is because a ValueError can be raised by radial_brightness when there is a problem with the
# background mask
try:
if len(matching_profs) == 0:
sb_prof, success = radial_brightness(rt, central_coord, rad, float(src_obj.background_radius_factors[0]),
float(src_obj.background_radius_factors[1]), int_mask,
src_obj.redshift, pix_step, kpc, src_obj.cosmo, min_snr)
src_obj.update_products(sb_prof)
elif len(matching_profs) == 1:
sb_prof = matching_profs[0]
elif len(matching_profs) > 1:
raise ValueError("This shouldn't be possible.")
# Calculates the value of pixel radii in terms of the scale radii
scaled_radii = (sb_prof.radii / rad).value
# Interpolating brightness profile values at the radii passed by the user
interp_brightness = np.interp(radii, scaled_radii, (sb_prof.values - sb_prof.background).value)
except ValueError as ve:
# This will mean that the profile is thrown away in a later step
interp_brightness = np.full(radii.shape, np.NaN)
# But will also raise a warning so the user knows
warn(str(ve).replace("you're looking at", "{s} is".format(s=src_obj.name)).replace(".", "")
+ " - profile set to NaNs.")
return interp_brightness, src_id
# This is an internal function that does setup checks common to both stacking of data and models
_stack_setup_checks(sources, scale_radius, lo_en, hi_en, psf_corr, psf_model, psf_bins, psf_algo, psf_iter)
sb = np.zeros((len(sources), len(radii)))
# Sets up a multiprocessing pool
with tqdm(total=len(sources), desc="Generating Brightness Profiles") as onwards, Pool(num_cores) as pool:
def callback(results):
nonlocal sb
nonlocal onwards
b, s_id = results
sb[s_id, :] = b
onwards.update(1)
def err_callback(err):
onwards.update()
raise err
for s_ind, s in enumerate(sources):
pool.apply_async(construct_profile, callback=callback, error_callback=err_callback,
args=(s, s_ind, lo_en, hi_en))
pool.close()
pool.join()
onwards.close()
average_profile, scaled_luminosity, cov, norm_cov, stack_names = _create_stack(sb, sources, scale_radius, lo_en,
hi_en, custom_temps, sim_met,
abund_table)
return average_profile, scaled_luminosity, radii, cov, norm_cov, stack_names
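# Minimal usage sketch (assumes an already-constructed ClusterSample called `srcs`):
#   stack = radial_data_stack(srcs, scale_radius="r500", radii=np.linspace(0.05, 1, 25))
#   av_prof, scaled_profs, radii_out, cov, norm_cov, names = stack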
def view_radial_data_stack(sources: ClusterSample, scale_radius: str = "r200", use_peak: bool = True,
pix_step: int = 1, radii: np.ndarray = | np.linspace(0.01, 1, 20) | numpy.linspace |
from __future__ import division, absolute_import, print_function
import sys
from itertools import product
import numpy as np
from numpy.core import zeros, float64
from numpy.testing import dec, TestCase, assert_almost_equal, assert_, \
assert_raises, assert_array_equal, assert_allclose, assert_equal
from numpy.core.multiarray import inner as inner_
DECPREC = 14
class TestInner(TestCase):
def test_vecself(self):
"""Ticket 844."""
# Inner product of a vector with itself segfaults or give meaningless
# result
a = zeros(shape = (1, 80), dtype = float64)
p = inner_(a, a)
assert_almost_equal(p, 0, decimal = DECPREC)
try:
import numpy.core._dotblas as _dotblas
except ImportError:
_dotblas = None
@dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas")
def test_blasdot_used():
from numpy.core import dot, vdot, inner, alterdot, restoredot
assert_(dot is _dotblas.dot)
assert_(vdot is _dotblas.vdot)
assert_(inner is _dotblas.inner)
assert_(alterdot is _dotblas.alterdot)
assert_(restoredot is _dotblas.restoredot)
def test_dot_2args():
from numpy.core import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args():
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
np.dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = np.dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is np.dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = np.dot(f, v)
assert_(r is np.dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors():
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, np.dot, f, v, r)
assert_raises(ValueError, np.dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, np.dot, f, v, r[:, ::2])
assert_raises(ValueError, np.dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, np.dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, np.dot, f, v, r)
def test_dot_array_order():
""" Test numpy dot with different order C, F
Comparing results with multiarray dot.
Double and single precisions array are compared using relative
precision of 7 and 5 decimals respectively.
Use 30 decimal when comparing exact operations like:
(a.b)' = b'.a'
"""
_dot = np.core.multiarray.dot
a_dim, b_dim, c_dim = 10, 4, 7
orders = ["C", "F"]
dtypes_prec = {np.float64: 7, np.float32: 5}
np.random.seed(7)
for arr_type, prec in dtypes_prec.items():
for a_order in orders:
a = np.asarray(np.random.randn(a_dim, a_dim),
dtype=arr_type, order=a_order)
assert_array_equal(np.dot(a, a), a.dot(a))
# (a.a)' = a'.a', note that mse~=1e-31 needs almost_equal
assert_almost_equal(a.dot(a), a.T.dot(a.T).T, decimal=prec)
#
# Check with making explicit copy
#
a_T = a.T.copy(order=a_order)
assert_almost_equal(a_T.dot(a_T), a.T.dot(a.T), decimal=prec)
assert_almost_equal(a.dot(a_T), a.dot(a.T), decimal=prec)
assert_almost_equal(a_T.dot(a), a.T.dot(a), decimal=prec)
#
# Compare with multiarray dot
#
assert_almost_equal(a.dot(a), _dot(a, a), decimal=prec)
assert_almost_equal(a.T.dot(a), _dot(a.T, a), decimal=prec)
assert_almost_equal(a.dot(a.T), _dot(a, a.T), decimal=prec)
assert_almost_equal(a.T.dot(a.T), _dot(a.T, a.T), decimal=prec)
for res in a.dot(a), a.T.dot(a), a.dot(a.T), a.T.dot(a.T):
assert res.flags.c_contiguous
for b_order in orders:
b = np.asarray(np.random.randn(a_dim, b_dim),
dtype=arr_type, order=b_order)
b_T = b.T.copy(order=b_order)
assert_almost_equal(a_T.dot(b), a.T.dot(b), decimal=prec)
assert_almost_equal(b_T.dot(a), b.T.dot(a), decimal=prec)
# (b'.a)' = a'.b
assert_almost_equal(b.T.dot(a), a.T.dot(b).T, decimal=prec)
assert_almost_equal(a.dot(b), _dot(a, b), decimal=prec)
assert_almost_equal(b.T.dot(a), _dot(b.T, a), decimal=prec)
for c_order in orders:
c = np.asarray(np.random.randn(b_dim, c_dim),
dtype=arr_type, order=c_order)
c_T = c.T.copy(order=c_order)
assert_almost_equal(c.T.dot(b.T), c_T.dot(b_T), decimal=prec)
assert_almost_equal(c.T.dot(b.T).T, b.dot(c), decimal=prec)
assert_almost_equal(b.dot(c), _dot(b, c), decimal=prec)
assert_almost_equal(c.T.dot(b.T), _dot(c.T, b.T), decimal=prec)
@dec.skipif(True) # ufunc override disabled for 1.9
def test_dot_override():
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_npdot_segfault():
if sys.platform != 'darwin': return
# Test for float32 np.dot segfault
# https://github.com/numpy/numpy/issues/4007
def aligned_array(shape, align, dtype, order='C'):
# Make array shape `shape` with aligned at `align` bytes
d = dtype()
# Make array of correct size with `align` extra bytes
N = | np.prod(shape) | numpy.prod |
import h5py
import pickle
import numpy as np
def load_weights():
fff = h5py.File('Mybase/mask_rcnn_coco.h5','r') #打开h5文件
#print(list(f.keys()))
mydict = {}
mydict['global_step:0'] = 1000
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
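# h folds the conv bias and the BatchNorm statistics into a single beta term:
# beta' = gamma * (bias - moving_mean) / moving_variance + beta.
# Note: a conventional BatchNorm fold divides by sqrt(moving_variance + eps); this mapping
# appears to assume the consuming graph applies that square-root scaling itself.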
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
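# The res4 units below (res4a..res4v, with the remaining lettered blocks
# continuing the same pattern) map onto resnet_block2_0_2/resnet_unit2_0..21
# and could equivalently be generated with the copy_conv_bn sketch above.
# The loop is left commented out so the explicit assignments below remain
# the ones that actually run.
# block = 'layers_module1_1/resnet_block2_0_2'
# for i, letter in enumerate('abcdefghijklmnopqrstuv'):
#     unit = '%s/resnet_unit2_%d' % (block, i)
#     if letter == 'a':  # only the first unit has the projection shortcut (branch1)
#         copy_conv_bn(fff, mydict, 'res4a_branch1', 'bn4a_branch1',
#                      unit + '/conv_bn1_0')
#     copy_conv_bn(fff, mydict, 'res4%s_branch2a' % letter, 'bn4%s_branch2a' % letter,
#                  unit + '/conv_bn_relu1_0/conv_bn1_0')
#     copy_conv_bn(fff, mydict, 'res4%s_branch2b' % letter, 'bn4%s_branch2b' % letter,
#                  unit + '/conv_bn_relu1_1/conv_bn1_0')
#     copy_conv_bn(fff, mydict, 'res4%s_branch2c' % letter, 'bn4%s_branch2c' % letter,
#                  unit + '/conv_bn1_1')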
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2b']
a = dset['res4a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2b']
a = dset['bn4a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2c']
a = dset['res4a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2c']
a = dset['bn4a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res4b_branch2a']
a = dset['res4b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2a']
a = dset['bn4b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2b']
a = dset['res4b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2b']
a = dset['bn4b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2c']
a = dset['res4b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2c']
a = dset['bn4b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4c_branch2a']
a = dset['res4c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2a']
a = dset['bn4c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2b']
a = dset['res4c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2b']
a = dset['bn4c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2c']
a = dset['res4c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2c']
a = dset['bn4c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4d_branch2a']
a = dset['res4d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2a']
a = dset['bn4d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2b']
a = dset['res4d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2b']
a = dset['bn4d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2c']
a = dset['res4d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2c']
a = dset['bn4d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4e_branch2a']
a = dset['res4e_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2a']
a = dset['bn4e_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2b']
a = dset['res4e_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2b']
a = dset['bn4e_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2c']
a = dset['res4e_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2c']
a = dset['bn4e_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4f_branch2a']
a = dset['res4f_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2a']
a = dset['bn4f_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2b']
a = dset['res4f_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2b']
a = dset['bn4f_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2a']
a = dset['bn4g_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2b']
a = dset['res4g_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2b']
a = dset['bn4g_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2c']
a = dset['res4g_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2c']
a = dset['bn4g_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4h_branch2a']
a = dset['res4h_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2a']
a = dset['bn4h_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2b']
a = dset['res4h_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2b']
a = dset['bn4h_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2c']
a = dset['res4h_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2c']
a = dset['bn4h_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4i_branch2a']
a = dset['res4i_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2a']
a = dset['bn4i_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2b']
a = dset['res4i_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2b']
a = dset['bn4i_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2c']
a = dset['res4i_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2c']
a = dset['bn4i_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4j_branch2a']
a = dset['res4j_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2a']
a = dset['bn4j_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2b']
a = dset['res4j_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2b']
a = dset['bn4j_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2c']
a = dset['res4j_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2c']
a = dset['bn4j_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4k_branch2a']
a = dset['res4k_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2a']
a = dset['bn4k_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2b']
a = dset['res4k_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2b']
a = dset['bn4k_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2c']
a = dset['res4k_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2c']
a = dset['bn4k_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4l_branch2a']
a = dset['res4l_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2a']
a = dset['bn4l_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2b']
a = dset['res4l_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2b']
a = dset['bn4l_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2c']
a = dset['res4l_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2c']
a = dset['bn4l_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4m_branch2a']
a = dset['res4m_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2a']
a = dset['bn4m_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2b']
a = dset['res4m_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2b']
a = dset['bn4m_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2c']
a = dset['res4m_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2c']
a = dset['bn4m_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4n_branch2a']
a = dset['res4n_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2a']
a = dset['bn4n_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2b']
a = dset['res4n_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2b']
a = dset['bn4n_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2c']
a = dset['res4n_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2c']
a = dset['bn4n_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4o_branch2a']
a = dset['res4o_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2a']
a = dset['bn4o_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4o_branch2b']
a = dset['res4o_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2b']
a = dset['bn4o_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4o_branch2c']
a = dset['res4o_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4o_branch2c']
a = dset['bn4o_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_14/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4p_branch2a']
a = dset['res4p_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2a']
a = dset['bn4p_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4p_branch2b']
a = dset['res4p_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2b']
a = dset['bn4p_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4p_branch2c']
a = dset['res4p_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4p_branch2c']
a = dset['bn4p_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_15/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4q_branch2a']
a = dset['res4q_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2a']
a = dset['bn4q_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4q_branch2b']
a = dset['res4q_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2b']
a = dset['bn4q_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4q_branch2c']
a = dset['res4q_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4q_branch2c']
a = dset['bn4q_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_16/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4r_branch2a']
a = dset['res4r_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2a']
a = dset['bn4r_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4r_branch2b']
a = dset['res4r_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2b']
a = dset['bn4r_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4r_branch2c']
a = dset['res4r_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4r_branch2c']
a = dset['bn4r_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_17/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4s_branch2a']
a = dset['res4s_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2a']
a = dset['bn4s_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4s_branch2b']
a = dset['res4s_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2b']
a = dset['bn4s_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4s_branch2c']
a = dset['res4s_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4s_branch2c']
a = dset['bn4s_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_18/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4t_branch2a']
a = dset['res4t_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2a']
a = dset['bn4t_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4t_branch2b']
a = dset['res4t_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2b']
a = dset['bn4t_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4t_branch2c']
a = dset['res4t_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4t_branch2c']
a = dset['bn4t_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_19/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4u_branch2a']
a = dset['res4u_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4u_branch2a']
a = dset['bn4u_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4u_branch2b']
a = dset['res4u_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4u_branch2b']
a = dset['bn4u_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4u_branch2c']
a = dset['res4u_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4u_branch2c']
a = dset['bn4u_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_20/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4v_branch2a']
a = dset['res4v_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4v_branch2a']
a = dset['bn4v_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4v_branch2b']
a = dset['res4v_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4v_branch2b']
a = dset['bn4v_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4v_branch2c']
a = dset['res4v_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4v_branch2c']
a = dset['bn4v_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_21/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4w_branch2a']
a = dset['res4w_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4w_branch2a']
a = dset['bn4w_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4w_branch2b']
a = dset['res4w_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4w_branch2b']
a = dset['bn4w_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4w_branch2c']
a = dset['res4w_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4w_branch2c']
a = dset['bn4w_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_22/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
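# Note on the repeated blocks: for every ResNet unit the Keras conv kernel is copied to
# the TF-style `weights:0` entry, the BN gamma is copied unchanged, and the conv bias is
# folded together with the BN moving statistics into the new beta via
# h = ((c - f) / g) * e + d  (bias c, moving mean f, moving variance g, gamma e, beta d),
# so the bias itself is never stored separately.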
########res5########
dset = fff['res5a_branch1']
a = dset['res5a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5a_branch1']
a = dset['bn5a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5a_branch2a']
a = dset['res5a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5a_branch2a']
a = dset['bn5a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5a_branch2b']
a = dset['res5a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5a_branch2b']
a = dset['bn5a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5a_branch2c']
a = dset['res5a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5a_branch2c']
a = dset['bn5a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res5b_branch2a']
a = dset['res5b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5b_branch2a']
a = dset['bn5b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5b_branch2b']
a = dset['res5b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5b_branch2b']
a = dset['bn5b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5b_branch2c']
a = dset['res5b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5b_branch2c']
a = dset['bn5b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res5c_branch2a']
a = dset['res5c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5c_branch2a']
a = dset['bn5c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5c_branch2b']
a = dset['res5c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5c_branch2b']
a = dset['bn5c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res5c_branch2c']
a = dset['res5c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn5c_branch2c']
a = dset['bn5c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_3/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
from cuda import cuda, nvrtc
import numpy as np
import util.RotRep as Rot
import util.Simulation as Gsim
from config import Config
import h5py
import time
import gpuarray
from collections import Counter
from scipy.sparse import coo_matrix
from scipy.ndimage import gaussian_filter
from run_cuda import run_cuda_function
from cuda_python_compile import create_module
class Initializer:
def __init__(self, Cfg):
module = create_module('strain_device_mjw_3.cu')
self.sim_strain_func = cuda.cuModuleGetFunction(module, b'Simulate_for_Strain')[1]
self.sim_pos_func = cuda.cuModuleGetFunction(module, b'Simulate_for_Pos')[1]
self.KL_total_func = cuda.cuModuleGetFunction(module, b'KL_total')[1]
self.KL_diff_func = cuda.cuModuleGetFunction(module, b'KL_diff')[1]
self.One_func = cuda.cuModuleGetFunction(module, b'ChangeOne')[1]
self.hit_func = cuda.cuModuleGetFunction(module, b'Hit_Score')[1]
self.Cfg = Cfg
self.mode = Cfg.mode
self.ImLoaded = False
self.GsLoaded = False
self.GsGenerated = False
self.windowD = gpuarray.to_gpu(np.array(self.Cfg.window).astype(np.int32))
# Det parameters
self.Det = Gsim.Detector(psizeJ=Cfg.pixelSize / 1000.0,
psizeK=Cfg.pixelSize / 1000.0,
J=Cfg.JCenter,
K=Cfg.KCenter,
trans=np.array([Cfg.Ldistance, 0, 0]),
tilt=Rot.EulerZXZ2Mat(np.array(Cfg.tilt) / 180.0 * np.pi))
afDetInfoH = np.concatenate(
[[Cfg.JPixelNum, Cfg.KPixelNum, Cfg.pixelSize / 1000.0, Cfg.pixelSize / 1000.0],
self.Det.CoordOrigin,
self.Det.Norm,
self.Det.Jvector,
self.Det.Kvector]).astype(np.float32)
self.afDetInfoD = gpuarray.to_gpu(afDetInfoH)
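        # afDetInfoH packs the detector description into one flat float32 array:
        # [JPixelNum, KPixelNum, pixel size along J, pixel size along K,
        #  CoordOrigin, Norm, Jvector, Kvector], where the last four entries are
        # expected to be 3-vectors from the Detector object.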
# sample parameters
# hack!! only for Hexagonal
self.sample = Gsim.CrystalStr()
self.sample.PrimA = Cfg.lattice[0] * np.array([1, 0, 0])
self.sample.PrimB = Cfg.lattice[1] * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.sample.PrimC = Cfg.lattice[2] * np.array([0, 0, 1])
Atoms = Cfg.atoms
for ii in range(len(Atoms)):
self.sample.addAtom(list(map(eval, Atoms[ii][0:3])), Atoms[ii][3])
self.sample.getRecipVec()
self.sample.getGs(Cfg.maxQ)
if self.mode == 'rec':
f = h5py.File(Cfg.peakFile, 'r')
# Lim for window position
self.LimH = np.array(f['limits']).astype(np.int32)
self.LimD = gpuarray.to_gpu(self.LimH)
# whichOmega for choosing between omega1 and omega2
self.whichOmega = np.array(f['whichOmega']).astype(np.int32)
self.whichOmegaD = gpuarray.to_gpu(self.whichOmega)
# MaxInt for normalize the weight of each spot
# (because different spots have very different intensity but we want them equal weighted)
self.MaxInt = np.array(f['MaxInt'], dtype=np.float32)
self.MaxIntD = gpuarray.to_gpu(self.MaxInt)
self.Gs = np.array(f['Gs'], dtype=np.float32)
self.NumG = len(self.Gs)
print(self.NumG)
self.orienM = np.array(f['OrienM'])
self.avg_distortion = np.array(f['avg_distortion'])
self.GsGenerated = True
# transfer the ExpImgs and all Gs to texture memory
def loadIm(self):
f = h5py.File(self.Cfg.peakFile, 'r')
AllIm = np.zeros(shape=(self.Cfg.window[1], self.Cfg.window[0], self.NumG * self.Cfg.window[2]), dtype=np.uint32, order='F')
for ii in range(self.NumG):
tmp = np.array(f['Imgs']['Im{0:d}'.format(ii)])
AllIm[:tmp.shape[0], :tmp.shape[1], ii * self.Cfg.window[2]:(ii + 1) * self.Cfg.window[2]] = tmp
self.ImLoaded = True
Im = np.array(AllIm).astype(np.uint32)
self.tcExp = gpuarray.to_gpu(Im.ravel())
def loadGs(self):
if not self.GsGenerated:
raise RuntimeError('Gs are not generated yet')
self.tG = gpuarray.to_gpu(np.array(np.transpose(self.Gs).astype(np.float32),order='F'))
self.GsLoaded = True
def generateGs(self, pos, orien, avg_distortion):
self.pos = np.array(pos)
self.orien = np.array(orien)
self.orienM = Rot.EulerZXZ2Mat(self.orien / 180.0 * np.pi)
self.avg_distortion = avg_distortion
Ps, self.Gs, Info = Gsim.GetProjectedVertex(self.Det,
self.sample, self.avg_distortion.dot(self.orienM),
self.Cfg.etalimit / 180.0 * np.pi,
self.pos, getPeaksInfo=True,
omegaL=self.Cfg.omgRange[0],
omegaU=self.Cfg.omgRange[1], energy=self.Cfg.energy)
self.NumG = len(self.Gs)
Lims = []
dx = 150
dy = 80
for ii in range(self.NumG):
            omegid = int((self.Cfg.omgRange[2] - Ps[ii, 2]) / self.Cfg.omgInterval) - 22  # because 45 frames are stored
if omegid < 0:
omegid += int(self.Cfg.omgRange[2] / self.Cfg.omgInterval)
elif omegid >= int(self.Cfg.omgRange[2] / self.Cfg.omgInterval):
omegid -= int(self.Cfg.omgRange[2] / self.Cfg.omgInterval)
x1 = int(2047 - Ps[ii, 0] - dx)
y1 = int(Ps[ii, 1] - dy)
x2 = x1 + 2 * dx
y2 = y1 + 2 * dy
# ignore camera boundary limit, I'm just lazy, will correct it later
Lims.append((x1, x2, y1, y2, omegid))
self.LimH = np.array(Lims, dtype=np.int32)
self.LimD = gpuarray.to_gpu(self.LimH)
# whichOmega for choosing between omega1 and omega2
self.whichOmega = np.zeros(len(Lims), dtype=np.int32)
for ii in range(len(Lims)):
if Info[ii]['WhichOmega'] == 'b':
self.whichOmega[ii] = 2
else:
self.whichOmega[ii] = 1
self.whichOmegaD = gpuarray.to_gpu(self.whichOmega)
self.GsGenerated = True
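        # At this point LimH holds one (x1, x2, y1, y2, omegid) detector window per
        # simulated peak and whichOmega records which of the two omega solutions each
        # peak belongs to, mirroring what the 'rec' branch loads from the peak file.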
    def MoveDet(self, dJ=0, dK=0, dD=0, dT=np.eye(3)):
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from helperFunctions import *
from IPython import embed
from mne.stats import permutation_cluster_test
header = 'target_loc'
def paired_t(*args):
"""Call scipy.stats.ttest_rel, but return only f-value."""
from scipy.stats import ttest_rel
return ttest_rel(*args)[0]
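# paired_t is passed as stat_fun to mne.stats.permutation_cluster_test below, so the
# clusters are formed on paired t-values instead of the default F-statistic.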
def ipsiContra():
'''
'''
#sns.set(font_scale=2.5)
sns.set_style('white')
sns.set_style('white', {'axes.linewidth': 2})
plt.figure(figsize = (20,10))
with open('/home/dvmoors1/big_brother/Dist_suppression/erp/plot_dict.pickle','rb') as handle:
plot = pickle.load(handle)
times = plot['times']
with open('/home/dvmoors1/big_brother/Dist_suppression/erp/ipsi_contra.pickle' ,'rb') as handle:
erps = pickle.load(handle)
ax = plt.subplot(1,2 , 1, title = 'Target repeat diff', ylabel = 'mV')
for cnd in ['DvTr_0','DvTr_3']:#['DvTr0','DvTr1','DvTr2','DvTr3']:
ipsi_t = np.mean(np.vstack([erps[key][cnd]['ipsi'] for key in erps.keys()]), axis = 0)
contra_t = np.mean(np.vstack([erps[key][cnd]['contra'] for key in erps.keys()]), axis = 0)
plt.plot(times, contra_t - ipsi_t, label = cnd)
plt.legend(loc = 'best')
ax = plt.subplot(1,2 , 2, title = 'Distractor repeat diff', ylabel = 'mV')
for cnd in ['DrTv_0','DrTv_3']: #['DrTv0','DrTv1','DrTv2','DrTv3']:
ipsi_d = np.mean(np.vstack([erps[key][cnd]['ipsi'] for key in erps.keys()]), axis = 0)
contra_d = np.mean(np.vstack([erps[key][cnd]['contra'] for key in erps.keys()]), axis = 0)
plt.plot(times, contra_d - ipsi_d, label = cnd)
plt.legend(loc = 'best')
sns.despine()
plt.tight_layout()
plt.savefig('/home/dvmoors1/big_brother/Dist_suppression/erp/n2pc_Pd_modulation.pdf')
plt.close()
def ipsiContra():
'''
'''
#sns.set(font_scale=2.5)
sns.set_style('white')
sns.set_style('white', {'axes.linewidth': 2})
plt.figure(figsize = (20,20))
with open('/Users/dirk/Desktop/suppression/erp/{}/plot_dict.pickle'.format(header) ,'rb') as handle:
plot = pickle.load(handle)
time = [-0.3, 0.8]
start, end = [np.argmin(abs(plot['times'] - t)) for t in time]
times = plot['times'][start:end]
with open('/Users/dirk/Desktop/suppression/erp/{}/ipsi_contra.pickle'.format('target_loc') ,'rb') as handle:
erps_t = pickle.load(handle)
with open('/Users/dirk/Desktop/suppression/erp/{}/ipsi_contra.pickle'.format('dist_loc') ,'rb') as handle:
erps_d = pickle.load(handle)
# plot ipsi and contra (target left and distractor right)
ipsi_t = np.mean(np.vstack([erps_t[key]['all']['ipsi'] for key in erps_t.keys()]), axis = 0)
contra_t = np.mean(np.vstack([erps_t[key]['all']['contra'] for key in erps_t.keys()]), axis = 0)
diff_t = contra_t - ipsi_t
ipsi_d = np.mean(np.vstack([erps_d[key]['all']['ipsi'] for key in erps_d.keys()]), axis = 0)
contra_d = np.mean(np.vstack([erps_d[key]['all']['contra'] for key in erps_d.keys()]), axis = 0)
diff_d = contra_d - ipsi_d
ax = plt.subplot(3,2 , 1, title = 'Target ERPs', ylabel = 'mV')
plt.plot(times, ipsi_t, label = 'ipsi')
plt.plot(times, contra_t, label = 'contra')
plt.legend(loc = 'best')
ax = plt.subplot(3,2 , 2, title = 'Distractor ERPS', ylabel = 'mV')
plt.plot(times, ipsi_d, label = 'ipsi')
plt.plot(times, contra_d, label = 'contra')
plt.legend(loc = 'best')
ax = plt.subplot(3,2 , 3, title = 'Target diff', ylabel = 'mV')
plt.plot(times, contra_t - ipsi_t)
ax = plt.subplot(3,2 , 4, title = 'Distractor diff', ylabel = 'mV')
plt.plot(times, contra_d - ipsi_d)
ax = plt.subplot(3,2 , 5, title = 'Target repeat diff', ylabel = 'mV')
for cnd in ['DvTr0','DvTr3']:#['DvTr0','DvTr1','DvTr2','DvTr3']:
ipsi_t = np.mean(np.vstack([erps_t[key][cnd]['ipsi'] for key in erps_t.keys()]), axis = 0)
contra_t = np.mean(np.vstack([erps_t[key][cnd]['contra'] for key in erps_t.keys()]), axis = 0)
plt.plot(times, contra_t - ipsi_t, label = cnd)
plt.legend(loc = 'best')
ax = plt.subplot(3,2 , 6, title = 'Distractor repeat diff', ylabel = 'mV')
for cnd in ['DrTv0','DrTv3']: #['DrTv0','DrTv1','DrTv2','DrTv3']:
ipsi_d = np.mean(np.vstack([erps_d[key][cnd]['ipsi'] for key in erps_d.keys()]), axis = 0)
contra_d = np.mean(np.vstack([erps_d[key][cnd]['contra'] for key in erps_d.keys()]), axis = 0)
plt.plot(times, contra_d - ipsi_d, label = cnd)
plt.legend(loc = 'best')
sns.despine()
plt.tight_layout()
plt.savefig('/Users/dirk/Desktop/suppression/erp/n2pc_Pd_modulation.pdf')
plt.close()
def plotCTFSlopeAcrossTime(header, power, channel = 'posterior', freqs = 'all'):
'''
'''
# plotting parameters
sns.set(font_scale=2.5)
sns.set_style('white')
sns.set_style('white', {'axes.linewidth': 2})
# read in CTF data
ctf = []
for sj in subject_id:
# read in classification dict
with open('/home/dvmoors1/big_brother/Dist_suppression/ctf/{}_channels/{}/{}_slopes_{}.pickle'.format(channel,header,sj, freqs) ,'rb') as handle:
ctf.append(pickle.load(handle))
with open('/home/dvmoors1/big_brother/Dist_suppression/ctf/{}_channels/{}/{}_info.pickle'.format(channel,header, freqs),'rb') as handle:
plot_dict = pickle.load(handle)
if header == 'target_loc':
rep_cond = ['DvTr_0','DvTr_3']
else:
rep_cond = ['DrTv_0','DrTv_3']
plt.figure(figsize = (20,10))
for idx, plot in enumerate(['variable', 'repeat']):
ax = plt.subplot(1,2, idx + 1, title = plot, ylabel = 'CTF slope', ylim = (-0.2, 0.2))
if plot == 'variable':
diff = []
for i, cnd in enumerate(['DvTv_0','DvTv_3']):
X = np.vstack([ctf[j][cnd][power] for j in range(len(ctf))])
diff.append(X)
error = bootstrap(X)
X = X.mean(axis = 0)
plt.plot(plot_dict['times'], X, color = ['g','r'][i], label = cnd)
plt.fill_between(plot_dict['times'], X + error, X - error, alpha = 0.2, color = ['g','r'][i])
plt.axhline(y=0, ls = '--', color = 'black')
plt.axvline(x=0.258, ls = '--', color = 'grey', label = 'onset gabor')
T_obs, clusters, cluster_pv, HO = permutation_cluster_test(diff, stat_fun = paired_t)
print('T',header, cluster_pv)
mask = np.zeros(plot_dict['times'].size,dtype = bool)
            for cl in np.array(clusters):
from dataclasses import dataclass, field
from functools import partial
from typing import List, Tuple, AnyStr, Iterator, Dict
import numpy as np
from pandas import DataFrame
from rulelist.datastructure.attribute.attribute import Attribute, Item
from rulelist.util.bitset_operations import indexes2bitset, bitset2indexes
def activation_numeric(df: DataFrame, attribute_name: AnyStr, minval: float, maxval: float) -> DataFrame:
"""Checks in which instances the numerical conditions are True.
Parameters
----------
df : pandas.DataFrame
List of items that describe single-numeric attribute.
attribute_name : str
Name of attribute.
minval: float
Minimum value in the condition x >= minval.
maxval
Maximum value in the condition x < maxval.
Returns
----------
activated_indexes : np.ndarray
Boolean array with True for values where the conditions are true.
"""
activated_indexes = (df[attribute_name] >= minval) & (df[attribute_name] < maxval)
return activated_indexes
def find_cutpoints(values: np.ndarray, n_cutpoints: int) -> Tuple[np.ndarray, int]:
""" Finds the n quantile values as if done with equal frequency binning.
Parameters
----------
values : np.ndarray
Array of values to discretize.
n_cutpoints : int
Number of cut points selected.
Returns
----------
value_quantiles : np.ndarray
Array of the quantile values.
real_ncutpoints : int
In case the values do not allow n_cutpoints it returns a smaller value.
"""
if n_cutpoints > len(values):
n_cutpoints = len(values)
quantile_percentage = [1 / (n_cutpoints + 1) * ncut for ncut in range(0, n_cutpoints + 2)]
value_quantiles = np.nanquantile(values, quantile_percentage, interpolation='midpoint')[1:-1]
    # if np.isnan(value_quantiles).any(): continue
value_quantiles = np.unique(value_quantiles)
real_ncutpoints = len(value_quantiles)
return value_quantiles, real_ncutpoints
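# Illustrative example (hypothetical input): find_cutpoints(np.array([1., 2., 3., 4., 5.]), 2)
# asks for the 1/3 and 2/3 quantiles with midpoint interpolation and returns
# (array([2.5, 3.5]), 2).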
def create_item(indexes, variable_name, min_val, max_val, description, number_operations):
""" Creates a class of type Item from the values of a NumericAttribute.
Parameters
----------
indexes : np.ndarray
Array of indexes where the item is present in the training datastructure.
variable_name : str
Name of the attribute/variable that this item is attached to.
min_val : float
Minimum value covered by this item. item > min_val.
max_val : float
Maximum value covered by this item. item < max_val.
description : str
        Text describing the interval defined by the item, e.g. "x < max_val" (one operator) or "min_val <= x < max_val" (two operators).
number_operations : int
Number of logical operators used to define the interval.
Returns
----------
Item : Item class object
Item with the characteristics described by the arguments.
"""
bit_array = indexes2bitset(indexes)
activation_function = partial(activation_numeric, attribute_name=variable_name, minval=min_val, maxval=max_val)
return Item(bit_array, variable_name, description, number_operations, activation_function)
@dataclass
class NumericAttribute(Attribute):
"""
Describes a single-numeric attribute or variable. Inherits from class Attribute.
Attributes
----------
items : List[Item]
List of items that describe single-numeric attribute.
n_items : int
Number of items in this attribute.
Parameters
----------
Attribute : class object that represents a variable.
Methods
-------
create_items_numeric
Creates the items by making binary partitions of the values using the cutpoints of equal frequency binning.
"""
n_cutpoints : int
discretization : AnyStr
items : List[Item] = field(default_factory=list, init=False)
cardinality_operator: Dict[int,int] = field(default_factory=dict, init=False)
#TODO: it would be interesting to add a generator instead of a list to do dynamic creation
def __post_init__(self):
self.items, self.cardinality_operator = self.create_items()
def create_items(self,indexes=None) -> Tuple[List[Item], Dict[int, int]]:
""" Creates a list of items from the numerical atrribute.
Makes a list of items using equal frequency binning, ignoring NANs, based on the values of the Numeric attribute
Returns
----------
List[Item] : List of Items
A list of all items based on the possible combinations of cutpoints.
"""
if indexes is None:
values = self.values
else:
values = self.values[indexes]
#values = self.values[self.values.index.intersection(indexes)]
value_quantiles, self.n_cutpoints = find_cutpoints(values, self.n_cutpoints)
cardinality_operator = {1:0,2:0}
items = []
for iq, value_quantile1 in enumerate(value_quantiles): # makes binary intervals x<val and x >=val
# condition x<val
index_down = np.where(values < value_quantile1)[0]
if indexes is not None:
index_down = indexes[index_down]
description_down = str(self.name) + " < " + str(value_quantile1)
items.append(create_item(index_down,variable_name= self.name, min_val=np.NINF, max_val=value_quantile1,
description = description_down,number_operations=1))
cardinality_operator[1] += 1
if self.n_cutpoints == 1: break # if there is only one unique value we only need one item
# condition x >=val
            index_up = np.where(values >= value_quantile1)[0]
import numpy as np
from scipy.special import erf
from scipy.interpolate import interp1d
from scipy.integrate import quad,cumtrapz,trapz,solve_ivp
from scipy.optimize import fmin,brentq,fixed_point
import scipy.linalg as linalg
import time
# helper function for the peak number density (BBKS 1986)
def f(x):
return np.where(x>.03,
.5*(x**3-3*x)*(erf(2.5**.5*x)+erf(2.5**.5*.5*x))+(2./(5*np.pi))**.5*((7.75*x**2+1.6)*np.exp(-.625*x**2)+(.5*x**2-1.6)*np.exp(-2.5*x**2)),
3**5*5**1.5/(7*2**11*(2*np.pi)**.5)*x**8*(1-.625*x**2)
)
# moments of the power spectrum
def sigmaj2(j,k,Pk):
integrand = Pk*k**(2*j)
return trapz(integrand,x=np.log(k),axis=0)
def sigmaj(j,k,Pk):
return np.sqrt(sigmaj2(j,k,Pk))
# ellipsoidal collapse threshold
def ec_func(f,e,p):
return 1+0.47*(5*(e**2-p*np.abs(p))*f**2)**0.615
def ec_scale(e,p):
func = lambda f: ec_func(f,e,p)
try:
return fixed_point(func,1.1,maxiter=25)
except:
return 0
dc = 3./5*(3*np.pi/2)**(2./3) # linear delta at collapse
dv = 3./5*(3./4 + 9*np.pi/8)**(2./3) # linear delta at virialization
dt = 3./5*(3*np.pi/4)**(2./3) # linear delta at turnaround
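# Numerically dc ~ 1.686, dv ~ 1.58, dt ~ 1.06, the usual spherical-collapse thresholds.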
# window functions
def sinc(x):
return np.where(x > 0.1, np.divide(np.sin(x),x,where=x>0.1), 1. - x**2/6. + x**4/120. - x**6/5040. + x**8/362880.)
def W(x):
return 3*np.where(x > 0.1, np.divide(np.sin(x)-x*np.cos(x),x**3,where=x>0.1), 1./3. - x**2/30. + x**4/840. - x**6/45360. + x**8/3991680.)
# peak ellipticity/prolateness distributions
def fep(e,p,nu):
return 1125./np.sqrt(10*np.pi)*e*(e**2-p**2)*nu**5*np.exp(-5./2*nu**2*(3*e**2+p**2))
def fe(e,nu):
return 45*e*np.exp(-10*e**2*nu**2)*nu**2*(e*np.sqrt(10./np.pi)*nu+np.exp(5./2*e**2*nu**2)*(-1+5*e**2*nu**2)*erf(np.sqrt(5./2)*e*nu))
def Fe(e,nu):
return -3*np.exp(-10*e**2*nu**2)*e*np.sqrt(10/np.pi)*nu+np.exp(-15./2*e**2*nu**2)*(1-15*e**2*nu**2)*erf(np.sqrt(5./2)*e*nu)+erf(np.sqrt(10)*e*nu)
def Fp(e,p,nu):
return (np.exp(-5./2*p**2*nu**2)*(10*(e*np.exp(5./2*p**2*nu**2)+np.exp(5./2*e**2*nu**2)*p)*nu+np.exp(5./2*(e**2+p**2)*nu**2)*np.sqrt(10*np.pi)*(-1+5*e**2*nu**2)*(erf(np.sqrt(5./2)*e*nu)+erf(np.sqrt(5./2)*p*nu))))/(2*(10*e*nu+np.exp(5./2*e**2*nu**2)*np.sqrt(10*np.pi)*(-1+5*e**2*nu**2)*erf(np.sqrt(5./2)*e*nu)))
class Cosmology(object):
'''
Class for the generation of halo populations using the method in
arXiv:1905.05766. Instantiate the class once per cosmological scenario and
then use it to generate halos.
Parameters:
k, pk: a tabulation of the dimensionless matter power spectrum P(k), scaled
such that P(k,a) = P(k) a^{2g} during matter domination. Here, g ~ 0.9 due
to the noncontribution of baryons to clustering at microhalo scales (g=1
if baryons are neglected).
a: the scale factor at which the halo population is desired. Can be
changed later. Default 1.
method: the method for determining r_max and M_max (radius of and mass
enclosing maximum circular velocity). See Section IV of arXiv:1905.05766
for detail. Possible values are 'turnaround', 's=0', and 's=1' currently.
Can be changed later. Default 's=1'.
numax: only sample peaks with height delta < numax*sigma, where sigma is
the rms variance of the density field. Must be finite. Default 7.
nr: resolution of radius grid to use in sampling r_max, M_max. Default
nr=300 radius points.
Rmin: minimum radius of grid, in units of the characteristic comoving
correlation length Rstar (typically, Rstar is roughly the reciprocal of the
power spectrum cutoff wavenumber). Default 0.03.
Rmax: maximum radius of grid, in units of Rstar. Material that initially
lies beyond the comoving radius Rmax*Rstar is neglected in sampling r_max
and M_max. Default 300.
OmegaM: present-day matter density in units of the critical density.
Default 0.3089.
OmegaB: present-day baryon density in units of the critical density.
Default 0.048859.
rhoCrit: present-day critical density. Default 127.313454.
no_cov: if True, do not generate covariance matrices. Covariance matrices
are only required to sample r_max and M_max. Default False.
Methods:
sample_A(N): sample N instances of the r^-3/2 asymptotic coefficients A.
sample_rM(N): sample N instances of r_max and M_max.
sample(N): jointly sample A with r_max and M_max.
sample_profile():
sample_peak(N):
set_scale(a): change the scale factor at which to generate halos.
set_method(method): change the method used to sample r_max and M_max.
'''
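    # Illustrative usage (hypothetical k, pk tabulation of the dimensionless power spectrum):
    #   cosmo = Cosmology(k, pk, a=1e-2, method='s=1')
    #   nu, x = cosmo.sample_nux(1000)
    #   e, p = cosmo.sample_ep(nu, x)
    #   A = cosmo._sample_A(nu, x, e, p)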
# distributions
def dndnudx(self,nu,x):
return np.exp(-.5*nu**2)/((2*np.pi)**2*self.Rstar**3)*f(x)*np.exp(-.5*(x-self.gamma*nu)**2/(1.-self.gamma**2))/np.sqrt(2*np.pi*(1-self.gamma**2))
def dndx(self,x):
return f(x)/(2*(2*np.pi)**2*self.Rstar**3)*np.exp(-x**2/2.)*(1+erf(x*self.gamma/(2*(1-self.gamma**2))**.5))
def _covariance(self):
self.r = np.geomspace(self.Rmin,self.Rmax,self.nr)*self.Rstar
self.rm = self.r[1:]
self.lnr = np.log(self.r)
self.lnrm = np.log(self.rm)
# give everything a leading r index (nr)
pshape = (self.nk, 1, 1,) # k index, for integration
vshape = ( 1,self.nr, 1,) # vector index
kr = self.k.reshape(pshape)*self.r.reshape(vshape) # vector index
skr = sinc(kr)
time_start = time.time()
# column vectors, shape (1,nr,1)
self.cov_delta_nu = 1./self.sigma0*trapz(self.pk.reshape(pshape) *skr,x=self.lnk,axis=0)
self.cov_delta_x = 1./self.sigma2*trapz((self.pk*self.k**2).reshape(pshape)*skr,x=self.lnk,axis=0)
# matrices, shape (1,nr,nr)
self.cov_delta_delta = trapz(self.pk.reshape(pshape)*skr*skr.reshape((self.nk,1,self.nr)),x=self.lnk,axis=0)
del kr, skr
# delta(r) distribution
delta_cov = self.cov_delta_delta-1./(1-self.gamma**2)*(np.matmul(self.cov_delta_nu,self.cov_delta_nu.T)+np.matmul(self.cov_delta_x,self.cov_delta_x.T)-self.gamma*(np.matmul(self.cov_delta_nu,self.cov_delta_x.T)+np.matmul(self.cov_delta_x,self.cov_delta_nu.T)))
self.delta_vals, self.delta_vecs = linalg.eigh(delta_cov) # vecs.T@vecs = 1, vecs.T@M@vecs = np.diag(vals), [email protected](vals)@vecs.T = M
self.delta_vals[self.delta_vals<0]=0
print(' covariance matrices computed in %.2fs'%(time.time() - time_start))
def set_scale(self,a):
'''
Change the scale factor at which to generate the halo population.
'''
self.a = a
self.dcoll = dc/self.a**self.g
        self.dvir = dv/self.a**self.g
def set_method(self,method):
'''
Change the method used to sample r_max and M_max.
'''
        if method in ['ta', 'turnaround']:
self.s = 'ta'
self.beta = .131
self.betaM = 0.273
elif method in ['s=0']:
self.s = 0
self.beta = .414
self.betaM = 0.441
elif method in ['s=1']:
self.s = 1
self.beta = .846
self.betaM = 0.658
else:
raise ValueError('method must be turnaround, s=0, or s=1')
def __init__(self,k,pk,a=1,method='s=1',numax=7,nr=300,Rmin=0.03,Rmax=300,
OmegaM=0.3089,OmegaB=0.048859,rhoCrit=127.313454,no_cov=False,):
# power spectrum table
self.k = k
self.pk = pk
self.nk = len(k)
self.lnk = np.log(k)
# power spectrum properties
self.sigma0 = sigmaj(0,k,pk)
self.sigma1 = sigmaj(1,k,pk)
self.sigma2 = sigmaj(2,k,pk)
self.gamma = self.sigma1**2/(self.sigma0*self.sigma2)
self.Rstar = 3**.5*self.sigma1/self.sigma2
# cosmology
self.g = 5./4*(1-24./25*OmegaB/OmegaM)**.5-1./4
self.rhoC = rhoCrit*(OmegaM-OmegaB)
# total number density, reduced distributions
self.n = quad(self.dndx,0,np.inf)[0]
self.fx = lambda x: self.dndx(x)/self.n
self.fnux = lambda nu,x: self.dndnudx(nu,x)/self.n
# halo generation options
self.set_scale(a)
self.set_method(method)
self.alpha = 12.1
self.numax = numax
self.nr = nr
self.Rmin = Rmin
self.Rmax = Rmax
# prepare covariance matrices
if not no_cov:
self._covariance()
def init(self,*args,**kwargs):
self.__init__(*args,**kwargs)
def sample_nux(self,N):
'''
Sample peak properties nu and x (height and "steepness") as defined in BBKS
1986.
'''
fnuxmax = self.fnux(*fmin(lambda nux: -self.fnux(nux[0],nux[1]),[1,1],disp=False))
sample = lambda N: (np.random.rand(N)*self.numax, np.random.rand(N)*self.numax)
test = lambda nu,x: np.random.rand(nu.size)*fnuxmax < self.fnux(nu,x)
time_start = time.time()
nu,x = sample(N)
accept = test(nu,x)
reject, = np.where(~accept)
while reject.size > 0:
nu_,x_ = sample(reject.size)
accept = test(nu_,x_)
nu[reject[accept]] = nu_[accept]
x[reject[accept]] = x_[accept]
reject = reject[~accept]
print(' nu, x sampled in %.2fs'%(time.time() - time_start))
return nu,x
def sample_ep(self,nu,x):
'''
Sample peak properties e and p (ellipticity and prolateness) as defined in
BBKS 1986.
'''
# use inverse transform method
N = len(nu)
P = np.random.rand(N,2)
e = np.zeros(N)
p = np.zeros(N)
time_start = time.time()
for i in range(N):
e[i] = brentq(lambda e: Fe(e,nu[i])-P[i,0],0,2./nu[i])#,rtol=1e-3)
p[i] = brentq(lambda p: Fp(e[i],p,nu[i])-P[i,1],-e[i],e[i])#,rtol=1e-3)
print(' e, p sampled in %.2fs'%(time.time() - time_start))
return e,p
def _sample_A(self,nu,x,e,p,return_ac=False,return_ace=False):
'''
Predict A given peak parameters
'''
N = len(nu)
# compute asymptote
d = nu*self.sigma0
d2d = x*self.sigma2
#A = d**2.25*d2d**-.75*self.rhoC
A = dc**(1.5*(1-1./self.g))*d**(.75*(2./self.g+1))*d2d**-.75*self.rhoC
ec_mod = np.zeros(N)
time_start = time.time()
for i in range(N):
ec_mod[i] = ec_scale(e[i],p[i])
print(' A sampled in %.2fs'%(time.time() - time_start))
idx = (ec_mod>0)&(d>self.dcoll*ec_mod)
Ae = A.copy()
Ae[~idx] = -1
Ae[idx] *= ec_mod[idx]**(-1.5/self.g)*self.alpha
if not (return_ac or return_ace):
return Ae
else:
ret = [Ae]
if return_ac:
ret += [(dc/d)**(1./self.g)]
if return_ace:
ret += [(ec_mod*dc/d)**(1./self.g)]
return ret
def _sample_delta(self,nu,x,return_Delta=False,return_eps=False):
'''
Sample density profile about given peak
'''
delta_mean = 1./(1-self.gamma**2)*((self.cov_delta_nu-self.gamma*self.cov_delta_x)*nu+(self.cov_delta_x-self.gamma*self.cov_delta_nu)*x)
kappa = np.random.normal(0,self.delta_vals**.5).reshape((self.nr,1))
delta = (delta_mean+np.matmul(self.delta_vecs,kappa))
delta.shape = (self.nr)
if not (return_Delta or return_eps):
return delta
else:
d = nu*self.sigma0
Delta = 3./self.rm**3*(cumtrapz(delta*self.r**2,x=self.r)+d*self.r[0]**3/3.)
ret = [delta]
if return_Delta:
ret += [Delta]
if return_eps:
ret += [1.-delta[1:]/Delta]
return tuple(ret)
def _profile(self,d,delta,Delta,eps,return_X=False,return_dlnXdlnq=False):
'''
Predict halo mass profile given peak profile
'''
fail = [-1,-1]
if return_X:
fail += [-1]
if return_dlnXdlnq:
fail += [-1]
if d < self.dcoll:
return fail
# interpolation
d_interp = interp1d(self.lnr,delta,kind='cubic',fill_value='extrapolate')
D_interp = interp1d(self.lnrm,Delta,kind='cubic',fill_value='extrapolate')
e_interp = lambda lnr: 1-d_interp(lnr)/D_interp(lnr)
# get limits
try:
ic1 = np.where(Delta<self.dvir)[0][0]
lnrc1 = brentq(lambda lnr: D_interp(lnr)-self.dvir,self.lnrm[ic1-1],self.lnrm[ic1])
except:
ic1 = len(self.rm)-1
lnrc1 = self.lnrm[-1]
try:
            ic2 = np.where((eps<0)&(self.rm>self.Rstar))[0][0]
import numpy as np
fmt_dict = {
'sep': ',',
'cast_type': int
}
def solve(data, day_targets=(80, 256)):
initial_population = np.array(data, dtype=int)
age_counts = np.array([np.sum(initial_population == i) for i in range(9)], dtype=np.uint64)
result = {day:0 for day in day_targets}
for day in range(1, max(day_targets)+1):
        age_counts = np.roll(age_counts, -1)
        age_counts[6] += age_counts[8]
        if day in result:
            result[day] = int(age_counts.sum())
    return result
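# Illustrative check against the well-known example initial state [3, 4, 3, 1, 2]:
# solve([3, 4, 3, 1, 2]) is expected to give {80: 5934, 256: 26984457539}.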
import sys
import os
import json
from numpy.core.fromnumeric import shape
from torch._C import dtype
from torch.utils.data import Dataset
import torch
import numpy as np
from skimage import io, transform
import matplotlib.pyplot as plt
import math
from utils import image_proc
from timeit import default_timer as timer
import random
import scipy
import torchvision.transforms.functional as TF
from utils.utils import load_flow, load_graph_nodes, load_graph_edges, load_graph_edges_weights, load_graph_node_deformations, \
load_graph_clusters, load_int_image, load_float_image
from utils import image_proc
from NeuralNRT._C import compute_pixel_anchors_geodesic as compute_pixel_anchors_geodesic_c
from NeuralNRT._C import compute_pixel_anchors_euclidean as compute_pixel_anchors_euclidean_c
from NeuralNRT._C import compute_mesh_from_depth as compute_mesh_from_depth_c
from NeuralNRT._C import compute_mesh_from_depth_and_color as compute_mesh_from_depth_and_color_c
from NeuralNRT._C import erode_mesh as erode_mesh_c
from NeuralNRT._C import sample_nodes as sample_nodes_c
from NeuralNRT._C import compute_edges_geodesic as compute_edges_geodesic_c
from NeuralNRT._C import compute_edges_euclidean as compute_edges_euclidean_c
from NeuralNRT._C import construct_regular_graph as construct_regular_graph_c
from utils import utils
import open3d as o3d
import numba
import cv2
class StaticCenterCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
self.h, self.w = image_size
def __call__(self, img):
if len(img.shape) == 2:
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2]
else:
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2, :]
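# Example: StaticCenterCrop((480, 640), (448, 640)) keeps the full width and trims
# 16 rows from both the top and the bottom of a 480x640 image.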
class DeformDataset(Dataset):
def __init__(
self,
dataset_base_dir, data_version,
input_width, input_height, max_boundary_dist
):
self.dataset_base_dir = dataset_base_dir
self.data_version_json = os.path.join(
self.dataset_base_dir, data_version + ".json")
self.input_width = input_width
self.input_height = input_height
self.max_boundary_dist = max_boundary_dist
self.cropper = None
self._load()
def _load(self):
with open(self.data_version_json) as f:
self.labels = json.loads(f.read())
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
data = self.labels[index]
src_color_image_path = os.path.join(
self.dataset_base_dir, data["source_color"])
src_depth_image_path = os.path.join(
self.dataset_base_dir, data["source_depth"])
tgt_color_image_path = os.path.join(
self.dataset_base_dir, data["target_color"])
tgt_depth_image_path = os.path.join(
self.dataset_base_dir, data["target_depth"])
graph_nodes_path = os.path.join(
self.dataset_base_dir, data["graph_nodes"])
graph_edges_path = os.path.join(
self.dataset_base_dir, data["graph_edges"])
graph_edges_weights_path = os.path.join(
self.dataset_base_dir, data["graph_edges_weights"])
graph_node_deformations_path = os.path.join(
self.dataset_base_dir, data["graph_node_deformations"])
graph_clusters_path = os.path.join(
self.dataset_base_dir, data["graph_clusters"])
pixel_anchors_path = os.path.join(
self.dataset_base_dir, data["pixel_anchors"])
pixel_weights_path = os.path.join(
self.dataset_base_dir, data["pixel_weights"])
optical_flow_image_path = os.path.join(
self.dataset_base_dir, data["optical_flow"])
scene_flow_image_path = os.path.join(
self.dataset_base_dir, data["scene_flow"])
# Load source, target image and flow.
source, _, cropper = DeformDataset.load_image(
src_color_image_path, src_depth_image_path, data[
"intrinsics"], self.input_height, self.input_width
)
target, target_boundary_mask, _ = DeformDataset.load_image(
tgt_color_image_path, tgt_depth_image_path, data[
"intrinsics"], self.input_height, self.input_width, cropper=cropper,
max_boundary_dist=self.max_boundary_dist, compute_boundary_mask=True
)
optical_flow_gt, optical_flow_mask, scene_flow_gt, scene_flow_mask = DeformDataset.load_flow(
optical_flow_image_path, scene_flow_image_path, cropper
)
# Load/compute graph.
graph_nodes, graph_edges, graph_edges_weights, graph_node_deformations, graph_clusters, pixel_anchors, pixel_weights = DeformDataset.load_graph_data(
graph_nodes_path, graph_edges_path, graph_edges_weights_path, graph_node_deformations_path,
graph_clusters_path, pixel_anchors_path, pixel_weights_path, cropper
)
# Compute groundtruth transformation for graph nodes.
num_nodes = graph_nodes.shape[0]
# Check that flow mask is valid for at least one pixel.
assert np.sum(
optical_flow_mask) > 0, "Zero flow mask for sample: " + json.dumps(data)
# Store intrinsics.
fx = data["intrinsics"]["fx"]
fy = data["intrinsics"]["fy"]
cx = data["intrinsics"]["cx"]
cy = data["intrinsics"]["cy"]
fx, fy, cx, cy = image_proc.modify_intrinsics_due_to_cropping(
fx, fy, cx, cy, self.input_height, self.input_width, original_h=480, original_w=640
)
intrinsics = np.zeros((4), dtype=np.float32)
intrinsics[0] = fx
intrinsics[1] = fy
intrinsics[2] = cx
intrinsics[3] = cy
return {
"source": source,
"target": target,
"target_boundary_mask": target_boundary_mask,
"optical_flow_gt": optical_flow_gt,
"optical_flow_mask": optical_flow_mask,
"scene_flow_gt": scene_flow_gt,
"scene_flow_mask": scene_flow_mask,
"graph_nodes": graph_nodes,
"graph_edges": graph_edges,
"graph_edges_weights": graph_edges_weights,
"graph_node_deformations": graph_node_deformations,
"graph_clusters": graph_clusters,
"pixel_anchors": pixel_anchors,
"pixel_weights": pixel_weights,
"num_nodes": np.array(num_nodes, dtype=np.int64),
"intrinsics": intrinsics,
"index": np.array(index, dtype=np.int32)
}
def get_metadata(self, index):
return self.labels[index]
@staticmethod
def backproject_depth(depth_image, fx, fy, cx, cy, normalizer=1000.0):
return image_proc.backproject_depth(depth_image, fx, fy, cx, cy, normalizer=1000.0)
@staticmethod
def load_image(
color_image_path, depth_image_path,
intrinsics, input_height, input_width, cropper=None,
max_boundary_dist=0.1, compute_boundary_mask=False
):
# Load images.
color_image = io.imread(color_image_path) # (h, w, 3)
depth_image = io.imread(depth_image_path) # (h, w)
# Backproject depth image.
depth_image = image_proc.backproject_depth(
depth_image, intrinsics["fx"], intrinsics["fy"], intrinsics["cx"], intrinsics["cy"]) # (3, h, w)
depth_image = depth_image.astype(np.float32)
depth_image = np.moveaxis(depth_image, 0, -1) # (h, w, 3)
image_size = color_image.shape[:2]
# Crop, since we need it to be divisible by 64
if cropper is None:
cropper = StaticCenterCrop(image_size, (input_height, input_width))
color_image = cropper(color_image)
depth_image = cropper(depth_image)
# Construct the final image.
image = np.zeros((6, input_height, input_width), dtype=np.float32)
image[:3, :, :] = np.moveaxis(
color_image, -1, 0) / 255.0 # (3, h, w)
assert np.max(image[:3, :, :]) <= 1.0, np.max(image[:3, :, :])
image[3:, :, :] = np.moveaxis(
depth_image, -1, 0) # (3, h, w)
if not compute_boundary_mask:
return image, None, cropper
else:
assert max_boundary_dist
boundary_mask = image_proc.compute_boundary_mask(
depth_image, max_boundary_dist)
return image, boundary_mask, cropper
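    # load_image returns a 6-channel float32 image: channels 0-2 hold the RGB color
    # scaled to [0, 1], channels 3-5 hold the back-projected XYZ point image in camera space.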
@staticmethod
def load_flow(optical_flow_image_path, scene_flow_image_path, cropper):
# Load flow images.
optical_flow_image = load_flow(optical_flow_image_path) # (2, h, w)
scene_flow_image = load_flow(scene_flow_image_path) # (3, h, w)
# Temporarily move axis for cropping
optical_flow_image = np.moveaxis(
optical_flow_image, 0, -1) # (h, w, 2)
scene_flow_image = np.moveaxis(scene_flow_image, 0, -1) # (h, w, 3)
# Crop for dimensions to be divisible by 64
optical_flow_image = cropper(optical_flow_image)
scene_flow_image = cropper(scene_flow_image)
# Compute flow mask.
# (h, w, 2)
optical_flow_mask = np.isfinite(optical_flow_image)
optical_flow_mask = np.logical_and(
optical_flow_mask[..., 0], optical_flow_mask[..., 1]) # (h, w)
# (h, w, 1)
optical_flow_mask = optical_flow_mask[..., np.newaxis]
optical_flow_mask = np.repeat(
optical_flow_mask, 2, axis=2) # (h, w, 2)
# (h, w, 3)
scene_flow_mask = np.isfinite(scene_flow_image)
        scene_flow_mask = np.logical_and(
            np.logical_and(scene_flow_mask[..., 0], scene_flow_mask[..., 1]),
            scene_flow_mask[..., 2])  # (h, w)
# (h, w, 1)
scene_flow_mask = scene_flow_mask[..., np.newaxis]
# (h, w, 3)
scene_flow_mask = np.repeat(scene_flow_mask, 3, axis=2)
# set invalid pixels to zero in the flow image
optical_flow_image[optical_flow_mask == False] = 0.0
scene_flow_image[scene_flow_mask == False] = 0.0
# put channels back in first axis
optical_flow_image = np.moveaxis(
optical_flow_image, -1, 0).astype(np.float32) # (2, h, w)
optical_flow_mask = np.moveaxis(
optical_flow_mask, -1, 0).astype(np.int64) # (2, h, w)
scene_flow_image = np.moveaxis(
scene_flow_image, -1, 0).astype(np.float32) # (3, h, w)
scene_flow_mask = np.moveaxis(
scene_flow_mask, -1, 0).astype(np.int64) # (3, h, w)
return optical_flow_image, optical_flow_mask, scene_flow_image, scene_flow_mask
@staticmethod
def load_graph_data(
graph_nodes_path, graph_edges_path, graph_edges_weights_path, graph_node_deformations_path, graph_clusters_path,
pixel_anchors_path, pixel_weights_path, cropper
):
# Load data.
graph_nodes = load_graph_nodes(graph_nodes_path)
graph_edges = load_graph_edges(graph_edges_path)
graph_edges_weights = load_graph_edges_weights(
graph_edges_weights_path)
graph_node_deformations = load_graph_node_deformations(
graph_node_deformations_path) if graph_node_deformations_path is not None else None
graph_clusters = load_graph_clusters(graph_clusters_path)
pixel_anchors = cropper(load_int_image(pixel_anchors_path))
pixel_weights = cropper(load_float_image(pixel_weights_path))
assert np.isfinite(graph_edges_weights).all(), graph_edges_weights
assert np.isfinite(pixel_weights).all(), pixel_weights
if graph_node_deformations is not None:
assert np.isfinite(
graph_node_deformations).all(), graph_node_deformations
assert graph_node_deformations.shape[1] == 3
assert graph_node_deformations.dtype == np.float32
return graph_nodes, graph_edges, graph_edges_weights, graph_node_deformations, graph_clusters, pixel_anchors, pixel_weights
@staticmethod
def collate_with_padding(batch):
batch_size = len(batch)
# Compute max number of nodes.
item_keys = 0
max_num_nodes = 0
for sample_idx in range(batch_size):
item_keys = batch[sample_idx].keys()
num_nodes = batch[sample_idx]["num_nodes"]
if num_nodes > max_num_nodes:
max_num_nodes = num_nodes
# Convert merged parts into torch tensors.
# We pad graph nodes, edges and deformation ground truth with zeros.
batch_converted = {}
for key in item_keys:
if key == "graph_nodes" or key == "graph_edges" or \
key == "graph_edges_weights" or key == "graph_node_deformations" or \
key == "graph_clusters":
batched_sample = torch.zeros(
(batch_size, max_num_nodes, batch[0][key].shape[1]), dtype=torch.from_numpy(batch[0][key]).dtype)
for sample_idx in range(batch_size):
batched_sample[sample_idx, :batch[sample_idx][key].shape[0], :] = torch.from_numpy(
batch[sample_idx][key])
batch_converted[key] = batched_sample
else:
batched_sample = torch.zeros(
(batch_size, *batch[0][key].shape), dtype=torch.from_numpy(batch[0][key]).dtype)
for sample_idx in range(batch_size):
batched_sample[sample_idx] = torch.from_numpy(
batch[sample_idx][key])
batch_converted[key] = batched_sample
return [
batch_converted["source"],
batch_converted["target"],
batch_converted["target_boundary_mask"],
batch_converted["optical_flow_gt"],
batch_converted["optical_flow_mask"],
batch_converted["scene_flow_gt"],
batch_converted["scene_flow_mask"],
batch_converted["graph_nodes"],
batch_converted["graph_edges"],
batch_converted["graph_edges_weights"],
batch_converted["graph_node_deformations"],
batch_converted["graph_clusters"],
batch_converted["pixel_anchors"],
batch_converted["pixel_weights"],
batch_converted["num_nodes"],
batch_converted["intrinsics"],
batch_converted["index"]
]
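    # collate_with_padding zero-pads all graph tensors to the largest node count in the
    # batch so they can be stacked into fixed-size tensors; "num_nodes" is kept so the
    # padded entries can be masked out downstream.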
def erode_mesh(vertexPositions, faceIndices, nIterations, minNeighbors):
"""[summary]
Args:
vertexPositions ([type]): [N,3]
faceIndices ([type]): [N,3]
nIterations ([type]): int
minNeighbors ([type]): int
Returns:
[type]: [description]
"""
nonErodedVertices = erode_mesh_c(
vertexPositions, faceIndices, nIterations, minNeighbors)
return nonErodedVertices
def sample_nodes(vertexPositions, nonErodedVertices, nodeCoverage, useOnlyValidIndices):
nodePositions = np.zeros(shape=vertexPositions.shape, dtype=np.float32)
nodeIndices = np.zeros(
shape=[vertexPositions.shape[0], 1], dtype=np.int)
nodeIndices[:, :] = -1
nodes_size = sample_nodes_c(vertexPositions, nonErodedVertices,
nodePositions, nodeIndices, nodeCoverage, useOnlyValidIndices)
return nodePositions, nodeIndices, nodes_size
def sample_node_py_v2(vertexPositions, nodeCoverage=0.05):
nodeCoverage2 = nodeCoverage * nodeCoverage
nVertices = vertexPositions.shape[0]
shuffledVertices = [i for i in range(nVertices)]
np.random.shuffle(shuffledVertices)
nodePositionsVec = []
nodeIndices = []
for vertexIdx in shuffledVertices:
point = vertexPositions[vertexIdx]
bIsNode = True
for node in nodePositionsVec:
if np.sum((point-node) ** 2) <= nodeCoverage2:
bIsNode = False
break
if bIsNode:
nodePositionsVec.append(vertexPositions[vertexIdx])
nodeIndices.append(vertexIdx)
return np.array(nodePositionsVec, dtype=np.float32), np.array(nodeIndices, np.int)
def sample_nodes_v3(vertexPositions, nodeCoverage=0.05):
# down-sampling vertices at frist, then sample nodes
org_pcd = o3d.geometry.PointCloud()
org_pcd.points = o3d.utility.Vector3dVector(vertexPositions)
output, cubic_id, original_indices = org_pcd.voxel_down_sample_and_trace(
voxel_size=nodeCoverage*0.8, min_bound=vertexPositions.min(0), max_bound=vertexPositions.max(0))
sampled_vertices = np.asarray(output.points)
return sampled_vertices
def sample_nodes_py(vertexPositions, radius=0.05):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(vertexPositions)
pcd.colors = o3d.utility.Vector3dVector(
np.ones_like(vertexPositions, dtype=np.uint8)*np.array([0, 0, 255]))
# sample nodes python
downpcd = pcd.voxel_down_sample(voxel_size=0.025*0.7)
graph_nodes = downpcd.points
graph_nodes = sample_nodes(graph_nodes, radius=radius)
return np.array(graph_nodes)
def compute_edges_geodesic(vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence):
graphEdges = compute_edges_geodesic_c(
vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence)
return graphEdges
def compute_edges_geodesic_py(vertexPositions, faceIndices, nodeIndices, nMaxNeighbors, maxInfluence):
from queue import PriorityQueue
nVertices = vertexPositions.shape[0]
nFaces = faceIndices.shape[0]
nNodes = nodeIndices.shape[0]
vertexNeighbors = [[] for i in range(nVertices)]
# Preprocess vertex neighbors.
for faceIdx in range(nFaces):
for j in range(3):
v_idx = faceIndices[faceIdx, j]
for k in range(3):
n_idx = faceIndices[faceIdx, k]
if(v_idx == n_idx):
continue
vertexNeighbors[v_idx].append(n_idx)
# Compute inverse vertex -> node relationship.
mapVertexToNode = np.array([-1 for i in range(nVertices)])
for nodeId in range(nNodes):
vertexIdx = nodeIndices[nodeId]
        if vertexIdx >= 0:
mapVertexToNode[vertexIdx] = nodeId
graphEdges = -np.ones(shape=[nNodes, nMaxNeighbors], dtype=np.int)
for nodeId in range(nNodes):
nextVerticesWithIds = PriorityQueue()
visitedVertices = []
# Add node vertex as the first vertex to be visited
nodeVertexIdx = nodeIndices[nodeId]
if nodeVertexIdx < 0:
continue
nextVerticesWithIds.put([0., nodeVertexIdx, ])
# Traverse all neighbors in the monotonically increasing order.
neighborNodeIds = []
while not nextVerticesWithIds.empty():
nextVertexDist, nextVertexIdx = nextVerticesWithIds.get()
# We skip the vertex, if it was already visited before.
if nextVertexIdx in visitedVertices:
continue
# We check if the vertex is a node.
nextNodeId = mapVertexToNode[nextVertexIdx]
if nextNodeId >= 0 and nextNodeId != nodeId:
neighborNodeIds.append(nextNodeId)
if len(neighborNodeIds) > nMaxNeighbors:
break
# We visit the vertex, and check all his neighbors.
# We add only vertices under a certain distance.
visitedVertices.append(nextVertexIdx)
nextVertexPos = vertexPositions[nextVertexIdx]
nextNeighbors = vertexNeighbors[nextVertexIdx]
for neighborIdx in nextNeighbors:
neighborVertexPos = vertexPositions[neighborIdx]
dist = nextVertexDist + \
np.linalg.norm(nextVertexPos - neighborVertexPos, ord=2)
if dist <= maxInfluence:
nextVerticesWithIds.put([dist, neighborIdx])
# If we don't get any geodesic neighbors, we take one nearest Euclidean neighbor,
# to have a constrained optimization system at non-rigid tracking.
if len(neighborNodeIds) == 0:
nearestDistance2 = np.inf
nearestNodeId = -1
nodePos = vertexPositions[nodeVertexIdx]
for i in range(nNodes):
vertexIdx = nodeIndices[i]
if i != nodeId and vertexIdx >= 0:
neighborPos = vertexPositions[vertexIdx]
distance2 = np.linalg.norm(neighborPos - nodePos, ord=2)
if distance2 < nearestDistance2:
nearestDistance2 = distance2
nearestNodeId = i
if (nearestNodeId >= 0):
neighborNodeIds.append(nearestNodeId)
nNeighbors = min(nMaxNeighbors, len(neighborNodeIds))
for i in range(nNeighbors):
graphEdges[nodeId, i] = neighborNodeIds[i]
for i in range(nNeighbors, nMaxNeighbors):
graphEdges[nodeId, i] = -1
return graphEdges
def compute_edges_euclidean(nodePositions, nMaxNeighbors=8):
graphEdges = compute_edges_euclidean_c(nodePositions, nMaxNeighbors)
return graphEdges
@numba.jit()
def compute_distance(src_points, target_points):
num_src = src_points.shape[0]
num_tgt = target_points.shape[0]
distance = np.zeros(shape=[num_src, num_tgt])
for i in range(num_src):
for j in range(num_tgt):
distance[i, j] = np.linalg.norm(
src_points[i] - target_points[j], ord=2)
return distance
def compute_edges_py(graph_nodes, nMaxNeighbors=8):
distance = compute_distance(graph_nodes, graph_nodes)
sorted_index = np.argsort(distance)
    graph_edges = sorted_index[:, 1:nMaxNeighbors+1]  # skip column 0 (the node itself), keep the nMaxNeighbors nearest
return graph_edges
def compute_pixel_anchors_geodesic(graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage):
nMaxNeighbors = graphEdges.shape[1]
_, height, width = pointImage.shape
pixelAnchors = np.zeros(shape=[height, width, nMaxNeighbors], dtype=np.int)
pixelAnchors[:] = -1
pixelWeights = np.zeros(
shape=[height, width, nMaxNeighbors], dtype=np.float32)
compute_pixel_anchors_geodesic_c(
graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage, pixelAnchors, pixelWeights)
return pixelAnchors, pixelWeights
@numba.jit()
def compute_pixel_anchors_geodesic_py(pixelAnchors, pixelWeights, graphNodes, graphEdges, pointImage, neighborhoodDepth, nodeCoverage):
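    # For every pixel with valid depth: find the Euclidean-nearest graph node, grow its
    # geodesic neighborhood through graphEdges for `neighborhoodDepth` hops, keep the
    # GRAPH_K nearest candidates, and assign Gaussian skinning weights with sigma = nodeCoverage.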
    numNodes, numNeighbors = graphEdges.shape
GRAPH_K = 4
_, height, width = pointImage.shape
for y in range(height):
for x in range(width):
pixelPos = pointImage[:, y, x]
if pixelPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-pixelPos) ** 2).sum(axis=1))
            nearestNodeId = np.argsort(dists)[0]
# Compute the geodesic neighbor candidates.
neighbors = set([nearestNodeId, ])
newNeighbors = set([nearestNodeId, ])
for i in range(neighborhoodDepth):
currentNeighbors = set()
for neighborId in newNeighbors:
for k in range(numNeighbors):
currentNeighborId = graphEdges[neighborId, k]
if currentNeighborId >= 0:
currentNeighbors.add(currentNeighborId)
newNeighbors.clear()
newNeighbors = currentNeighbors - neighbors
                neighbors = neighbors.union(newNeighbors)
            # Keep only the GRAPH_K nearest geodesic neighbors (map argsort positions back to node ids).
            neighborList = list(neighbors)
            nodes_distances = [np.linalg.norm(
                graphNodes[neighborId] - pixelPos, ord=2) for neighborId in neighborList]
            nearestNodes = [neighborList[idx]
                            for idx in np.argsort(nodes_distances)[:GRAPH_K]]
# Compute skinning weights.
nearestGeodesicNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in nearestNodes:
nodePose = graphNodes[nodeId]
weight = np.exp(-(np.linalg.norm(pixelPos - nodePose, ord=2))
** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestGeodesicNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestGeodesicNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
                pixelAnchors[y, x, i] = nearestGeodesicNodeIds[i]
                pixelWeights[y, x, i] = skinningWeights[i]
return pixelAnchors, pixelWeights
@numba.jit()
def compute_mesh_anchors_geodesic_py(Anchors, Weights, graphNodes, graphEdges,
verts, neighborhoodDepth, nodeCoverage):
numNodes, numNeighbors = graphEdges.shape
GRAPH_K = 4
nverts, _ = verts.shape
for x in range(nverts):
vertPos = verts[x]
if vertPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-vertPos) ** 2).sum(axis=1))
nearestNodeId = np.argsort(dists)[0]
# Compute the geodesic neighbor candidates.
neighbors = set([nearestNodeId, ])
newNeighbors = set([nearestNodeId, ])
for i in range(neighborhoodDepth):
currentNeighbors = set()
for neighborId in newNeighbors:
for k in range(numNeighbors):
currentNeighborId = graphEdges[neighborId, k]
if currentNeighborId >= 0:
currentNeighbors.add(currentNeighborId)
newNeighbors.clear()
newNeighbors = currentNeighbors - neighbors
neighbors = neighbors.union(newNeighbors)
        # Keep only the GRAPH_K nearest geodesic neighbors; keep node ids and distances aligned.
        neighborList = list(neighbors)
        dists = [np.linalg.norm(
            graphNodes[neighborId] - vertPos, ord=2) for neighborId in neighborList]
        nearestOrder = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestNodeIds, skinningWeights = [], []
weightSum = 0
        for orderIdx in nearestOrder:
            nodeId = neighborList[orderIdx]
            dist = dists[orderIdx]
if dist > nodeCoverage:
continue
weight = np.exp(-dist ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
Anchors[x, i] = np.array(nearestNodeIds[i])
Weights[x, i] = np.array(skinningWeights[i])
return Anchors, Weights
@numba.jit()
def compute_mesh_anchors_euclidean_py(Anchors, Weights, graphNodes, verts, nodeCoverage):
GRAPH_K = 4
nverts, _ = verts.shape
for x in range(nverts):
vertPos = verts[x]
if vertPos[2] <= 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-vertPos) ** 2).sum(axis=1))
neighbors = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in neighbors:
dist = dists[nodeId]
if dist > nodeCoverage:
continue
weight = np.exp(-dist ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
Anchors[x, i] = np.array(nearestNodeIds[i])
Weights[x, i] = np.array(skinningWeights[i])
return Anchors, Weights
def compute_pixel_anchors_euclidean(graphNodes, pointImage, nodeCoverage):
nMaxNeighbors = graphNodes.shape[0]
_, height, width = pointImage.shape
pixelAnchors = - \
np.ones(shape=[height, width, nMaxNeighbors], dtype=np.int)
pixelWeights = np.zeros(
shape=[height, width, nMaxNeighbors], dtype=np.float32)
compute_pixel_anchors_euclidean_c(
graphNodes, pointImage, nodeCoverage, pixelAnchors, pixelWeights)
return pixelAnchors, pixelWeights
@numba.jit()
def compute_pixel_anchors_euclidean_py(graphNodes, pointImage, nodeCoverage):
GRAPH_K = 4
_, height, width = pointImage.shape
pixelAnchors = -np.ones(shape=[height, width, GRAPH_K], dtype=np.int)
pixelWeights = np.zeros(
shape=[height, width, GRAPH_K], dtype=np.float32)
for y in range(height):
for x in range(width):
pixelPos = pointImage[:, y, x]
if pixelPos[2] < 0:
continue
# find nearest Euclidean graph node.
dists = np.sqrt(((graphNodes-pixelPos) ** 2).sum(axis=1))
neighbors = np.argsort(dists)[:GRAPH_K]
# Compute skinning weights.
nearestEuclideanNodeIds, skinningWeights = [], []
weightSum = 0
for nodeId in neighbors:
distance = dists[nodeId]
if distance > nodeCoverage:
continue
weight = np.exp(-distance ** 2 / (2*nodeCoverage*nodeCoverage))
weightSum += weight
nearestEuclideanNodeIds.append(nodeId)
skinningWeights.append(weight)
nAnchors = len(nearestEuclideanNodeIds)
if weightSum > 0:
for i in range(nAnchors):
skinningWeights[i] = skinningWeights[i]/weightSum
elif nAnchors > 0:
for i in range(nAnchors):
skinningWeights[i] = 1 / nAnchors
# Store the results
for i in range(nAnchors):
pixelAnchors[y, x, i] = np.array(nearestEuclideanNodeIds[i])
pixelWeights[y, x, i] = np.array(skinningWeights[i])
return pixelAnchors, pixelWeights
@ numba.jit()
def compute_voxel_anchors(voxel_anchors, voxel_weigths, transfromed_graphNodes,
w2d_r, w2d_t, cell_size, nodeCoverage):
X_SIZE, Y_SIZE, Z_SIZE = voxel_anchors.shape[:3]
GRAPH_K = 4
for ix in range(X_SIZE):
for iy in range(Y_SIZE):
for iz in range(Z_SIZE):
voxelPos = (np.array([ix, iy, iz]) + 0.5) * cell_size
voxel_depth_frame = np.dot(voxelPos, w2d_r) + w2d_t
if (voxel_depth_frame[2] < 0):
continue
# find nearest Euclidean graph node.
dists = np.sqrt(
((transfromed_graphNodes-voxelPos) ** 2).sum(axis=1))
                neighbors = np.argsort(dists)[:GRAPH_K]
#######################################
# load mnist #
import mnist
import numpy as np
def normalize(img):
fac = 0.99 / 255
return img * fac + 0.01
def digit_to_layer(digit):
return (np.arange(10) == digit).astype(np.float)
train_images = np.array([normalize(img) for img in mnist.train_images()])
train_labels = np.array([digit_to_layer(digit) for digit in mnist.train_labels()])
test_images = np.array([normalize(img) for img in mnist.test_images()])
test_labels = np.array([digit_to_layer(digit) for digit in mnist.test_labels()])
###
import math
from functools import reduce
# Padding mode notes ('valid', 'same', 'full'): only the last assignment takes effect and the
# value is not used below; the conv/pool helpers assume zero padding internally.
padding = 'full'
# I x I x C
# O x O x K
def init_tuple_counter(count_to: tuple) -> tuple:
    return tuple(np.zeros(len(count_to), dtype=int))
def adder(counter: tuple, max: tuple) -> tuple:
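    # Odometer-style increment: advance `counter` by one within the mixed-radix space
    # bounded by `max`, carrying into the next position whenever a digit overflows.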
    if tuple(counter) == tuple(max):
return counter
counter_array = np.array(counter)
length = len(counter_array)
carry = True
for i in range(length - 1, -1, -1):
counter_array[i] = counter_array[i] + 1
carry = False
if counter_array[i] > max[i]:
counter_array[i] = 0
carry = True
if not carry:
break
counted = [max[:-1] == counter_array[:-1]]
if carry and counted:
counter_array = max
return tuple(counter_array)
def conv2d(input: np.array, output: np.array, filters: np.array, stride: tuple([int, int]) = (1, 1)) \
-> np.array:
## padding needs to be implemented
## proper strides
kernel_y = len(filters)
kernel_x = len(filters[0])
kernel_channels = len(filters[0][0])
num_filters = len(filters[0][0][0])
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
## assert padding is valid I x I x K
conv_out_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) \
+ 1
conv_out_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) \
+ 1
conv_shape = batch_shape + (conv_out_height, conv_out_width, num_filters)
# conv_out = np.ndarray(shape=conv_shape)
batch_idx = np.zeros(len(batch_shape), dtype=int)
    while tuple(batch_idx) != batch_shape:  ## probably needs to be changed
layer = input[tuple(batch_idx)]
for y_idx in range(0, conv_out_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, conv_out_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
kernel = layer[y_start:y_end, x_start:x_end]
for filter_idx in range(num_filters):
filter = filters[:, :, :, filter_idx]
multi = np.multiply(kernel, filter)
product_idx = (y_idx, x_idx, filter_idx)
output[tuple(batch_idx) + product_idx] = np.sum(multi)
batch_idx = adder(batch_idx, batch_shape)
return output
def conv_output_size(layer_dimensions: tuple, kernel_dimensions: tuple,
                     stride_dimensions: tuple, padding: int):
    return (int(((layer_dimensions[0] - kernel_dimensions[0] + 2 * padding) \
                 / stride_dimensions[0])) + 1,
            int(((layer_dimensions[1] - kernel_dimensions[1] + 2 * padding) \
                 / stride_dimensions[1])) + 1,
            kernel_dimensions[3])
def generate_conv2d_filters(kernel_dimensions: tuple, k: float = 2.0) -> np.array:
kernel_y = kernel_dimensions[0]
kernel_x = kernel_dimensions[1]
kernel_channels = kernel_dimensions[2]
num_filters = kernel_dimensions[3]
filters = np.ndarray(shape=kernel_dimensions)
filter_shape = tuple([kernel_y, kernel_x, kernel_channels])
nl = kernel_x * kernel_y * kernel_channels
std = math.sqrt(k / nl)
for filter_idx in range(num_filters):
filter = np.random.normal(scale=std, size=nl)
filter = filter.reshape(filter_shape)
filters[:, :, :, filter_idx] = filter
return filters
def lif_neuron(Vm: float, V_reset: float, V_th: float, tau_m: float, fire=True,
leaky=True) -> np.array:
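    # Leaky integrate-and-fire update: emit a spike and reset to V_reset once the membrane
    # potential reaches V_th; otherwise (optionally) decay the potential by exp(-1/tau_m).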
if Vm >= V_th and fire:
spike = 1
Vm = V_reset
else:
spike = 0
if leaky:
Vm = Vm * math.exp(-1 / tau_m)
return [Vm, spike]
def flatten(input: np.array, output: np.array, flatten_dim: int):
batch_dimensions = input.shape[:flatten_dim]
flattened_dimension = tuple([math.prod(input.shape[flatten_dim:])])
output = np.reshape(input, batch_dimensions + flattened_dimension)
return output
def lif_neuron_pool(Vin: np.array,
Vout: np.array,
spike_out: np.array,
Vreset: float = 0,
Vth: float = 0.75,
tau_m: int = 100,
fire: bool = True,
leaky: bool = True,
time_index: int = 0) -> np.array:
# [batch][time][spike_train]
# [batch][ Vin ]
# adequate dimensions to process
# a dimensions to
# assert (len(Vin.shape[-4]) > 2)
#if (Vin != NULL):
# s = 1 # TODO: implement smth here
# generate output arrays
# Vout = np.zero(shape=(Vin.shape))
# spike_out = np.zero(shape=(Vin.shape))
assert(Vin.shape == Vout.shape)
# process batches
batch_dimensions = Vin.shape[:max(time_index-1,0)]
spike_train_length = Vin.shape[time_index]
membrane_dimensions = Vin.shape[time_index+1:]
for batch_idx in np.ndindex(batch_dimensions):
for neuron_idx in np.ndindex(membrane_dimensions):
for t_idx in range(1, spike_train_length):
# membrane voltage for this step
t_current = batch_idx + tuple([t_idx]) + neuron_idx
t_previous = batch_idx + tuple([t_idx - 1]) + neuron_idx
Vm = Vin[t_current] + Vout[t_previous]
# simulate lif-neuron
[Vout[t_current], spike_out[t_current]] = lif_neuron(Vm, Vreset, Vth, tau_m, fire, leaky)
return [Vout, spike_out]
def generate_spike_train(p: float, t: int) -> np.array:
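    # Rate coding: each of the t time steps fires independently with probability p,
    # so brighter pixels (larger p) produce denser spike trains.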
dist = np.random.uniform(1, 0, t)
return np.array([int(item < p) for item in dist])
def generate_layer_spike_train(layer: np.array, train_length: int):
layer_height = len(layer)
layer_width = len(layer[0])
spike_layer = np.ndarray(shape=(train_length, layer_height, layer_width, 1))
for y in range(0, layer_height):
for x in range(0, layer_width):
train = np.array(generate_spike_train(layer[y][x], train_length))
for t in range(0, train_length):
spike_layer[t, y, x, 0] = train[t]
return spike_layer
def avg_pool(input: np.array, output:np.array, kernel_size: tuple([int, int]) = (2, 2), stride: tuple([int, int]) = (1, 1)) -> np.array:
pool = output
## padding needs to be implemented
## proper strides
kernel_y = kernel_size[1]
kernel_x = kernel_size[0]
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
pool_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) + 1
pool_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) + 1
pool_shape = batch_shape + (pool_height, pool_width, layer_channel)
# pool = np.ndarray(shape=pool_shape)
# TODO: Update this code
batch_idx = np.zeros(len(batch_shape), dtype=int)
    while tuple(batch_idx) != batch_shape:
layer = input[tuple(batch_idx)]
for y_idx in range(0, pool_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, pool_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
for channel_idx in range(0, layer_channel):
kernel = layer[y_start:y_end, x_start:x_end, channel_idx]
product = np.sum(kernel) / kernel.size
product_idx = (y_idx, x_idx, channel_idx)
pool[tuple(batch_idx) + product_idx] = product
batch_idx = adder(batch_idx, batch_shape)
return pool
def generate_dense_layer_weights(input_dimensions: tuple, num_neuron_output: int, k: float = 2.0) -> np.array:
axons_per_neuron = math.prod(input_dimensions)
synapses = np.ndarray(shape=(num_neuron_output, axons_per_neuron))
nl = axons_per_neuron
std = math.sqrt(k / nl)
for i in range(num_neuron_output):
synapses[i] = np.random.normal(scale=std, size=nl)
return synapses
def dense_forward(input_neurons: np.array, output_neurons: np.array, weights: np.array) -> np.array:
ins = input_neurons.shape
ons = output_neurons.shape
ws = weights.shape
# [batch][spike time]
batch_dimensions = input_neurons.shape[:-1]
# [][]
num_input_neurons = weights.shape[1]
num_output_neurons = weights.shape[0]
#[neuron y][neuron x][channel]
for batch_idx in np.ndindex(batch_dimensions):
for output_neuron_idx in range(num_output_neurons):
# action_potential = 0
# dot product
# for input_neuron_idx in range(num_input_neurons):
# ax = input_neurons[batch_idx][input_neuron_idx]
# wx = weights[output_neuron_idx][input_neuron_idx]
# action_potential = action_potential + ax*wx
output_neurons[batch_idx][output_neuron_idx] = np.dot(input_neurons[batch_idx], weights[output_neuron_idx])
return output_neurons
def generate_membrane(membrane_dimensions: tuple, value: float = 0.0):
membrane = np.ndarray(shape=membrane_dimensions)
membrane.fill(value)
return membrane
# This gains the term da_lif / d_net
def differentiate_spike_train(spike_train, Vth = 1):
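    # Surrogate gradient of the LIF activation w.r.t. its net input:
    # (1/Vth) * (1 + (1/gamma) * total_decay), where gamma is the number of spikes in the
    # train and the decay terms account for leak between consecutive spike times.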
# sum of decay over time
gamma = sum(spike_train)
if gamma == 0:
return 0
tau_m = len(spike_train)
total_decay = 0
t = tk = 1
for activation in spike_train:
if activation:
if t != tk:
decay = math.exp(-(t - tk) / tau_m)
total_decay = total_decay - (1 / tau_m) * decay
tk = t + 1
t = t + 1
return (1/Vth) * (1 + (1/gamma) * total_decay)
class Layer:
def __init__(self):
self.trainable = True
self.input_shape = None
self.output_shape = None
def count_parameters(self):
raise NotImplementedError()
def compute_output_shape(self, input_shape):
raise NotImplementedError()
def forward_propagate(self, A):
raise NotImplementedError()
def backward_propagate(self, dZ, cache):
raise NotImplementedError()
def get_weights(self):
raise NotImplementedError()
def set_weights(self, weights):
raise NotImplementedError()
def build(self, input_shape):
self.input_shape = input_shape
class Dropout(Layer):
def __init__(self, probability):
super().__init__()
self.probability = probability
self.mask = None
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = input_shape
self.reset()
def reset(self):
self.mask = np.random.binomial(1, 1-self.probability, size=self.output_shape)
def forward_propagate(self, A):
masked = np.multiply(self.mask, A)
cache = [{"mask" : self.mask,
"output" : masked}]
return masked, cache
def backward_propagate(self, dZ, cache):
assert(dZ.shape == self.mask.shape)
        return np.multiply(self.mask, dZ)
def compute_output_shape(self, input_shape):
return input_shape
class AveragePool2D(Layer):
def __init__(self, kernel_size, strides):
super().__init__()
assert (len(kernel_size) == 2)
assert (len(strides) == 2)
self.kernel_size = kernel_size
self.strides = strides
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
# incorrect input dimensions
assert(len(input_shape) >= 3)
# dimensions for sample / instance in the input
sample_shape = input_shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
kernel_x = self.kernel_size[1]
kernel_y = self.kernel_size[0]
stride_x = self.strides[0]
stride_y = self.strides[1]
padding = 0
return (int(((sample_y - kernel_y + 2 * padding) / stride_y)) + 1,
int(((sample_x - kernel_x + 2 * padding) / stride_x)) + 1,
sample_channels)
def forward_propagate(self, A):
# padding needs to be implemented
# separate batches
batch_shape = A.shape[:-3]
# unpack sample shape
sample_shape = A.shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
# unpack kernel
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
# unpack stride shape
stride_y = self.strides[0]
stride_x = self.strides[1]
# unpack pooling layer shape
pool_shape = self.compute_output_shape(A.shape)
pool_y = pool_shape[0]
pool_x = pool_shape[1]
# initialize the output convolution
Z_shape = batch_shape + pool_shape
Z = np.zeros(shape=Z_shape)
Z.fill(-9e99)
# begin pooling
for batch_idx in np.ndindex(batch_shape):
layer = A[batch_idx]
for y_idx in range(0, pool_y):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, pool_x):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
for channel_idx in range(0, sample_channels):
kernel = layer[y_start:y_end, x_start:x_end, channel_idx]
product = np.sum(kernel) / kernel.size
product_idx = (y_idx, x_idx, channel_idx)
Z[batch_idx + product_idx] = product
return Z, None
class Convolution2D(Layer):
def __init__(self, number_of_filters, kernel_size, strides):
super().__init__()
self.number_of_filters = number_of_filters
self.kernel_size = kernel_size
self.strides = strides
self.filters = []
self.kernel_shape = []
self.padding = 0
def build(self, input_shape):
k = 2
sample_shape = input_shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
self.input_shape = sample_shape
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
kernel_channels = sample_channels
kernel_filters = self.number_of_filters
self.kernel_shape = tuple([kernel_y, kernel_x, kernel_channels, kernel_filters])
self.output_shape = self.compute_output_shape(sample_shape)
self.filters = np.ndarray(shape=self.kernel_shape)
filter_shape = tuple([kernel_y, kernel_x, kernel_channels])
nl = kernel_x * kernel_y * kernel_channels
std = math.sqrt(k / nl)
for filter_idx in range(self.number_of_filters):
filter = np.random.normal(scale=std, size=nl)
filter = filter.reshape(filter_shape)
self.filters[:, :, :, filter_idx] = filter
def compute_output_shape(self, input_shape):
sample_shape = input_shape[-3:]
batch_shape = input_shape[:-3]
input_x = sample_shape[1]
input_y = sample_shape[0]
kernel_x = self.kernel_size[1]
kernel_y = self.kernel_size[0]
stride_x = self.strides[1]
stride_y = self.strides[0]
padding = 0
return (int(((input_y - kernel_y + 2 * padding) / stride_y)) + 1,
int(((input_x - kernel_x + 2 * padding) / stride_x)) + 1,
self.number_of_filters)
def forward_propagate(self, A):
# padding needs to be implemented
# separate batches
batch_shape = A.shape[:-3]
# unpack sample shape
sample_shape = A.shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
assert(sample_shape == self.input_shape)
# unpack kernel
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
# unpack stride shape
stride_y = self.strides[0]
stride_x = self.strides[1]
# unpack convolution
conv_shape = self.compute_output_shape(A.shape)
conv_y = conv_shape[0]
conv_x = conv_shape[1]
# initialize the output convolution
output_shape = batch_shape + conv_shape
output = np.zeros(shape= output_shape)
output.fill(-9e99)
# begin convolution
for batch_idx in np.ndindex(batch_shape):
layer = A[batch_idx]
for y_idx in range(0, conv_y):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, conv_x):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
kernel = layer[y_start:y_end, x_start:x_end]
for filter_idx in range(self.number_of_filters):
filter = self.filters[:, :, :, filter_idx]
multi = np.multiply(kernel, filter)
product_idx = (y_idx, x_idx, filter_idx)
output[batch_idx + product_idx] = np.sum(multi)
return output, None
def backward_propagate(self, dZ, cache):
raise NotImplementedError()
def get_weights(self):
return self.filters
def set_weights(self, weights):
self.filters = weights
class Flatten(Layer):
def __init__(self):
super().__init__()
self.input_shape = None
self.output_shape = None
def compute_output_shape(self, sample_shape):
return tuple([math.prod(sample_shape)])
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = self.compute_output_shape(input_shape)
def forward_propagate(self, A):
sample_dimensions = len(self.input_shape)
sample_shape = A.shape[-sample_dimensions:]
flattened_shape = tuple([math.prod(sample_shape)])
batch_shape = A.shape[:-sample_dimensions]
return np.reshape(A, batch_shape + flattened_shape), None
    def backward_propagate(self, dZ, cache):
        batch_shape = dZ.shape[:-1]
        # undo the flatten and return (dA, dW, db) to match the other layers' contract
        return np.reshape(dZ, batch_shape + self.input_shape), None, None
def get_weights(self):
return []
def set_weights(self, weights):
pass
class Dense(Layer):
def __init__(self, num_outputs):
super().__init__()
self.num_outputs = num_outputs
self.num_inputs = 0
self.weights = np.zeros(0)
self.output_shape = tuple([num_outputs])
def build(self, input_shape, k = 2):
self.input_shape = input_shape
sample_shape = input_shape[-3:]
self.num_inputs = math.prod(sample_shape)
self.weights = np.ndarray(shape=(self.num_outputs, self.num_inputs))
nl = self.num_inputs
std = math.sqrt(k / nl)
for i in range(self.num_outputs):
self.weights[i] = np.random.normal(scale=std, size=nl)
    def set_weights(self, weights):
        assert(len(weights.shape) == 2)
        self.num_inputs = weights.shape[1]
        self.num_outputs = weights.shape[0]
        self.weights = weights
        return self.weights
def get_weights(self):
return self.weights
def compute_output_shape(self, input_shape):
return tuple([self.num_outputs])
def forward_propagate(self, A):
print(A.shape)
print(self.weights.shape)
num_input_dimensions = len(self.input_shape)
sample_shape = A.shape[-num_input_dimensions:]
assert(self.input_shape == sample_shape)
batch_shape = A.shape[:-num_input_dimensions]
output_shape = batch_shape + self.output_shape
Z = np.zeros(shape=output_shape)
Z.fill(-9e99)
for batch_idx in np.ndindex(batch_shape):
Z[batch_idx] = self.weights @ A[batch_idx]
cache = { 'A' : A }
return Z, cache
    def backward_propagate(self, dZ, cache):
        A_prev = cache['A']
        batch_shape = dZ.shape[:-1]
        dW = np.zeros(shape=batch_shape + self.weights.shape)
        db = None
        dA = np.zeros(shape=batch_shape + (self.num_inputs,))
        for batch_idx in np.ndindex(batch_shape):
            # per-sample gradients: dW = dZ (outer) A_prev, dA = W^T dZ
            dW[batch_idx] = np.outer(dZ[batch_idx], A_prev[batch_idx])
            dA[batch_idx] = np.dot(self.weights.T, dZ[batch_idx])
        assert (dA.shape == A_prev.shape)
        assert (dW.shape == batch_shape + self.weights.shape)
        return dA, dW, db
class Membrane:
def __init__(self):
pass
def reset(self):
pass
def activate(self, Z):
return 0
def differentiate(self, dA, cache):
return 0
class LeakyIntegrateAndFire(Membrane):
def __init__(self, Vreset: float, Vth: float, tau_m: float, fire=True, leaky=True):
super().__init__()
self.Vreset = Vreset
self.Vth = Vth
self.tau_m = tau_m
self.fire = fire
self.leaky = leaky
self.input_shape = None
self.__num_input_dimensions = None
self.output_shape = None
def build(self, input_shape):
self.input_shape = input_shape
self.__num_input_dimensions = len(self.input_shape)
self.output_shape = input_shape
def neuron_activation(self, Vm):
spike = None
if Vm >= self.Vth and self.fire:
spike = 1
            Vm = self.Vreset  # reset the local membrane value that is returned below
else:
spike = 0
if self.leaky:
Vm = Vm * math.exp(-1 / self.tau_m)
# TODO: 1 / t needs to be implemented
return [Vm, spike]
def activate(self, Vin):
# this function can be optimised given that only the final Vm is required
# assert (Vin.shape == Vout.shape)
batch_shape = Vin.shape[:-self.__num_input_dimensions-1]
spike_train_length = Vin.shape[-self.__num_input_dimensions-1]
membrane_shape = Vin.shape[-self.__num_input_dimensions:]
activation = batch_shape + tuple([spike_train_length]) + membrane_shape
activation_shape = activation
Vout = np.ndarray(shape=(activation_shape))
Vout.fill(self.Vreset)
Vp = np.ndarray(shape=(batch_shape + membrane_shape))
Vp.fill(-9e99)
spike_train = np.ndarray(shape=(activation_shape))
spike_train.fill(0)
t_current = None
t_previous = None
for batch_idx in np.ndindex(batch_shape):
for neuron_idx in np.ndindex(membrane_shape):
for t_idx in range(1, spike_train_length):
# membrane voltage for this step
t_current = batch_idx + tuple([t_idx]) + neuron_idx
t_previous = batch_idx + tuple([t_idx - 1]) + neuron_idx
Vm = Vin[t_current] + Vout[t_previous]
# simulate lif-neuron
[Vout[t_current], spike_train[t_current]] = self.neuron_activation(Vm)
# store the final membrane voltage
Vp_idx = batch_idx + neuron_idx
Vp[Vp_idx] = Vout[t_current]
cache = {
'Vp' : Vp,
'Vout' : Vout,
'spike_train' : spike_train,
            'tau_m' : self.tau_m
}
return spike_train, cache
@staticmethod
def __compute_spike_train_decay(spike_train):
# sum of decay over time
total_decay = 0
gamma = sum(spike_train)
if gamma == 0:
return [total_decay, gamma]
        tau_m = len(spike_train)  # use the train length as the decay constant, as in differentiate_spike_train
        t = tk = 1
for activation in spike_train:
if activation:
if t != tk:
decay = math.exp(-(t - tk) / tau_m)
total_decay = total_decay - (1 / tau_m) * decay
tk = t + 1
t = t + 1
return [total_decay, gamma]
def __diff_LIF(self, dA, cache):
Vp = cache['Vp']
spike_trains = cache['spike_train']
tau_m = cache['tau_m']
batch_shape = Vp.shape[:-self.__num_input_dimensions]
membrane_shape = Vp.shape[-self.__num_input_dimensions:]
dZ_shape = batch_shape + membrane_shape
dZ = np.ndarray(shape=dZ_shape)
dZ.fill(-9e99)
for batch_idx in np.ndindex(batch_shape):
for neuron_idx in np.ndindex(membrane_shape):
idx = batch_idx + neuron_idx
spike_train = spike_trains[idx]
                [total_decay, gamma] = LeakyIntegrateAndFire.__compute_spike_train_decay(spike_train)
                # a neuron that never spiked has no surrogate gradient (avoid division by zero)
                dZ[idx] = 0.0 if gamma == 0 else \
                    dA[idx] * (1 / self.Vth) * (1 + (1 / gamma) * total_decay)
return dZ
def __diff_LI(self, dA, cache):
Vp = cache['Vp']
spike_trains = cache['spike_train']
tau_m = cache['tau_m']
batch_shape = Vp.shape[:-self.__num_input_dimensions]
membrane_shape = Vp.shape[-self.__num_input_dimensions:]
dZ_shape = batch_shape + membrane_shape
dZ = np.ndarray(shape=dZ_shape, dtype=np.float64)
dZ.fill(-9e99)
for batch_idx in np.ndindex(batch_shape):
for neuron_idx in np.ndindex(membrane_shape):
idx = batch_idx + neuron_idx
dZ[idx] = (1/tau_m) * (Vp[idx]) * dA[idx]
return dZ
def __diff_IF(self, dA, cache):
return None
def __diff_I(self, dA, cache):
return None
def differentiate(self, dA, cache):
if self.leaky:
if self.fire:
return self.__diff_LIF(dA, cache)
else:
return self.__diff_LI(dA, cache)
else:
if self.fire:
return self.__diff_IF(dA, cache)
else:
return self.__diff_I(dA, cache)
def get_output_shape(self):
return self.output_shape
# the idea of this model is to process everything LIF
class SpikingNeuralNetwork:
@staticmethod
def __traverse_batch(start, end, step):
i = start
while i < end:
yield i
i += step
yield end
def __init__(self):
self.__layers = []
self.__membrane = []
pass
def build(self, input_shape):
# set input shape for model
self.input_shape = input_shape
self.tau_m = input_shape[0]
for layer_idx in range(0, len(self.__layers)):
layer = self.__layers[layer_idx]
membrane = self.__membrane[layer_idx]
print(str(layer_idx) + ":" + str(layer))
layer.build(input_shape=input_shape)
input_shape = layer.compute_output_shape(input_shape)
if membrane is not None:
membrane.build(input_shape)
# last layers output shape to models output shape
self.output_shape = input_shape
def add_layer(self, layer: Layer, activation: Membrane = None):
self.__layers.append(layer)
self.__membrane.append(activation)
def forward_propagation(self, X):
caches = []
A = X
for layer_idx in range(0, len(self.__layers)):
layer = self.__layers[layer_idx]
membrane = self.__membrane[layer_idx]
print(layer)
Z, linear_cache = layer.forward_propagate(A)
print("Z: " + str(np.amax(Z)))
if membrane is not None:
print(membrane)
A, activation_cache = membrane.activate(Z)
print("A: " + str(np.amax(A)))
cache = { 'linear_cache' : linear_cache,
'activation_cache' : activation_cache }
caches.append({ 'A': A,
'Z': Z,
'cache': cache})
else:
print("Z: " + str(np.amax(Z)))
A = Z
cache = { 'linear_cache' : linear_cache,
'activation_cache' : None }
caches.append({ 'A': None,
'Z': Z,
'cache': cache})
return A, caches
def compute_cost(self, A, Y):
return 0.5 * np.sum(np.power((A - Y), 2))
def compute_loss(self, A, Y):
# np.mean(np.square(Y - A), axis=-2) <- MSE loss
return Y - A
def backward_propagation(self, AL, caches, Y):
grads = []
L = len(self.__layers)
m = AL.shape[1] ## figure this out
# gradients
dZ, dW, db = (None, None, None)
# derivative of activation in final layer
dAL = self.compute_loss(AL, Y)
grad = [
{
"dZ": None,
"dA": dAL,
"dW": None,
"db": None
}
]
grads.insert(0, grad)
# backwards propagating the loss
for layer_idx in range(L-1, 0, -1):
layer = self.__layers[layer_idx]
A, Z, cache = (caches[layer_idx]['A'], caches[layer_idx]['Z'], caches[layer_idx]['cache'])
linear_cache, activation_cache = (cache['linear_cache'], cache['activation_cache'])
membrane = self.__membrane[layer_idx]
if membrane is not None:
dZ = membrane.differentiate(dAL, activation_cache)
dAL, dW, db = layer.backward_propagate(dZ, linear_cache)
else:
dAL, dW, db = layer.backward_propagate(dAL, linear_cache)
grad = [
{
"dZ":dZ,
"dA":dAL,
"dW":dW,
"db":db
}
]
grads.insert(0, grad)
return grads
def fit(self, X=None, Y=None, epochs=1, batch_size=None, learning_rate=0.002):
# batch_size + (time, height, width, channel)
num_input_dimensions = len(self.input_shape)
num_output_dimensions = len(self.output_shape)
batch_shape = X.shape[:-num_input_dimensions]
batch_ndim = len(batch_shape)
num_samples = math.prod(batch_shape)
sample_shape = X.shape[-num_input_dimensions:]
        sample_label_shape = Y.shape[-num_output_dimensions:]
assert(sample_label_shape == self.output_shape)
batch_samples = np.zeros(shape=tuple([batch_size]) + sample_shape)
batch_samples_labels = np.zeros(shape=tuple([batch_size]) + sample_label_shape)
        # output from the operation
        output = np.zeros(shape=batch_shape + Y.shape)
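# A minimal usage sketch (layer sizes, thresholds, and the spike-train length are illustrative
# assumptions, not values taken from any particular experiment):
#   snn = SpikingNeuralNetwork()
#   snn.add_layer(Convolution2D(number_of_filters=8, kernel_size=(3, 3), strides=(1, 1)),
#                 LeakyIntegrateAndFire(Vreset=0.0, Vth=0.75, tau_m=100))
#   snn.add_layer(AveragePool2D(kernel_size=(2, 2), strides=(2, 2)))
#   snn.add_layer(Flatten())
#   snn.add_layer(Dense(10), LeakyIntegrateAndFire(Vreset=0.0, Vth=0.75, tau_m=100, fire=False))
#   X = np.array([generate_layer_spike_train(img, 20) for img in train_images[:8]])
#   snn.build(input_shape=X.shape[1:])  # (time, height, width, channel)
#   AL, caches = snn.forward_propagation(X)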
from __future__ import absolute_import, print_function, division, unicode_literals
# credit to: https://github.com/matthias-k/pysaliency/blob/master/pysaliency/metrics.py
import numpy as np
def normalize_saliency_map(saliency_map, cdf, cdf_bins):
""" Normalize saliency to make saliency values distributed according to a given CDF
"""
smap = saliency_map.copy()
shape = smap.shape
smap = smap.flatten()
smap = np.argsort(np.argsort(smap)).astype(float)
smap /= 1.0 * len(smap)
inds = np.searchsorted(cdf, smap, side='right')
smap = cdf_bins[inds]
    smap = smap.reshape(shape)
return smap
def convert_saliency_map_to_density(saliency_map, minimum_value=0.0):
if saliency_map.min() < 0:
saliency_map = saliency_map - saliency_map.min()
saliency_map = saliency_map + minimum_value
saliency_map_sum = saliency_map.sum()
if saliency_map_sum:
saliency_map = saliency_map / saliency_map_sum
else:
saliency_map[:] = 1.0
saliency_map /= saliency_map.sum()
return saliency_map
def NSS(saliency_map, xs, ys):
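    # Normalized Scanpath Saliency: z-score the map (zero mean, unit std) and return its
    # values at the fixation pixels (xs, ys); the caller typically averages these.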
xs = np.asarray(xs, dtype=np.int)
ys = np.asarray(ys, dtype=np.int)
mean = saliency_map.mean()
std = saliency_map.std()
value = saliency_map[ys, xs].copy()
value -= mean
if std:
value /= std
return value
def CC(saliency_map_1, saliency_map_2):
def normalize(saliency_map):
saliency_map -= saliency_map.mean()
std = saliency_map.std()
if std:
saliency_map /= std
return saliency_map, std == 0
smap1, constant1 = normalize(saliency_map_1.copy())
smap2, constant2 = normalize(saliency_map_2.copy())
if constant1 and not constant2:
return 0.0
else:
return np.corrcoef(smap1.flatten(), smap2.flatten())[0, 1]
def probabilistic_image_based_kl_divergence(logp1, logp2, log_regularization=0, quotient_regularization=0):
if log_regularization or quotient_regularization:
return (np.exp(logp2) * np.log(log_regularization + np.exp(logp2) / (np.exp(logp1) + quotient_regularization))).sum()
else:
        return (np.exp(logp2) * np.log(np.exp(logp2) / np.exp(logp1))).sum()
from os import environ
PHA_DIR = environ['PHA_DIR']
ADD_TO_PATH = PHA_DIR + '/scripts/python_scripts'
from sys import path
path.append(ADD_TO_PATH)
from processcsv import ProcessCSV
from processcsv import StatEnum
import sys
import utility as util
import copy
import csv
import numpy as np
from itertools import izip
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from scipy import stats
import shutil
import os
class LGReader(ProcessCSV):
"""
class LGReader
Performs various functions related to the output file from PHA code
"""
default_data_dir = os.environ['PHA_DIR'] + '/data'
default_results_dir = os.environ['PHA_DIR'] + '/results'
default_ip_opt_fname = default_data_dir + '/' + "ip_opt.csv"
default_in_fname = default_results_dir + '/' + "pha.csv"
max_num_dec_places = 10
# Column names
colname_inst = "INSTANCE"
colname_lpobj = "LP OBJ"
colname_lpopt = "LP OBJ"
colname_lpbound = "LP OBJ"
colname_ipopt = "IP OBJ"
colname_ipobj = "IP OBJ"
colname_ipbound = "IP OBJ"
colname_numrows = "NUM ROWS"
colname_numcols = "NUM COLS"
colname_fraccore = "FRAC CORE"
colname_numpoints = "NUM POINTS (TOTAL)"
colname_numfinalpoints = "NUM FINAL POINTS (TOTAL)"
colname_numsics = "NUM SIC"
colname_numpha = "NUM PHA"
colname_sicbound = "SIC BOUND"
colname_phabound = "PHA BOUND"
colname_allcutsbound = "ALL CUTS (WITH SIC)"
colname_activesics = "ACTIVE SIC"
colname_activegics = "ACTIVE PHA"
colname_objtried = "OBJ"
colname_dupsicfails = "DUPLICATE_SIC_FAILS"
colname_dupgicfails = "DUPLICATE_GIC_FAILS"
colname_totaltime = "TOTAL_TIME"
colname_cutfails = [
"DUAL_CUT_SOLVER_FAILS",
"DUPLICATE_SIC_FAILS",
"DUPLICATE_GIC_FAILS",
"ORTHOGONALITY_FAILS",
"TIMELIMIT_FAILS",
"ITERATION_FAILS",
"ABANDONED_FAILS",
"NON_CUTTING_FAILS",
"SCALING_FAILS",
"UNSPEC_FAILS",
"CUT_LIMIT_FAILS",
"PRIMAL_CUT_SOLVER_FAILS",
"PRIMAL_CUT_SOLVER_NO_OBJ_FAILS",
"NUMERICAL_ISSUES_WARNING_NO_OBJ",
"NUMERICAL_ISSUES_NO_OBJ"
]
colname_numcuts = [
"CUT_VERTICES_CUT_HEUR",
# "DUMMY_OBJ_CUT_HEUR",
"ITER_BILINEAR_CUT_HEUR",
"UNIT_VECTORS_CUT_HEUR",
"TIGHT_POINTS_CUT_HEUR",
"SPLIT_SHARE_CUT_HEUR"
]
colname_activecuts = [
"ACTIVE CUT_VERTICES_CUT_HEUR",
"ACTIVE DUMMY_OBJ_CUT_HEUR",
"ACTIVE ITER_BILINEAR_CUT_HEUR",
"ACTIVE UNIT_VECTORS_CUT_HEUR",
"ACTIVE TIGHT_POINTS_CUT_HEUR",
"ACTIVE SPLIT_SHARE_CUT_HEUR"
]
def __init__(self, in_fname = None, hasipval = None, fill_ip_vals = None,
ip_opt_fname = None, out_fname = None,
inst_name = None, col_info = None, num_header_lines = None,
compute_gap_closed = True):
""" Constructor, sets reader object and instances to all instances """
if (in_fname is None):
self._in_fname = copy.deepcopy(self.default_in_fname)
else:
self._in_fname = copy.deepcopy(in_fname)
if __debug__:
print( "\n## In LGReader(): Opening file %s for reading ##" % self._in_fname )
# If requested, set IP values if they are not available
if (fill_ip_vals is None):
fill_ip_vals = False
if (hasipval is None):
self._hasipval = True # This is so we do not needlessly recreate the ip file
else:
self._hasipval = hasipval
if (ip_opt_fname is not None):
self._ip_opt_fname = copy.deepcopy(ip_opt_fname)
else:
self._ip_opt_fname = copy.deepcopy(self.default_ip_opt_fname)
if ((not self._hasipval) and fill_ip_vals):
self.fill_ip_opt(out_fname = self._in_fname) # Do overwrite the input file
self._hasipval = True
super(LGReader, self).__init__(self._in_fname, inst_name, col_info, num_header_lines, compute_gap_closed)
# If the IP values exist, calculate gap closed
# Note that we check the hasipval boolean, since otherwise gap_closed()
# will grab the IP values whether or not fill_ip_vals is True
if ((inst_name is not None) and (self._hasipval) and (not hasattr(self, '_ip_opt'))):
self.get_ip_opt()
if (compute_gap_closed):
self.gap_closed()
def set_inst(self, inst_name = None, compute_gap_closed = True):
""" Override set_inst from parent class so class values will be reset """
super(LGReader, self).set_inst(inst_name, compute_gap_closed)
if ((inst_name is not None) and (self._hasipval)):
self._ip_opt = None
self.get_ip_opt()
if (compute_gap_closed):
self.gap_closed()
def get_ip_opt(self, ip_opt_fname = None):
"""
Grabs IP optimum values from ip_opt file, only for relevant instances
Saves the values internally as self._ip_opt, a numpy array
"""
# TODO fix bug here that ip_opt_fname might have changed... really not a bug
if ((hasattr(self, '_ip_opt')) and (self._ip_opt is not None)):
return self._ip_opt
if (ip_opt_fname is None):
ip_opt_fname = copy.deepcopy(self._ip_opt_fname)
if __debug__:
print( "\n## Reading IP file: %s ##" % ip_opt_fname )
# Read IP opt file in
ip_opt_reader = ProcessCSV(ip_opt_fname, num_header_lines = 1)
ip_opt_reader._num_dec_places = self.max_num_dec_places
inst_names = super(LGReader, self).get_param(self.colname_inst)
self._ip_opt = ['' for i in range(len(inst_names))]
for inst in range(len(inst_names)):
curr_inst = inst_names[inst]
# find_first_val returns a table, with a header row
# The first row contains all the column information
val_str = ip_opt_reader.find_first_val(col_info = self.colname_ipobj, inst_name = curr_inst)[1][1]
if (len(val_str) > 0):
curr_inst_ip_obj = float(val_str)
self._ip_opt[inst] = curr_inst_ip_obj
if __debug__:
print( "Instance: %s\tIP obj: %f" % (curr_inst, curr_inst_ip_obj) )
elif __debug__:
print( "Instance %s not found in IP file" % curr_inst )
del ip_opt_reader
self._ip_opt = np.asarray(self._ip_opt)
return self._ip_opt
def inst_info(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
cut_presolve = None):
""" Get instance basic information (rows, columns, avg time) """
stat = [StatEnum.FIRST, StatEnum.FIRST, StatEnum.AVG]
col_info = [self.colname_numrows, self.colname_numcols, self.colname_totaltime]
typecol = [int, int, float]
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, cut_presolve)
# Remove the columns that are not relevant
tab = [[tab[i][j] for j in [0,1,3,5]] for i in range(len(tab))]
tab[0] = ["Instance", "Rows", "Cols", "Time"]
# num_rows
col_index = 1
self._num_rows = np.asarray([int(round(float(tab[i][col_index]))) for i in range(1,len(tab))])
# num_cols
col_index += 1
self._num_cols = np.asarray([int(round(float(tab[i][col_index]))) for i in range(1,len(tab))])
util.print_pretty_table(tab)
return tab
def get_best_row(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None):
"""
Get best-performing row per instance
"""
stat = [StatEnum.MAX]
col_info = [self.colname_allcutsbound] #[self.colname_activesics, self.colname_numpha, self.colname_activegics]
typecol = [float] #[int, int, int]
secondary_stat = [StatEnum.MAXRATIO]
secondary_col_info = [[self.colname_activegics, self.colname_numpha]]
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear,
use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve,
secondary_stat = secondary_stat,
secondary_col_info = secondary_col_info)
# Get best row for each instance
self._best_row = [int(tab[i][2]) for i in range(1,len(tab))]
def obj_fails_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
                        use_cut_vert_heur = None,
                        use_tight_points_heur = None,
                        cut_presolve = None):
"""
Analyze number of cuts generated in relation to maximum possible,
as well as some potential common problems (e.g., duplicate SICs in the cut LP)
Returns 2D list with the information
[instance, num splits, num sics, num active sics, num gics, num active gics, obj tried, fails...]
"""
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
num_inst = len(inst_name)
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut,
max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear,
use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
#stat = [StatEnum.MAX]
#col_info = [self.colname_activesics, self.colname_numpha, self.colname_activegics]
#typecol = [int, int, int]
#tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
# num_alg2_rounds, num_rays_cut, cut_limit,
# use_split_share, num_cuts_iter_bilinear,
# use_cut_vert_heur, cut_presolve)
out_tab = [[
"Instance", "Splits", "SICs", "Active SICs", "GICs", "Active GICs", "Obj",
#"Duplicate tilt", "Unbounded tilt",
"Unbounded", "Dup SIC", "Dup GIC", "Orth", "Time limit", "Iter limit", "Abandoned",
"Non-cutting", "Scaling", "Unspec", "Cut limit", "Primal infeas", "Primal infeas (setup)",
"Numerics warnings", "Numerics errors"]]
outcol_obj_tried = out_tab[0].index("Obj");
numcols_out_tab = len(out_tab[0]);
np_out_tab = np.zeros(shape = (num_inst, numcols_out_tab), dtype=int)
# Get column indices for each of the relevant stats
index_numsplits = super(LGReader, self).get_col_index(self.colname_fraccore)
index_numsics = super(LGReader, self).get_col_index(self.colname_numsics)
index_activesics = super(LGReader, self).get_col_index(self.colname_activesics)
index_numpha = super(LGReader, self).get_col_index(self.colname_numpha)
index_activegics = super(LGReader, self).get_col_index(self.colname_activegics)
index_objtried = super(LGReader, self).get_col_index(self.colname_objtried)
index_cutfail = [-1 for i in range(len(self.colname_cutfails))]
for i in range(len(self.colname_cutfails)):
index_cutfail[i] = super(LGReader, self).get_col_index(self.colname_cutfails[i])
for i in range(len(np_out_tab)):
if __debug__:
print( "## Obj_fails_table: Filling in information for instance %d with name %s ##" % (i,inst_name[i]) )
if self._best_row[i] >= 0:
row = super(LGReader, self).get_row(self._best_row[i])
np_out_tab[i,1] = int(row[index_numsplits])
np_out_tab[i,2] = int(row[index_numsics])
np_out_tab[i,3] = int(row[index_activesics])
np_out_tab[i,4] = int(row[index_numpha])
np_out_tab[i,5] = int(row[index_activegics])
obj_tried = int(row[index_objtried])
np_out_tab[i,outcol_obj_tried] = obj_tried
for j in range(len(index_cutfail)):
curr_fail = int(row[index_cutfail[j]])
np_out_tab[i,outcol_obj_tried+1+j] = 100. * curr_fail / obj_tried
else:
for j in range(len(index_cutfail)):
np_out_tab[i,outcol_obj_tried+1+j] = 0
out_tab.extend(np_out_tab.tolist())
for i in range(1,num_inst+1):
out_tab[i][0] = inst_name[i-1]
util.print_pretty_table(out_tab)
return out_tab
def active_cuts_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None):
"""
Analyze which heuristics led to most active cuts
Returns 2D list with the information
[instance, num sics, num active sics, num gics, num active gics, active from which heur...]
"""
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
num_inst = len(inst_name)
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
out_tab = [[
"Instance", "SICs", "Active SICs", "GICs", "Active GICs",
"V", "Active V", #
"B", "Active B",
"R", "Active R",
"T", "Active T",
"S", "Active S"
]]
numcols_out_tab = len(out_tab[0]);
np_out_tab = np.zeros(shape = (num_inst, numcols_out_tab), dtype=int)
# Get column indices for number of each type of cut
# Number active of that type is the subsequent column (be aware could change)
colindex_first = [super(LGReader, self).get_col_index(self.colname_numsics),
super(LGReader, self).get_col_index(self.colname_activesics),
super(LGReader, self).get_col_index(self.colname_numpha),
super(LGReader, self).get_col_index(self.colname_activegics)]
colindex_numcuts = [super(LGReader, self).get_col_index(self.colname_numcuts[i]) for i in range(len(self.colname_numcuts))]
for i in range(len(np_out_tab)):
if __debug__:
print( "## Active_cuts_table: Filling in information for instance %d with name %s ##" % (i,inst_name[i]) )
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
for j in range(len(colindex_first)):
np_out_tab[i,j+1] = curr_row[colindex_first[j]]
for j in range(len(colindex_numcuts)):
np_out_tab[i,2*j+1+len(colindex_first)] = curr_row[colindex_numcuts[j]]
np_out_tab[i,2*j+1+len(colindex_first)+1] = curr_row[colindex_numcuts[j]+1]
else:
for j in range(len(colindex_first)):
np_out_tab[i,j+1] = 0
for j in range(len(colindex_numcuts)):
np_out_tab[i,2*j+1+len(colindex_first)] = 0
np_out_tab[i,2*j+1+len(colindex_first)+1] = 0
out_tab.extend(np_out_tab.tolist())
for i in range(1,num_inst+1):
out_tab[i][0] = inst_name[i-1]
util.print_pretty_table(out_tab)
return out_tab
def write_best_params(self, out_dir = None):
""" Writes best parameters for each instance to file """
if (out_dir is None):
out_dir = self.default_data_dir + "/params"
try:
os.makedirs(out_dir)
except OSError: # for the race condition, however unlikely
if not os.path.isdir(out_dir):
raise
#num_params = super(LGReader, self)._param_container.num_params
num_params = self._param_container.num_params
inst_names = super(LGReader, self).get_param(self.colname_inst)
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_names, cut_limit=[1000], cut_presolve=[0]) # these are current defaults we want to report for, but this can be confusing if we forget we set it...
out_tab = [copy.deepcopy(self._header[0])]
out_tab.append(copy.deepcopy(self._header[1]))
for i in range(len(inst_names)):
inst = inst_names[i]
row = super(LGReader, self).get_row(self._best_row[i]) if self._best_row[i] >= 0 else [0 for i in range(num_params)]
out_tab.append(row)
curr_fname = out_dir + '/' + inst + "_params.txt"
with open(curr_fname, 'wb') as out_f:
if __debug__:
print( "## Writing parameters for %s ##" % inst )
for p in range(1,num_params):
out_f.write(str(self._param_container.param_names[p]).lower() + ' ')
curr_val = self._param_container.type_param[p](row[p])
out_f.write(str(curr_val) + '\n')
# Save parameter information
with open(out_dir + '/' + "best_runs.csv", 'wb') as out_f:
out_writer = csv.writer(out_f)
out_writer.writerows(out_tab)
del out_writer
def gap_closed(self, inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None, recompute_best_row = None):
"""
Adds gap closed information to the instance of LGReader
In addition, keeps lp opt, ip opt, osic best, and all cuts best
Defaults to all instances, unless inst_name is specified
"""
# Make sure that ip values are available
# (i.e., reread the file if necessary with ip values filled)
if (not self._hasipval):
self.fill_ip_opt(out_fname = self._in_fname)
self._hasipval = True
# Get best_row
if (not hasattr(self, '_best_row')) or recompute_best_row:
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
# Set defaults
col_info = [self.colname_lpobj, self.colname_sicbound, self.colname_allcutsbound]
stat = [StatEnum.FIRST, StatEnum.MAX, StatEnum.MAX]
typecol = [float, float, float]
if (inst_name is None):
inst_name = super(LGReader, self).get_param(self.colname_inst)
# Save the current number of output decimal places, and set current outputted decimal places
saved_num_dec_places = self._num_dec_places
self._num_dec_places = self.max_num_dec_places
if __debug__:
print( "\n## Calculating gap_closed ##" )
# tab will have columns
# [inst, lp opt, row, osic opt, row, all cut opt, row, num rows]
tab = super(LGReader, self).stat_col(stat, col_info, typecol, inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
self._num_dec_places = saved_num_dec_places
#self._best_row = [int(tab[i][6]) for i in range(1,len(tab))]
# ip opt
if (not hasattr(self, '_ip_opt')):
self.get_ip_opt()
# Add information from instances
lp_col_index = super(LGReader, self).get_col_index(self.colname_lpobj)
sics_col_index = super(LGReader, self).get_col_index(self.colname_sicbound)
allcuts_col_index = super(LGReader, self).get_col_index(self.colname_allcutsbound)
self._lp_opt = []
self._sic_opt= []
self._sics_gap = []
self._allcuts_opt = []
self._allcuts_gap = []
self._gap_closed = []
for i in range(len(inst_name)):
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
self._lp_opt.append(float(curr_row[lp_col_index]))
self._sic_opt.append(float(curr_row[sics_col_index]))
self._sics_gap.append(100 * (self._sic_opt[i] - self._lp_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
self._allcuts_opt.append(float(curr_row[allcuts_col_index]))
self._allcuts_gap.append(100 * (self._allcuts_opt[i] - self._lp_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
self._gap_closed.append(100 * (self._allcuts_opt[i] - self._sic_opt[i]) / (self._ip_opt[i] - self._lp_opt[i]))
else:
self._lp_opt.append(0.0)
self._sic_opt.append(0.0)
self._sics_gap.append(0.0)
self._allcuts_opt.append(0.0)
self._allcuts_gap.append(0.0)
self._gap_closed.append(0.0)
return self._gap_closed
def gap_closed_table(self,
inst_name = None, pha_act_option = None,
num_alg2_rounds = None, num_rays_cut = None,
max_frac_var = None, cut_limit = None,
use_split_share = None,
num_cuts_iter_bilinear = None,
use_unit_vectors_heur = None,
use_cut_vert_heur = None,
use_tight_points_heur = None,
cut_presolve = None,
recompute = False):
""" Create table with gap closed information """
if (not hasattr(self, '_gap_closed')) or recompute:
self.gap_closed(inst_name, pha_act_option, num_alg2_rounds,
num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur, use_cut_vert_heur,
use_tight_points_heur, cut_presolve, recompute)
if (not hasattr(self, '_num_rows') or not hasattr(self, '_num_cols')):
self.inst_info()
if (not hasattr(self, '_best_row')):
self.get_best_row(inst_name, pha_act_option,
num_alg2_rounds, num_rays_cut, max_frac_var, cut_limit,
use_split_share, num_cuts_iter_bilinear, use_unit_vectors_heur,
use_cut_vert_heur, use_tight_points_heur, cut_presolve)
append_average = True
eps = 1e-5
nonzero_gap_indices = [i for i in range(len(self._gap_closed)) if (self._allcuts_gap[i] > eps)]
zero_gap_indices = [i for i in range(len(self._gap_closed)) if (self._allcuts_gap[i] <= eps)]
# Set up transpose of out_table
out_tab_tr = [[super(LGReader, self).get_param(self.colname_inst)[i] for i in nonzero_gap_indices]]
out_tab_tr.append(np.around([self._num_rows[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._num_cols[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._lp_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._ip_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._sic_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._allcuts_opt[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._sics_gap[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._allcuts_gap[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr.append(np.around([self._gap_closed[i] for i in nonzero_gap_indices], decimals=self._num_dec_places).tolist())
# Also add num SICs, num active SICs, num GICs, num active GICs
numsics_col_index = super(LGReader, self).get_col_index(self.colname_numsics)
activesics_col_index = super(LGReader, self).get_col_index(self.colname_activesics)
numpha_col_index = super(LGReader, self).get_col_index(self.colname_numpha)
activegics_col_index = super(LGReader, self).get_col_index(self.colname_activegics)
num_sics_tab = []
num_active_sics_tab = []
num_gics_tab = []
num_active_gics_tab = []
percent_active_tab = []
for i in nonzero_gap_indices:
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
num_sics_tab.append(int(curr_row[numsics_col_index]))
num_active_sics_tab.append(int(curr_row[activesics_col_index]))
num_gics_tab.append(int(curr_row[numpha_col_index]))
num_active_gics_tab.append(int(curr_row[activegics_col_index]))
num_pha = float(curr_row[numpha_col_index])
percent_active_tab.append(
(100. * float(curr_row[activegics_col_index]) / num_pha) if num_pha > 0 else 0)
else:
num_sics_tab.append(0)
num_active_sics_tab.append(0)
num_gics_tab.append(0)
num_active_gics_tab.append(0)
percent_active_tab.append(0.0)
out_tab_tr.append(num_sics_tab) # num SICs
out_tab_tr.append(num_active_sics_tab) # active SICs
out_tab_tr.append(num_gics_tab) # num GICs
out_tab_tr.append(num_active_gics_tab) # active GICs
out_tab_tr.append(percent_active_tab) # % active
# Header
out_tab = [
[
'', '', '',
"Opt", "Opt", "Opt", "Opt",
"Best % gap closed", "Best % gap closed", "Best % gap closed",
"# cuts", "# cuts", "# cuts", "# cuts",
''
],
[
"Instance", "Rows", "Cols",
"LP", "IP", "SIC", "GIC+SIC",
"SIC", "GIC", "Diff",
"SICs", "Active SICs", "GICs", "Active GICs",
"% active"
]
]
out_tab.extend([list(t) for t in izip(*out_tab_tr)])
if (append_average):
out_tab.append(
[
"Average",'','','','','','',
np.around(np.mean([self._sics_gap[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
np.around(np.mean([self._allcuts_gap[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
np.around(np.mean([self._gap_closed[i] for i in nonzero_gap_indices], dtype=np.float64), decimals=self._num_dec_places),
'','','','',
np.around(np.mean(percent_active_tab, dtype=np.float64), decimals=self._num_dec_places)
]
)
out_tab_tr_zero = [[super(LGReader, self).get_param(self.colname_inst)[i] for i in zero_gap_indices]]
out_tab_tr_zero.append(np.around([self._num_rows[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._num_cols[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._lp_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._ip_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._sic_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._allcuts_opt[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._sics_gap[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._allcuts_gap[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
out_tab_tr_zero.append(np.around([self._gap_closed[i] for i in zero_gap_indices], decimals=self._num_dec_places).tolist())
num_sics_tab = []
num_active_sics_tab = []
num_gics_tab = []
num_active_gics_tab = []
percent_active_tab = []
for i in zero_gap_indices:
if self._best_row[i] >= 0:
curr_row = super(LGReader, self).get_row(self._best_row[i])
num_sics_tab.append(int(curr_row[numsics_col_index]))
num_active_sics_tab.append(int(curr_row[activesics_col_index]))
num_gics_tab.append(int(curr_row[numpha_col_index]))
num_active_gics_tab.append(int(curr_row[activegics_col_index]))
num_pha = float(curr_row[numpha_col_index])
percent_active_tab.append(
(100. * float(curr_row[activegics_col_index]) / num_pha) if num_pha > 0 else 0)
else:
num_sics_tab.append(0)
num_active_sics_tab.append(0)
num_gics_tab.append(0)
num_active_gics_tab.append(0)
percent_active_tab.append(0.0)
out_tab_tr_zero.append(num_sics_tab) # num SICs
out_tab_tr_zero.append(num_active_sics_tab) # active SICs
out_tab_tr_zero.append(num_gics_tab) # num GICs
out_tab_tr_zero.append(num_active_gics_tab) # active GICs
out_tab_tr_zero.append(percent_active_tab) # % active
out_tab.extend([list(t) for t in izip(*out_tab_tr_zero)])
util.print_pretty_table(out_tab)
return out_tab
def hplane_analysis(self, hh_start = 0, hh_end = 2, num_act_start = 0, num_act_end = 4):
"""
Sees effect of the various hplane selection heuristics,
as well as the number of activated hyperplanes
Outputs 2D list with the data
"""
if (not hasattr(self, '_gap_closed')):
self.gap_closed()
col_name = [self.colname_allcutsbound]
stat = StatEnum.MAX
make_int = False
inst_names = super(LGReader, self).get_param(self.colname_inst)
numpoints_col_index = super(LGReader, self).get_col_index(self.colname_numpoints)
numfinalpoints_col_index = super(LGReader, self).get_col_index(self.colname_numfinalpoints)
points_tab_tr = []
finalpoints_tab_tr = []
out_tab_tr = [super(LGReader, self).get_param(self.colname_inst)]
out_tab_tr.append(np.around(self._sics_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._allcuts_gap, self._num_dec_places).tolist())
out_tab_tr.append(np.around(self._gap_closed, self._num_dec_places).tolist())
append_average = True
if (append_average):
out_tab_tr[0].append("Average")
out_tab_tr[1].append(np.around(np.mean(self._sics_gap, dtype=np.float64), self._num_dec_places))
import numpy as np
import re
import random
def prepare_data():
"""This method prepares input positive and negative datasets as bitvectors for the Rap1 binding problem. Output: three lists of bitvectors, one containing positive samples, negative samples that are similar to positive samples, and negative examples that are randomly chosen from the fasta sequences. All bitvectors are 17 bp (34 bits) long"""
# read in all positive data, convert to bitvectors
pos_str = read_positives()
pos_vec = str_to_vec(pos_str)
# read in all negative data. then, remove false negatives from the negative fa sequences and their reverse complements. Call this new set of sequences and their reverse complements "neg_str".
neg_str = read_negatives()
neg_str = remove_falseneg(neg_str, pos_str)
rc_neg_str = reverse_complement(neg_str)
rc_neg_str = remove_falseneg(rc_neg_str, pos_str)
neg_str = reverse_complement(rc_neg_str)
neg_str = neg_str + rc_neg_str
# Cache interesting cases as "neg_sim". Interesting cases are those that look similar to the positive sequences (in that they contain cytosines at positions 5, 6, and 10) but are still negative. Also cache randomly chosen sequences, so that the neural net can be trained on sequences that are not similar to positive examples.
neg_sim, neg_rand = cache_cases(neg_str)
neg_sim_vec = str_to_vec(neg_sim)
neg_rand_vec = str_to_vec(neg_rand)
return pos_vec, neg_sim_vec, neg_rand_vec
def read_positives():
"reads in positive samples as strings"
seqs = []
file = '/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/rap1-lieb-positives.txt'
with open(file, 'rb') as f:
for seq in f:
seqs.append(seq.strip())
return seqs
def read_negatives():
"reads in negative samples as strings"
seqs = []
file = '/Users/cjmathy/Documents/courses/bmi203/Final-Project/ann_bmi203/yeast-upstream-1k-negative.fa'
with open(file, 'rb') as f:
sequence = ''
for line in f:
if line[0] != '>':
sequence += line.strip()
else:
if sequence:
seqs.append(sequence)
sequence = ""
if sequence:
    seqs.append(sequence)  # keep the final record, which is not followed by another header line
return seqs
def str_to_vec(sequences):
"""converts nucleotide strings into vectors using a 2-bit encoding scheme."""
vecs = []
nuc2bit = {"A": (0, 0),
"C": (0, 1),
"T": (1, 0),
"G": (1, 1)}
for seq in sequences:
vec = []
for nuc in seq:
vec.append(nuc2bit[nuc][0])
vec.append(nuc2bit[nuc][1])
vecs.append(vec)
return vecs
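# For example (using the encoding table above), "ACG" maps to
# [0, 0, 0, 1, 1, 1], since A -> (0, 0), C -> (0, 1) and G -> (1, 1).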
def remove_falseneg(negatives, positives):
"""this method removes any negative fasta sequences that contain one of the positive sample sequences (essentially making them false negatives."""
seqs = []
for n in negatives:
if not any(p in n for p in positives):
seqs.append(n)
return seqs
def reverse_complement(sequences):
"""returns a list of reverse complemented sequences"""
rc = []
complement = {'A': 'T',
'C': 'G',
'G': 'C',
'T': 'A'}
for seq in sequences:
seq = list(seq)
seq = reversed([complement.get(nuc) for nuc in seq])
seq = ''.join(seq)
rc.append(seq)
return rc
def cache_cases(sequences):
"""this method separates the negative data into two sets: those that contain the Rap1 binding signature sequence, and a set that is randomly chosen from the negative data."""
# 1) cache negative cases that are similar to positives
sim_cache = []
for seq in sequences:
matches = re.findall(r'....CC...C.......', seq)
for match in matches:
sim_cache.append(match)
sim_cache = list(set(sim_cache))
# 2) cache randomly chosen 17 bp negatives. 5 from each fa sequence (including reverse complements). there are about 30000 neg_sim samples, so this will create about 30000 neg_rand samples from the 3000 sequences and their 3000 reverse complements.
bp = 17
rand_cache = []
for seq in sequences:
for _ in xrange(5):
i = random.randint(0, len(seq)-bp)
substr = seq[i:i+bp]
rand_cache.append(substr)
return sim_cache, rand_cache
def build_training_set(pos, neg_sim, neg_rand):
"""Builds a training set using 50% positive data, and 50% negative data. Negative data consists equally of similar-to-positve and random negative sequences"""
# we have 137 positive examples, 30000 special negative examples, and 30000 random negative examples, all 34 bits long. take 69 special negative examples and 68 random negative examples. add them to the positive examples to make our training set.
neg = []
for _ in xrange(69):
i = np.random.randint(0, len(neg_sim))
neg.append(neg_sim[i])
for _ in xrange(68):
i = np.random.randint(0, len(neg_rand))
neg.append(neg_rand[i])
Xp = np.array(pos)
Xn = np.array(neg)
X = np.concatenate((Xp, Xn), axis=0) # nd array, 274 x 34
yp = np.ones((Xp.shape[0],))
yn = np.zeros((Xn.shape[0],))
y = np.concatenate((yp, yn), axis=0) # nd array, length 274
return X, y
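# Usage sketch (hypothetical session; the input file paths are hard-coded above):
#   >>> pos_vec, neg_sim_vec, neg_rand_vec = prepare_data()
#   >>> X, y = build_training_set(pos_vec, neg_sim_vec, neg_rand_vec)
#   >>> X.shape, y.shape
#   ((274, 34), (274,))
# 137 positive bitvectors plus 137 sampled negatives give 274 rows of 34 bits each.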
def build_training_set_100(pos, neg_sim, neg_rand):
"""same as above, but allowing for some positive and negative samples to be held out as a test set"""
neg = []
for _ in xrange(50):
i = np.random.randint(0, len(neg_sim))
neg.append(neg_sim[i])
for _ in xrange(50):
i = np.random.randint(0, len(neg_rand))
neg.append(neg_rand[i])
Xp = np.array(pos)
import autograd.numpy as np
import tensorflow as tf
import torch
from numpy import linalg as la
from numpy import random as rnd
import pymanopt
from examples._tools import ExampleRunner
from pymanopt.manifolds import PSDFixedRank
from pymanopt.solvers import TrustRegions
SUPPORTED_BACKENDS = ("Autograd", "Callable", "PyTorch", "TensorFlow")
def create_cost_egrad_ehess(manifold, matrix, backend):
egrad = ehess = None
if backend == "Autograd":
@pymanopt.function.Autograd(manifold)
def cost(Y):
return np.linalg.norm(Y @ Y.T - matrix, "fro") ** 2
elif backend == "Callable":
@pymanopt.function.Callable(manifold)
def cost(Y):
return la.norm(Y @ Y.T - matrix, "fro") ** 2
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for measuring mutual information using kNN method.
Using a binning method to compute mutual information between ordinal features is
not practical in real use cases because of its poor accuracy. Instead, this module
implements:
(1) the algorithm in PRE paper 10.1103/PhysRevE.69.066138 (See the paper online
at http://arxiv.org/abs/cond-mat/0305641v1) for estimating the mutual
information between ordinal features.
(2) the algorithm in PLOS paper PLoS ONE 9(2): e87357 (
http://journals.plos.org/plosone/article?id=10.1371%2Fjournal.pone.0087357) for
estimating the mutual information between ordinal features and categorical
features. Besides, this module also handles missing values and weighted samples.
For each algorithm there are two estimate methods '1' and '2' as described in
the PRE paper. Scikit-learn(dev) and several other Python open source
implementations only provide the method '1' with feature dimension 2 and without
handling missing values or weights.
The two methods should usually produce similar results. Method '1' has smaller
statistical errors, while method '2' has smaller systematic errors. So method
'1' is more suitable for low dimensional small data sets, while method '2' is
more preferred for high dimensional larger data sets. The default method is '2'.
In this implementation, we use the more understandable names 'smaller_data' and
'larger_data' to represent the methods '1' and '2'.
The results are insensitive to the value of k. In practice, usually people use
k = 3. Larger values are also fine.
The major problem with the kNN estimation method is that it can fail if many
samples share the same value or very close values. The PRE paper has two
suggestions that usually work:
(a) Add a tiny noise. Breaking the data degeneracy decreases the number of tied
neighbors, which speeds up the computation a lot, and the trick does not affect
the quality of the result. This functionality is controlled by 'seed' and the
'_NOISE_AMPLITUDE' constant.
(b) Reparametrize the features, for example, M.Log(A) can re-distribute data
within a small range into a wider range. Any homeomorphism transformation of a
feature does not alter the mutual information, so this does not affect the
result either. Of course any homeomorphism is fine. This is helpful especially
when the feature value distributed is highly skewed. Strictly speaking, this is
not required, but it usually decreases the error. Re-scaling the features to
have unit variance also helps decreasing errors.
"""
# NOTE: In the code, some variable names start with "c" and some with "d"; this
# is because the key distinction between categorical vs ordinal features is
# closely related to, hence vaguely conflated with, "continuous" vs "discrete".
import functools
import itertools
import math
from typing import Any, List, Optional, Tuple, Union
import uuid
import numpy as np
import pandas as pd
import scipy.special
import sklearn.neighbors
# For categorical features, we will use this unique string to represent missing
# values and handle it as if it was a normal value.
_NONE_STR = str(uuid.uuid4()).encode()
# For ordinal features, we will use Max(feat) + Max(feat) - Min(feat)
# + _NONE_NUM to represent missing values and handle it as if it was a normal
# value.
_NONE_NUM = 10.
# When considering the k nearest neighbors, it could cause problems if two
# neighbors have the same distance. Do we want to include one of them or both of
# them? So we use a tiny noise to break the tie, which does not affect the
# mutual information value.
_NOISE_AMPLITUDE = 1e-10
def mutual_information(
feature_list0: List[np.ndarray],
feature_list1: List[np.ndarray],
is_categorical_list0: List[bool],
is_categorical_list1: List[bool],
k: int = 3,
estimate_method: str = 'larger_data',
weight_feature: Optional[np.ndarray] = None,
filter_feature: Optional[np.ndarray] = None,
output_each: bool = False,
seed: Optional[int] = None) -> Union[np.float, Tuple[np.float, np.ndarray]]:
"""Computes MI between two lists of features (numpy arrays).
The mutual information value is divided by log(2) at the end so that the unit
is bits.
The paper (1) in the module doc string gives the method for computing MI
between two lists of ordinal features. The paper (2) provides the method
for computing MI between a list of ordinal features and a list of categorical
features. For the general case, suppose we have ordinal feature set C0, C1,
and categorical feature set D0, D1. Then we can derive
I({C0,D0};{C1,D1}) = I({C0,C1};{D0,D1}) + I(C0;C1) + I(D0;D1) - I(C0;D0)
- I(C1;D1),
where the right hand side terms can all be computed by using the methods in
the two papers.
Args:
feature_list0: (list(np.ndarray)) A list of features.
feature_list1: (list(np.ndarray)) A list of features.
is_categorical_list0: (list(bool)) Whether the first list of features are
categorical or not.
is_categorical_list1: (list(bool)) Whether the second list of features are
categorical or not.
k: (int) The number of nearest neighbors. It has to be an integer no less
than 3.
estimate_method: (str) 'smaller_data' or 'larger_data' estimator in the
above paper.
weight_feature: (np.ndarray) A feature that contains weights for each
sample.
filter_feature: (np.ndarray) A feature that is used as the filter to drop
all data where this filter has missing values. By default, it is None and
no filtering is done.
output_each: (bool) Whether to output the contribution from each individual
sample. The output values are not scaled by the number of samples.
seed: (int) Random seed for the tiny noise.
Returns:
(float | (float, np.ndarray)) The mutual information between the features in
feature_list0 and feature_list1. If output_each is True, an np array of
the contributions from all samples is also output, whose mean is equal
to the mutual information.
"""
_validate_args(feature_list0, feature_list1, is_categorical_list0,
is_categorical_list1, k, estimate_method, weight_feature,
filter_feature, output_each, seed)
cf_list0, cf_list1, df_list0, df_list1, weights = _feature_list_to_numpy_arrays(
feature_list0, feature_list1, is_categorical_list0, is_categorical_list1,
weight_feature, filter_feature)
# Try to reuse these data in later computations to avoid converting Feature to
# numpy array multiple times.
final_mi, each = _mi_for_arrays(cf_list0, cf_list1, df_list0, df_list1,
weights, k, estimate_method, seed)
if output_each:
return final_mi, each
return final_mi
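# Usage sketch (hypothetical data; the behavior described is only indicative):
#   >>> import numpy as np
#   >>> x = np.random.randn(1000)
#   >>> y = x + 0.1 * np.random.randn(1000)
#   >>> mi_bits = mutual_information([x], [y], [False], [False], k=3)
# Two strongly correlated continuous features should give a clearly positive
# value in bits, while independent features should give a value close to 0.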
def adjusted_mutual_information(
feature_list0: List[np.ndarray],
feature_list1: List[np.ndarray],
is_categorical_list0: List[bool],
is_categorical_list1: List[bool],
k: int = 3,
estimate_method: str = 'larger_data',
weight_feature: Optional[np.ndarray] = None,
filter_feature: Optional[np.ndarray] = None,
seed: Optional[int] = None,
) -> float:
"""Computes adjusted MI between two lists of features.
Args:
feature_list0: (list(np.ndarray)) a list of features represented as numpy
arrays.
feature_list1: (list(np.ndarray)) a list of features represented as numpy
arrays.
is_categorical_list0: (list(bool)) Whether the first list of features are
categorical or not.
is_categorical_list1: (list(bool)) Whether the second list of features are
categorical or not.
k: (int) The number of nearest neighbors. It has to be an integer no less
than 3.
estimate_method: (str) 'smaller_data' or 'larger_data' estimator in the
above paper.
weight_feature: (np.ndarray) numpy array that are weights for each example.
filter_feature: (np.ndarray) numpy array that is used as the filter to drop
all data where this has missing values. By default, it is None and no
filtering is done.
seed: (int) the numpy random seed.
Returns:
The adjusted mutual information between the features in feature_list0 and
feature_list1.
"""
_validate_args(feature_list0, feature_list1, is_categorical_list0,
is_categorical_list1, k, estimate_method, weight_feature,
filter_feature, False, seed)
cf_list0, cf_list1, df_list0, df_list1, weights = _feature_list_to_numpy_arrays(
feature_list0, feature_list1, is_categorical_list0, is_categorical_list1,
weight_feature, filter_feature)
return _adjusted_mi_for_arrays(cf_list0, cf_list1, df_list0, df_list1,
weights, k, estimate_method, seed)
def _mi_for_arrays(c_arrs0: List[np.ndarray],
c_arrs1: List[np.ndarray],
d_arrs0: List[np.ndarray],
d_arrs1: List[np.ndarray],
weights: Optional[np.ndarray] = None,
k: int = 3,
estimate_method: str = 'larger_data',
seed: Optional[int] = None) -> Tuple[float, np.ndarray]:
"""Computes MI for a list of np.ndarrays."""
assert (bool(c_arrs0 + d_arrs0) and
bool(c_arrs1 + d_arrs1)), 'Both sides are expected to be nonempty.'
fs = list(itertools.chain(c_arrs0, c_arrs1, d_arrs0, d_arrs1))
for other_f in fs[1:]:
assert len(fs[0]) == len(other_f)
np.random.seed(seed)
# Scale ordinal features, and replace missing values in all features.
c_arrs0 = [
_replace_none_categorical(_unit_variance_scale(f)) for f in c_arrs0
]
c_arrs1 = [
_replace_none_categorical(_unit_variance_scale(f)) for f in c_arrs1
]
d_arrs0 = [_to_dense_discrete_array(f) for f in d_arrs0]
d_arrs1 = [_to_dense_discrete_array(f) for f in d_arrs1]
arr0 = _to_noisy_numpy_array(c_arrs0)
arr1 = _to_noisy_numpy_array(c_arrs1)
df0 = _merge_categorical(d_arrs0)
df1 = _merge_categorical(d_arrs1)
if weights is None:
weights = np.ones_like(fs[0], dtype=float)
if (arr0 is None and arr1 is None) or (df0 is None and df1 is None):
mi_c01_d01, each_c01_d01 = 0., 0.
else:
arr = np.hstack(([] if arr0 is None else [arr0]) +
([] if arr1 is None else [arr1]))
df = _merge_categorical(([] if df0 is None else [df0]) +
([] if df1 is None else [df1]))
mi_c01_d01, each_c01_d01 = _mi_high_dim_cd(arr, df, k, estimate_method,
weights)
if arr0 is None or arr1 is None:
mi_c0_c1, each_c0_c1 = 0., 0.
else:
mi_c0_c1, each_c0_c1 = _mi_high_dim_cc(arr0, arr1, k, estimate_method,
weights)
if df0 is None or df1 is None:
mi_d0_d1, each_d0_d1 = 0., 0.
else:
mi_d0_d1, each_d0_d1 = _mi_high_dim_dd(df0, df1, weights)
if arr0 is None or df0 is None:
mi_c0_d0, each_c0_d0 = 0., 0.
else:
mi_c0_d0, each_c0_d0 = _mi_high_dim_cd(arr0, df0, k, estimate_method,
weights)
if arr1 is None or df1 is None:
mi_c1_d1, each_c1_d1 = 0., 0.
else:
mi_c1_d1, each_c1_d1 = _mi_high_dim_cd(arr1, df1, k, estimate_method,
weights)
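# Combine the sub-terms according to the identity quoted in the docstring of
# mutual_information():
#   I({C0,D0};{C1,D1}) = I({C0,C1};{D0,D1}) + I(C0;C1) + I(D0;D1)
#                        - I(C0;D0) - I(C1;D1)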
final_mi = max(0., mi_c01_d01 + mi_c0_c1 + mi_d0_d1 - mi_c0_d0 - mi_c1_d1)
each = each_c01_d01 + each_c0_c1 + each_d0_d1 - each_c0_d0 - each_c1_d1
assert isinstance(each, np.ndarray)
return final_mi, each
def _adjusted_mi_for_arrays(
c_arrs0: List[np.ndarray],
c_arrs1: List[np.ndarray],
d_arrs0: List[np.ndarray],
d_arrs1: List[np.ndarray],
weights: Optional[np.ndarray] = None,
k: int = 3,
estimate_method: str = 'larger_data',
seed: Optional[int] = None,
) -> float:
"""Computes AdjustedMutualInformation for given np.ndarrays.
Args:
c_arrs0: Continuous arrays for side 0.
c_arrs1: Continuous arrays for side 1.
d_arrs0: Discrete arrays for side 0.
d_arrs1: Discrete arrays for side 1.
weights: Weights for data points.
k: The number of nearest neighbors to check when computing MI.
estimate_method: Underlying estimate method for computing MI.
seed: The seed for RNGs.
Returns:
AMI
"""
if seed is not None:
np.random.seed(seed)
# Always set `output_each` to be False.
seed1 = None if seed is None else np.random.randint(0, 1000)
mi, _ = _mi_for_arrays(c_arrs0, c_arrs1, d_arrs0, d_arrs1, weights, k,
estimate_method, seed1)
# We use the same seed to shuffle several features together.
shuffle_seed = np.random.randint(0, 1000) # a fixed seed for shuffling
array_length = next(itertools.chain(c_arrs0, c_arrs1, d_arrs0, d_arrs1)).size
np.random.seed(shuffle_seed)
shuffled_index = np.random.permutation(array_length)
shuffled_c_arrs0 = [a[shuffled_index] for a in c_arrs0]
shuffled_d_arrs0 = [a[shuffled_index] for a in d_arrs0]
seed2 = None if seed is None else np.random.randint(0, 1000)
mi_shuffled, _ = _mi_for_arrays(shuffled_c_arrs0, c_arrs1, shuffled_d_arrs0,
d_arrs1, weights, k, estimate_method, seed2)
return max(mi - mi_shuffled, 0.0)
def _to_dense_discrete_array(f: np.ndarray) -> np.ndarray:
ret = f.astype(bytes)
ret[pd.isnull(f)] = _NONE_STR
return ret
def _replace_none_categorical(f: np.ndarray) -> np.ndarray:
"""Replaces missing values in a ordinal feature."""
if np.all(np.isnan(f)):
return np.full_like(f, _NONE_NUM)
# Replace the missing value with a large enough float value so that when
# looking for k nearest neighbors, samples with missing values are treated
# separately (only samples with the same missing values are taken into account
# for nearest neighbors).
return np.nan_to_num(
f, copy=True, nan=2 * np.nanmax(f) - np.nanmin(f) + _NONE_NUM)
def _unit_variance_scale(f: np.ndarray) -> np.ndarray:
"""Rescales a feature to have a unit variance."""
f_nan_max = np.nanmax(f)
f_nan_min = np.nanmin(f)
if np.isnan(f_nan_max) or np.isnan(f_nan_min):
raise ValueError('Continuous feature all missing.')
if f_nan_max == f_nan_min:
ret = np.full_like(f, np.nan, dtype=float)
ret[~np.isnan(f)] = 0
return ret
return (f - np.nanmean(f)) / np.nanstd(f, ddof=1)
def _merge_categorical(discrete_fs: List[np.ndarray]) -> Any:
"""Merges a list of categorical features into a single categorical feature."""
if not discrete_fs:
return None
operand_list = []
for i in range(2 * len(discrete_fs) - 1):
if i % 2 == 0:
operand_list.append(discrete_fs[i // 2].astype(bytes))
else:
operand_list.append(b':') # use ':' to join values
return functools.reduce(np.char.add, operand_list)
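# For example, [np.array(['a', 'b']), np.array(['x', 'y'])] is merged into
# array([b'a:x', b'b:y']): the per-feature values are joined with ':'.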
def _entropy_discrete(discrete_f: np.ndarray,
weight_f: np.ndarray) -> Tuple[float, np.ndarray]:
"""Computes the entropy of a list of categorical features with weights."""
_, inverse_idx, unique_counts = np.unique(
discrete_f, return_inverse=True, return_counts=True)
group_counts = unique_counts[inverse_idx]
each = -np.log2(group_counts / discrete_f.size) * weight_f
return np.mean(each), each
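# Worked example (unit weights): for discrete_f = ['a', 'a', 'b', 'b'] every
# sample belongs to a group of size 2, so each = -log2(2/4) = 1.0 per sample
# and the returned mean entropy is 1.0 bit.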
def _assert_feature_list(feature_list: List[np.ndarray],
list_name: str) -> None:
"""Validates the contents of feature_list arg for `mutual_information`."""
for f in feature_list:
if f.dtype == np.float:
mask = (f == float('inf')) | (f == float('-inf'))
assert np.sum(mask) == 0, (
'Feature list: %s in list %s contains infinite values, which '
'currently are not supported.' % (f, list_name))
def _validate_args(
feature_list0: List[np.ndarray],
feature_list1: List[np.ndarray],
is_categorical_list0: List[bool],
is_categorical_list1: List[bool],
k: int,
estimate_method: str,
weight_feature: np.ndarray,
filter_feature: np.ndarray,
output_each: bool,
seed: Optional[int]) -> None:
"""Validates the arguments of the function `mutual_information`."""
assert len(set(len(f) for f in feature_list0 + feature_list1)) == 1, (
'The features have different number of items.')
assert len(is_categorical_list0) == len(feature_list0), (
'is_categorical_list0 is not the same length as feature_list0.')
assert len(is_categorical_list1) == len(feature_list1), (
'is_categorical_list1 is not the same length as feature_list1.')
assert isinstance(k, int) and k >= 3, 'k has to be an integer no less than 3.'
assert estimate_method in ['smaller_data', 'larger_data']
def assert_feature(f, f_name):
assert (f is None or isinstance(f, np.ndarray) and
len(f) == len(feature_list0[0])), (
'%s must be None or a feature with the same item number.' %
f_name)
assert_feature(weight_feature, 'weight_feature')
assert_feature(filter_feature, 'filter_feature')
assert isinstance(output_each, bool)
assert seed is None or isinstance(seed, int) and seed > 0
def _fill_missing_values(f: np.ndarray, is_categorical: bool) -> np.ndarray:
"""Fills `f` with `np.nan` for missing values.
Missing values are represented with `np.nan`, regardless of the dtype of the
returned np.ndarray. All continuous features (i.e. is_categorical == False)
are cast to float.
E.g.
np.array([1, 2, None]) -> np.array([1.0, 2.0, nan], dtype=float)
np.array(['a', None, None]) -> np.array(['a', nan, nan], dtype=object)
Args:
f: np.ndarray.
is_categorical: bool.
Returns:
np.ndarray.
"""
if is_categorical:
f = f.astype(object)
f[pd.isnull(f)] = np.nan
return f
else:
# Converting to np.float64 is necessary for getting smaller errors.
return f.astype(float)
def _feature_list_to_numpy_arrays(
feature_list0: List[np.ndarray], feature_list1: List[np.ndarray],
is_categorical_list0: List[bool], is_categorical_list1: List[bool],
weight_feature: Optional[np.ndarray], filter_feature: Optional[np.ndarray]
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray],
List[np.ndarray], np.ndarray]:
"""Converts feature lists into np.ndarray lists for MI computation."""
n_samples = len(feature_list0[0])
if weight_feature is None: # the default weight is constant 1
weights = np.ones(n_samples).astype(float)
else:
weights = weight_feature.astype(float)
# We will handle ordinal and categorical features differently.
def select_features(feature_list, is_categorical_list, keep_fn):
return [
_fill_missing_values(f, is_categorical)
for f, is_categorical in zip(feature_list, is_categorical_list)
if keep_fn(is_categorical)
]
# Select ordinal features and categorical features.
cf_list0 = select_features(feature_list0, is_categorical_list0,
lambda a: not a)
cf_list1 = select_features(feature_list1, is_categorical_list1,
lambda a: not a)
df_list0 = select_features(feature_list0, is_categorical_list0, lambda a: a)
df_list1 = select_features(feature_list1, is_categorical_list1, lambda a: a)
# Ignore those samples whose the filter_feature is missing.
if filter_feature is not None:
cf_list0 = [f[filter_feature] for f in cf_list0]
df_list0 = [f[filter_feature] for f in df_list0]
cf_list1 = [f[filter_feature] for f in cf_list1]
df_list1 = [f[filter_feature] for f in df_list1]
weights = weights[filter_feature]
return cf_list0, cf_list1, df_list0, df_list1, weights
def _to_noisy_numpy_array(cf_list: List[np.ndarray]) -> Optional[np.ndarray]:
"""Adds a tiny noise onto ordinal features."""
# In order to use double precision computation to get smaller errors, we add
# noise after the features have been converted to numpy arrays.
if not cf_list:
return None
arr = np.hstack([l.reshape((-1, 1)) for l in cf_list])
# This may add a noise that is too big for features with very small mean. So
# far it works fine, but should change it if it poses a problem.
means = np.maximum(1, np.mean(np.abs(arr), axis=0))
arr += (_NOISE_AMPLITUDE * means * np.random.randn(*arr.shape))
return arr
def _process_high_dim(arr: np.ndarray, radius: int, estimate_method: str,
weights: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Processes high dimensional feature in the same way as 1-d feature."""
kd_tree = sklearn.neighbors.KDTree(arr, metric='chebyshev')
radius_ns = kd_tree.query_radius(X=arr, r=radius, count_only=True)
if estimate_method == 'smaller_data':
each = -scipy.special.digamma(radius_ns) * weights
elif estimate_method == 'larger_data':
each = -scipy.special.digamma(radius_ns - 1) * weights
return np.sum(each), each
def _mi_high_dim_cc(arr0: np.ndarray, arr1: np.ndarray, k: int,
estimate_method: str,
weights: np.ndarray) -> Tuple[float, np.ndarray]:
"""Computes high dimensional MI for ordinal features."""
arr = np.hstack([arr0, arr1])
m0 = arr0.shape[1]
n_samples, _ = arr.shape
nn = sklearn.neighbors.NearestNeighbors(
metric='chebyshev', n_neighbors=k, n_jobs=1)
nn.fit(arr)
k_neighbors = nn.kneighbors()
if estimate_method == 'smaller_data':
# Use one radius for all features. Exclude the point on the boundary by
# taking a radius slightly smaller than the distance to the k-th nearest
# neighbor.
r = np.nextafter(k_neighbors[0][:, -1], 0).reshape((-1, 1))
radius = np.hstack([r, r])
elif estimate_method == 'larger_data':
# Treat arr0 and arr1 as two high dimensional features and each of them uses
# its own projection of the radius. The idea is to look at the k nearest
# neighbors and find the radius (largest distance) in the two sub-spaces
# separately. The following code does this for chebyshev distance metric.
ind = k_neighbors[1][:, 0]
r = np.fabs(arr - arr[ind])
for i in range(1, k_neighbors[1].shape[1]):
ind = k_neighbors[1][:, i]
r = np.maximum(r, np.fabs(arr - arr[ind]))
r0 = np.max(r[:, :m0], axis=1).reshape((-1, 1))
r1 = np.max(r[:, m0:], axis=1).reshape((-1, 1))
radius = np.hstack([r0, r1])
mi0, each0 = _process_high_dim(arr0, radius[:, 0], estimate_method, weights)
mi1, each1 = _process_high_dim(arr1, radius[:, 1], estimate_method, weights)
mi = (mi0 + mi1) / float(n_samples)
if estimate_method == 'smaller_data':
extra = (scipy.special.digamma(k) +
scipy.special.digamma(n_samples)) * weights
elif estimate_method == 'larger_data':
extra = (scipy.special.digamma(k) + scipy.special.digamma(n_samples) -
1. / k) * weights
mi += np.mean(extra)
each = each0 + each1 + extra
final_mi = max(0., mi / math.log(2))
return final_mi, each / math.log(2)
def _mi_high_dim_cd(arr: np.ndarray, arr_d: np.ndarray, k: int,
estimate_method: str,
weights: np.ndarray) -> Tuple[float, np.ndarray]:
"""Computes high dimensional MI between ordinal and categorical features."""
n_samples = arr_d.size
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
import functools
import unittest
import hypothesis as hp, hypothesis.strategies as hps
import hypothesis.extra.numpy as hpn
import numpy as np
import tensorflow as tf
import fsph, fsph.tf_ops
class TestTensorflow(unittest.TestCase):
@hp.settings(deadline=None, print_blob=True)
@hp.given(hps.integers(0, 64), hps.booleans(),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(0, np.float32(np.pi), width=32)),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(0, np.float32(2*np.pi), width=32)))
def test_basic(self, lmax, negative_m, phis, thetas):
phis = phis[:min(len(phis), len(thetas))]
thetas = thetas[:min(len(phis), len(thetas))]
Ys_fsph = fsph.pointwise_sph(phis, thetas, lmax, negative_m)
inputs = np.array([phis, thetas]).T
Ys_tf = fsph.tf_ops.spherical_harmonic_series(inputs, lmax, negative_m)
self.assertEqual(Ys_fsph.shape, Ys_tf.shape)
np.testing.assert_allclose(Ys_fsph, Ys_tf, atol=1e-4)
@hp.settings(deadline=None, print_blob=True)
@hp.given(hps.integers(0, 12), hps.booleans(),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(np.float32(.1),
np.float32(np.pi - .1), width=32)),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(np.float32(.1),
np.float32(2*np.pi - .1), width=32)))
def test_numeric_gradient(self, lmax, negative_m, phis, thetas):
phis = phis[:min(len(phis), len(thetas))]
thetas = thetas[:min(len(phis), len(thetas))]
Y0 = fsph.pointwise_sph(phis, thetas, lmax, negative_m)
grad_numeric = []
for dim in range(2):
dx = 1e-3
if dim == 0:
Y = fsph.pointwise_sph(phis + dx, thetas, lmax, negative_m)
else:
Y = fsph.pointwise_sph(phis, thetas + dx, lmax, negative_m)
dY = Y - Y0
grad_numeric.append(dY/dx)
grad_numeric = np.transpose(grad_numeric, (1, 2, 0))
inputs = np.array([phis, thetas]).T
grad_tf = fsph.tf_ops.spherical_harmonic_series_grad(inputs, lmax, negative_m)
np.testing.assert_allclose(grad_numeric, grad_tf, atol=5e-2)
@hp.settings(deadline=None, print_blob=True)
@hp.given(hps.integers(0, 12), hps.booleans(),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(np.float32(.1),
np.float32(np.pi - .1), width=32)),
hpn.arrays(np.float32, hpn.array_shapes(max_dims=1),
elements=hps.floats(np.float32(.1), np.float32(2*np.pi - .1), width=32)))
import numpy as np
def get_input(path: str):
input = []
with open(path, 'r') as f:
for line in f.readlines():
input.append(translate_line(line.strip()))
return np.array(input)
def translate_line(line: str):
return [0 if char == "L" else None for char in line]
def change_seats(array: np.array, max_seats_occupied: int, adjacent: bool):
new_array = np.copy(array)
for x in range(len(array)):
for y in range(len(array[0])):
if array[x, y] is None:
continue
seats_occupied = get_occupied_seats(array, x, y, adjacent)
if array[x, y] == 0 and seats_occupied == 0:
new_array[x, y] = 1
elif array[x, y] == 1 and seats_occupied > max_seats_occupied:
new_array[x, y] = 0
return new_array
def get_occupied_seats(array: np.array, x: int, y: int, adjacent: bool) -> int:
if adjacent:
x0, x1, y0, y1 = max(0, x - 1), min(len(array), x + 2), max(0, y - 1), min(len(array[0]), y + 2)
return np.count_nonzero(array[x0:x1, y0:y1])
"""
File: continuous.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/ComeBertrand
Description: Classical continuous functions for performance evaluation of
metaheuristics. All these functions were taken from the following website:
https://www.sfu.ca/~ssurjano/optimization.html
"""
import numpy as np
from ...models import Problem
from ...common.representation import RealEncoding, Boundaries
from ...common.fitness import Objective
from ...operators.neighborhood import NeighborhoodOperator, move_distance_continuous, ContinuousLogMoveRange
class ContinuousProblem(Problem):
"""Problems that are defined by a continuous function.
# TODO: Do it in a more abstract way and move it in abstract
Args:
n_dim (int): Number of dimensions.
min_vals (np.array): Minimum values for each dimension.
max_vals (np.array): Maximum values for each dimension.
move_range (MoveRange): Range of the move step.
known_min (float): Minimum of the continuous function. None means that
the minimum is not known.
"""
def __init__(self, n_dim, min_vals, max_vals, move_range, known_min):
nb_neighbors = n_dim * 100 # TODO: shall be an argument of the object
neighborhood = NeighborhoodOperator(move_distance_continuous, move_range, nb_neighbors)
boundaries = Boundaries(min_vals, max_vals, np.float)
encoding = RealEncoding(boundaries)
objective = Objective(self._eval_func)
super().__init__(objective, encoding, neighborhood=neighborhood, known_min=known_min)
def _eval_func(self, solution):
"""Actual evaluation of a solution by the continuous function.
Args:
solution (Solution): Solution to be evaluated.
Returns:
float: function value of the solution.
"""
raise NotImplementedError("Abstract Class")
# --------------------------------------------------------------------------- #
# Functions with many local minima #
# --------------------------------------------------------------------------- #
class Ackleys(ContinuousProblem):
"""Ackley's function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-32.768] * n_dim, np.float)
max_vals = np.array([32.768] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
part1 = -0.2 * np.sqrt(1/n * np.sum(solution * solution))
part2 = 1/n * np.sum(np.cos(2 * np.pi * solution))
return 20 - 20 * np.exp(part1) + np.e - np.exp(part2)
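# Sanity check (not part of the original module): at the origin the two
# exponential terms are exp(0) and exp(1), so the value is
# 20 - 20 * 1 + e - e = 0, which matches known_min = 0.0.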
class Bukin6(ContinuousProblem):
"""Bukin funtion N.6."""
def __init__(self):
n_dim = 2
min_vals = np.array([-15.0, -3.0], np.float)
max_vals = np.array([-5.0, 3.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(solution[1] - 0.01 * solution[0] * solution[0])
part2 = np.abs(solution[0] + 10)
return 100 * np.sqrt(part1) + 0.01 * part2
class CrossInTray(ContinuousProblem):
"""Cross-in-tray function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -2.06261
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(100 - np.sqrt(np.sum(solution * solution)) / np.pi)
part2 = np.sin(solution[0]) * np.sin(solution[1])
final = np.abs(part2 * np.exp(part1)) + 1.0
return -0.0001 * np.power(final, 0.1)
class DropWave(ContinuousProblem):
"""Drop-Wave function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12, -5.12], np.float)
max_vals = np.array([5.12, 5.12], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -1.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sol_sq = np.sum(solution * solution)
'''
Implementation of Classifier Training, partly described inside Fanello et al.
'''
import sys
import signal
import errno
import glob
import numpy as np
import class_objects as co
import action_recognition_alg as ara
import cv2
import os.path
import cPickle as pickle
import logging
import yaml
import time
from OptGridSearchCV import optGridSearchCV
# pylint: disable=no-member,R0902,too-many-public-methods,too-many-arguments
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def timeit(func):
'''
Decorator to time extraction
'''
def wrapper(self, *arg, **kw):
t1 = time.time()
res = func(self, *arg, **kw)
t2 = time.time()
self.time.append(t2 - t1)
del self.time[:-5000]
return res
return wrapper
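# Usage sketch (hypothetical class; the decorator is presumably applied to methods
# elsewhere in this module): any method decorated with @timeit must belong to an
# object that keeps a `self.time` list, e.g.
#   class Extractor(object):
#       def __init__(self):
#           self.time = []
#       @timeit
#       def extract(self, frame):
#           pass
# After each call, self.time holds the most recent (up to 5000) wall-clock durations.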
class Classifier(object):
'''
Class to hold all Classifier specific methods.
<descriptors>: e.g. ['pca', 'ghog', '3dhof']
<action_type>: 'Passive' (no buffers are used) or 'Dynamic'
<sparsecoding_level>: 'Buffer', 'Features' or None; sparse coding is used when it is truthy
Classifier parameters, for example <AdaBoost_n_estimators>, <RDF_n_estimators>
or <kernel>, can be a list, which will be reduced using optimized grid search
with cross validation.
'''
def __init__(self, log_lev='INFO',
visualize=False, masks_needed=True,
buffer_size=co.CONST['buffer_size'],
sparse_dim_rat=co.CONST['sparse_dim_rat'],
descriptors='',
ptpca=False,
ptpca_components=None,
action_type='Dynamic',
classifiers_used='SVM', num_of_cores=4, name='',
svm_c=None,
AdaBoost_n_estimators=None,
RDF_n_estimators=None,
add_info=None,
sparsecoding_level=None,
kernel=None,
save_all_steps=False,
post_scores_processing_method=None,
hardcore=False,
for_app=False):
'''
sparsecoding_level = [Buffer, Features, None]
'''
if not os.path.isdir(co.CONST['AppData']):
os.makedirs(co.CONST['AppData'])
self.app_dir = co.CONST['AppData']
self.for_app = for_app
self.time = []
self.classifiers_ids = None
self.test_ind = None
# General configuration
if not isinstance(descriptors, list):
descriptors = [descriptors]
descriptors = sorted(descriptors)
###
features_params = {}
coders_params = {}
for descriptor in descriptors:
features_params[descriptor] = {}
features_params[descriptor]['params'] = {attrib.replace(descriptor, ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith(descriptor)}
features_params[descriptor]['sparsecoded'] = sparsecoding_level
features_params[descriptor]['action_type'] = action_type
coders_params[descriptor] = {}
if not sparsecoding_level:
features_params[descriptor]['sparse_params'] = None
else:
features_params[descriptor]['sparse_params'] = {
attrib.replace('sparse', ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith('sparse')}
coders_params[descriptor] = {
attrib.replace('sparse', ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith('sparse') and
'fss' not in attrib}
self.test_name = None
self.kernel = kernel
self.svm_c = svm_c
self.RDF_n_estimators = RDF_n_estimators
self.AdaBoost_n_estimators = AdaBoost_n_estimators
self.sparse_dim_rat = sparse_dim_rat
if 'SVM' in classifiers_used and kernel is None:
self.kernel = 'linear'
if 'SVM' in classifiers_used:
if svm_c is None:
self.svm_c = co.CONST['SVM_C']
if post_scores_processing_method == 'CProb':
LOG.warning('Invalid post_scores_processing_method for SVM')
if hardcore:
raise Exception
else:
LOG.warning('Changing method to CSTD')
post_scores_processing_method = 'CSTD'
if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
if svm_c is not None:
LOG.warning(
'svm_c is not None for RDF or AdaBoost experimentation')
if hardcore:
raise Exception
if post_scores_processing_method is None:
if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
post_scores_processing_method = 'CProb'
else:
post_scores_processing_method = 'CSTD'
classifier_params = {}
if 'RDF' in classifiers_used and RDF_n_estimators is None:
self.RDF_n_estimators = co.CONST['RDF_trees']
if 'AdaBoost' in classifiers_used and AdaBoost_n_estimators is None:
self.AdaBoost_n_estimators = co.CONST['AdaBoost_Estimators']
if 'SVM' in classifiers_used:
classifier_params['SVM_kernel'] = self.kernel
classifier_params['SVM_C'] = self.svm_c
if 'RDF' in classifiers_used:
classifier_params['RDF_n_estimators'] = self.RDF_n_estimators
if 'AdaBoost' in classifiers_used:
classifier_params['AdaBoost_n_estimators'] = self.AdaBoost_n_estimators
if action_type != 'Passive':
dynamic_params = {'buffer_size': buffer_size,
'buffer_confidence_tol': co.CONST['buffer_confidence_tol'],
'filter_window_size':
co.CONST['STD_big_filt_window']}
else:
dynamic_params = {'buffer_size': 1}
if ptpca and ptpca_components is None:
ptpca_components = co.CONST['PTPCA_components']
ptpca_params = {'PTPCA_components': ptpca_components}
for descriptor in descriptors:
features_params[descriptor]['dynamic_params'] = dynamic_params
if sparsecoding_level:
if not isinstance(sparse_dim_rat, list):
sparse_dim_rat = [sparse_dim_rat] * len(descriptors)
if len(list(sparse_dim_rat)) != len(descriptors):
raise Exception('<sparse_dim_rat> should be either an integer/None or' +
' a list with same length with <descriptors>')
sparse_params = dict(zip(descriptors, sparse_dim_rat))
sparse_params['fss_max_iter'] = co.CONST['sparse_fss_max_iter']
else:
sparse_params = None
testing_params = {'online': None}
testing_params['post_scores_processing_method'] = \
post_scores_processing_method
fil = os.path.join(co.CONST['rosbag_location'],
'gestures_type.csv')
self.passive_actions = None
self.dynamic_actions = None
if os.path.exists(fil):
with open(fil, 'r') as inp:
for line in inp:
if line.split(':')[0] == 'Passive':
self.passive_actions = line.split(
':')[1].rstrip('\n').split(',')
elif line.split(':')[0] == 'Dynamic':
self.dynamic_actions = line.split(
':')[1].rstrip('\n').split(',')
action_params = {'Passive': self.passive_actions,
'Dynamic': self.dynamic_actions}
LOG.debug('Extracting: ' + str(descriptors))
self.parameters = {'classifier': classifiers_used,
'descriptors': descriptors,
'features_params': features_params,
'coders_params': coders_params,
'dynamic_params': dynamic_params,
'classifier_params': classifier_params,
'sparse_params': sparse_params,
'action_type': action_type,
'sparsecoded': sparsecoding_level,
'testing': False,
'testing_params': testing_params,
'actions_params': action_params,
'PTPCA': ptpca,
'PTPCA_params': ptpca_params}
self.training_parameters = {k: self.parameters[k] for k in
('classifier', 'descriptors',
'features_params',
'dynamic_params',
'classifier_params',
'sparse_params',
'action_type',
'sparsecoded',
'PTPCA',
'PTPCA_params') if k in
self.parameters}
self.descriptors = descriptors
self.add_info = add_info
self.log_lev = log_lev
self.visualize = visualize
self.buffer_size = buffer_size
self.masks_needed = masks_needed
self.action_type = action_type
self.classifiers_used = classifiers_used
self.num_of_cores = num_of_cores
self.name = name
self.ptpca = ptpca
self.action_recog = ara.ActionRecognition(
self.parameters,
log_lev=log_lev)
if not self.for_app:
self.available_tests = sorted(os.listdir(co.CONST['test_save_path']))
else:
self.available_tests = []
self.update_experiment_info()
if 'SVM' in self.classifiers_used:
from sklearn.svm import LinearSVC
self.classifier_type = LinearSVC(
class_weight='balanced', C=self.svm_c,
multi_class='ovr',
dual=False)
elif 'RDF' in self.classifiers_used:
from sklearn.ensemble import RandomForestClassifier
self.classifier_type =\
RandomForestClassifier(self.RDF_n_estimators)
elif 'AdaBoost' in self.classifiers_used:
from sklearn.ensemble import AdaBoostClassifier
self.classifier_type =\
AdaBoostClassifier(n_estimators=self.AdaBoost_n_estimators)
self.unified_classifier = None
if sparsecoding_level:
if not(sparsecoding_level == 'Features' or sparsecoding_level == 'Buffer'):
raise Exception('Invalid sparsecoding_level, its value should be '
+ 'None/False/Buffer/Features')
self.sparsecoded = sparsecoding_level
self.decide = None
# Training variables
self.training_data = None
self.train_ground_truth = None # is loaded from memory after training
self.train_classes = None # is loaded from memory after training
# Testing general variables
self.accuracy = None
self.f1_scores = None
self.confusion_matrix = None
self.scores_savepath = None
self.scores_std = []
self.scores_std_mean = []
self.scores = None
self.scores_filter_shape = None
self.std_big_filter_shape = None
self.std_small_filter_shape = None
self.recognized_classes = []
self.crossings = None
self.testname = ''
self.save_fold = None
self.online = False
# Testing offline variables
self.testdataname = ''
self.test_instances = None
# Testing online variables
self.count_prev = None
self.buffer_exists = None
self.scores_exist = None
self.img_count = -1
self._buffer = []
self.scores_running_mean_vec = []
self.big_std_running_mean_vec = []
self.small_std_running_mean_vec = []
self.saved_buffers_scores = []
self.new_action_starts_count = 0
self.test_ground_truth = None
self.mean_from = -1
self.on_action = False
self.act_inds = []
self.max_filtered_score = 0
self.less_filtered_scores_std = None
self.high_filtered_scores_std = None
self.classifier_folder = None
self.testing_initialized = False
self.classifiers_list = {}
self.classifier_savename = 'trained_'
self.classifier_savename += self.full_info.replace(' ', '_').lower()
try:
[self.unified_classifier,
info] = co.file_oper.load_labeled_data(
['Classifier'] + self.classifier_id)
co.file_oper.save_labeled_data(['Classifier'],
[self.unified_classifier,
self.training_parameters],
name=self.app_dir)
if isinstance(info, tuple):
self.training_params = info[0]
self.additional_params = info[1:]
else:
self.training_params = info
self.loaded_classifier = True
LOG.info('Loaded Classifier')
except TypeError:
if self.for_app:
[self.unified_classifier,
info] = co.file_oper.load_labeled_data(
['Classifier'],
name=self.app_dir)
self.loaded_classifier = True
else:
self.loaded_classifier = False
LOG.info('Classifier not Loaded')
self.load_tests()
try:
self.classifier_folder = str(self.classifiers_list[
self.classifier_savename])
except KeyError:
self.classifier_folder = str(len(self.classifiers_list))
self.coders_to_train = []
# parameters bound variables
self.frames_preproc = ara.FramesPreprocessing(self.parameters)
available_descriptors =\
ara.Actions(self.parameters).available_descriptors
try:
self.features_extractors = [available_descriptors[nam](
self.parameters, self.frames_preproc)
for nam in self.parameters['descriptors']]
self.buffer_operators = [
ara.BufferOperations(self.parameters)
for nam in self.parameters['descriptors']]
if self.sparsecoded:
[self.action_recog.
actions.load_sparse_coder(ind) for ind in range(
len(self.parameters['descriptors']))]
except BaseException: pass
def load_tests(self, reset=True):
if reset:
self.testdata = [None] * len(self.available_tests)
self.fscores = [None] * len(self.available_tests)
self.accuracies = [None] * len(self.available_tests)
self.results = [None] * len(self.available_tests)
self.conf_mats = [None] * len(self.available_tests)
self.test_times = [None] * len(self.available_tests)
for count, test in enumerate(self.available_tests):
if (self.testdata[count] is None or
self.testdata[count]['Accuracy'] is None):
self.testdata[count] = co.file_oper.load_labeled_data(
['Testing'] + self.tests_ids[count])
if (self.testdata[count] is not None and
self.testdata[count]['Accuracy'] is not None):
self.accuracies[count] = self.testdata[count]['Accuracy']
self.fscores[count] = self.testdata[count]['FScores']
self.results[count] = self.testdata[count]['Results']
self.conf_mats[count] = self.testdata[count]['ConfMat']
self.test_times[count] = self.testdata[count]['TestTime']
try:
self.partial_accuracies[count] = self.testdata[count][
'PartialAccuracies']
except BaseException: pass
else:
self.testdata[count] = {}
self.testdata[count]['Accuracy'] = {}
self.testdata[count]['FScores'] = {}
self.testdata[count]['Results'] = {}
self.testdata[count]['ConfMat'] = {}
self.testdata[count]['TestTime'] = {}
self.testdata[count]['Labels'] = {}
try:
self.testdata[count]['PartialAccuracies'] = {}
except BaseException: pass
def update_experiment_info(self):
if self.parameters['action_type'] == 'Passive':
info = 'passive '
else:
info = 'dynamic '
info = info + self.name + ' ' + self.classifiers_used + ' '
info += 'using'
if self.parameters['sparsecoded']:
info += ' sparsecoded'
for feature in self.parameters['descriptors']:
info += ' ' + feature
info += ' descriptors '
if 'SVM' in self.parameters['classifier']:
info += 'with ' + self.parameters[
'classifier_params']['SVM_kernel'] + ' kernel'
elif 'RDF' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'RDF_n_estimators']) + ' estimators')
elif 'AdaBoost' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'AdaBoost_n_estimators']) + ' estimators')
if self.parameters['action_type'] == 'Dynamic':
info += ' with buffer size ' + str(self.buffer_size)
if self.parameters['sparsecoded']:
info += ' with sparsecoding by ratio of ' + \
str(self.sparse_dim_rat)
if self.ptpca:
info += (' with ' +
str(self.parameters['PTPCA_params']['PTPCA_components']) +
' post-time-pca components')
self.full_info = info.title()
if self.add_info:
info += self.add_info
self.classifier_savename = 'trained_'
self.classifier_savename += self.full_info.replace(' ', '_').lower()
self.update_classifier_id()
self.update_tests_ids()
def update_classifier_id(self):
self.features_file_id = []
self.features_id = []
for count in range(len(self.parameters['descriptors'])):
_id, file_id = self.action_recog.actions.retrieve_descriptor_possible_ids(count,
assume_existence=True)
self.features_id.append(_id)
self.features_file_id.append(file_id)
self.classifier_id = [co.dict_oper.create_sorted_dict_view(
{'Classifier': str(self.classifiers_used)}),
co.dict_oper.create_sorted_dict_view(
{'ClassifierParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['classifier_params']))}),
co.dict_oper.create_sorted_dict_view(
{'ActionsType': str(self.action_type)}),
co.dict_oper.create_sorted_dict_view(
{'FeaturesParams': str(self.features_file_id)})]
def update_tests_ids(self):
self.tests_ids = []
for count, test in enumerate(self.available_tests):
self.tests_ids.append([co.dict_oper.create_sorted_dict_view({'Test': str(test)}),
co.dict_oper.create_sorted_dict_view(
{'TestingParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['testing_params']))})]
+ [self.classifier_id])
def initialize_classifier(self, classifier):
'''
Add type to classifier and set methods
'''
self.unified_classifier = classifier
if 'SVM' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.decision_function
self.unified_classifier.predict = self.unified_classifier.predict
elif 'RDF' in self.classifiers_used or 'AdaBoost' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.predict_proba
self.unified_classifier.predict = self.unified_classifier.predict
co.file_oper.save_labeled_data(['Classifier'] + self.classifier_id,
[self.unified_classifier,
self.training_parameters])
co.file_oper.save_labeled_data(['Classifier'],
[self.unified_classifier,
self.training_parameters],
name=self.app_dir)
def reset_offline_test(self):
'''
Reset offline testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = None
self.recognized_classes = []
self.crossings = None
self.save_fold = None
self.testing_initialized = True
# Testing offline variables
def reset_online_test(self):
'''
Reset online testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = []
self.recognized_classes = []
self.crossings = []
self.save_fold = None
# Testing online variables
self.count_prev = None
self.buffer_exists = []
self.scores_exist = []
self.img_count = -1
self._buffer = []
self.scores_running_mean_vec = []
self.big_std_running_mean_vec = []
self.small_std_running_mean_vec = []
self.saved_buffers_scores = []
self.new_action_starts_count = 0
self.test_ground_truth = None
self.mean_from = -1
self.on_action = False
self.act_inds = []
self.max_filtered_score = 0
self.less_filtered_scores_std = None
self.high_filtered_scores_std = None
self.testing_initialized = True
def add_train_classes(self, training_datapath):
'''
Set the training classes of the classifier
'''
try:
self.train_classes = [name for name in os.listdir(training_datapath)
if os.path.isdir(os.path.join(training_datapath, name))][::-1]
except:
if self.for_app:
with open(os.path.join(self.app_dir,
'train_classes'),'r') as inp:
self.train_classes = pickle.load(inp)
else:
raise
self.all_actions = ['Undefined'] + self.train_classes
# Compare actions in memory with actions in file 'gestures_type.csv'
if self.passive_actions is not None:
passive_actions = [clas for clas in
(self.passive_actions) if clas
in self.train_classes]
if self.dynamic_actions is not None:
dynamic_actions = [clas for clas in
(self.dynamic_actions) if clas
in self.train_classes]
if (self.dynamic_actions is not None and
self.passive_actions is not None):
if 'Sync' in self.classifiers_used:
self.train_classes = {'Passive': passive_actions,
'Dynamic': dynamic_actions}
else:
classes = []
if self.action_type == 'Dynamic' or self.action_type == 'All':
classes += dynamic_actions
if self.action_type == 'Passive' or self.action_type == 'All':
classes += passive_actions
self.train_classes = classes
with open(os.path.join(self.app_dir,
'train_classes'),'w') as out:
pickle.dump(self.train_classes, out)
def run_training(self, coders_retrain=False,
classifiers_retrain=False,
training_datapath=None, classifier_savename=None,
num_of_cores=4, classifier_save=True,
max_act_samples=None,
min_dict_iterations=5,
visualize_feat=False, just_sparse=False,
init_sc_traindata_num=200,
train_all=False):
'''
<Arguments>
For coders training:
            Do not train coders if the coder already exists and <coders_retrain>
            is False. <min_dict_iterations> denotes the minimum number of training iterations to
            take place after the whole data has been processed by the trainer
            of the coder. <init_sc_traindata_num> denotes how many samples
will be used in the first iteration of the sparse coder training
For svm training:
Train ClassifierS with <num_of_cores>.
            Save them if <classifier_save> is True under <classifier_savename>. Do not train
            if a classifier saved under <classifier_savename> already exists and <classifiers_retrain> is False.
'''
self.train_all = train_all
self.parameters['testing'] = False
LOG.info(self.full_info + ':')
if classifier_savename is not None:
self.classifier_savename = classifier_savename
if training_datapath is None:
training_datapath = co.CONST['actions_path']
self.add_train_classes(training_datapath)
if self.unified_classifier is None:
LOG.info('Missing trained classifier:' +
self.full_info)
LOG.info('Classifier will be retrained')
classifiers_retrain = True
else:
if not self.sparsecoded:
return
self.prepare_training_data(training_datapath, max_act_samples,
visualize_feat=visualize_feat)
if just_sparse:
return
if self.sparsecoded and self.coders_to_train and classifiers_retrain:
# Enters only if coders were not initially trained or had to be
# retrained. Otherwise, sparse descriptors are computed when
#<Action.add_features> is called
LOG.info('Trained' + str([self.parameters['descriptors'][coder] for coder in
self.coders_to_train]))
LOG.info('Making Sparse Features..')
self.action_recog = ara.ActionRecognition(
self.parameters,
log_lev=self.log_lev,
feat_filename=os.path.join(co.CONST['feat_save_path'],
'saved'))
self.prepare_training_data(training_datapath, max_act_samples,
visualize_feat=visualize_feat)
self.process_training(num_of_cores, classifiers_retrain,
self.classifier_savename, classifier_save)
def prepare_training_data(self, path=None, max_act_samples=None,
visualize_feat=False):
'''
Read actions from the <path> and name them according to their parent
folder name
'''
LOG.info('Adding actions..')
while True:
self.training_data = []
self.training_samples_inds = []
for act_count, action in enumerate(self.train_classes):
LOG.info('Action:' + action)
descriptors, samples_indices, mean_depths, _, trained_coders, _ = self.add_action(name=action,
data=os.path.join(
path, action),
use_dexter=False,
action_type=self.action_type,
max_act_samples=max_act_samples)
if not(self.sparsecoded and None in trained_coders):
descriptors = np.hstack(tuple(descriptors))
fmask = np.prod(np.isfinite(
descriptors), axis=1).astype(bool)
descriptors = descriptors[fmask]
LOG.info('Action \'' + action + '\' has ' +
'descriptors of shape ' + str(descriptors.shape))
self.training_data.append(descriptors)
self.training_samples_inds.append(
np.array(samples_indices)[fmask])
else:
self.training_samples_inds = []
self.training_data = []
self.train_ground_truth = []
if self.training_data:
if self.action_type == 'Dynamic':
self.training_data = co.preproc_oper.equalize_samples(
samples=self.training_data,
utterance_indices=self.training_samples_inds,
mode='random')
self.train_ground_truth = []
for act_count, clas in enumerate(self.training_data):
self.train_ground_truth += clas.shape[0] * [act_count]
self.training_data = np.vstack((self.training_data))
if None in trained_coders and self.sparsecoded:
self.action_recog.actions.train_sparse_dictionary()
else:
break
finite_samples = np.prod(
np.isfinite(
self.training_data),
axis=1).astype(bool)
self.train_ground_truth = np.array(
self.train_ground_truth)[finite_samples]
self.training_data = self.training_data[finite_samples, :]
LOG.info('Total Training Data has shape:'
+ str(self.training_data.shape))
def process_training(self, num_of_cores=4, retrain=False,
savepath=None, save=True):
'''
        Train (or load trained) Classifiers using <num_of_cores> cores and a buffer of size
        <self.buffer_size> (stride 1). If <retrain> is True, Classifiers are retrained, even if
        <savepath> exists.
'''
loaded = 0
if save and savepath is None:
raise Exception('savepath needed')
if retrain or self.unified_classifier is None:
if retrain and self.unified_classifier is not None:
LOG.info('retrain switch is True, so the Classifier ' +
'is retrained')
classifier_params = {elem.replace(self.classifiers_used + '_', ''):
self.parameters['classifier_params'][elem]
for elem in
self.parameters['classifier_params']
if elem.startswith(self.classifiers_used)}
if any([isinstance(classifier_params[elem], list)
for elem in classifier_params]):
grid_search_params = classifier_params.copy()
from sklearn.multiclass import OneVsRestClassifier
if isinstance(self.classifier_type, OneVsRestClassifier):
grid_search_params = {('estimator__' + key): classifier_params[key]
for key in classifier_params}
grid_search_params = {key: (grid_search_params[key] if
isinstance(
grid_search_params[key], list)
else [
grid_search_params[key]]) for key in
classifier_params}
best_params, best_scores, best_estimators = optGridSearchCV(
self.classifier_type, self.training_data,
self.train_ground_truth, grid_search_params, n_jobs=4,
fold_num=3)
best_params = best_params[-1]
best_scores = best_scores[-1]
best_estimator = best_estimators[-1]
if isinstance(self.classifier_type, OneVsRestClassifier):
best_params = {key.replace('estimator__', ''):
classifier_params[
key.replace('estimator__', '')]
for key in best_params}
classifier_params = {self.classifiers_used + '_' + key: best_params[key] for key
in best_params}
self.parameters['classifier_params'].update(classifier_params)
self.training_parameters['classifier_params'].update(
classifier_params)
self.classifier_type = best_estimator
self.update_experiment_info()
savepath = self.classifier_savename
self.initialize_classifier(self.classifier_type.fit(self.training_data,
self.train_ground_truth))
def compute_testing_time(self, testname):
testing_time = {}
features_extraction_time = 0
if not self.online:
for count in range(len(self.parameters['descriptors'])):
try:
loaded = co.file_oper.load_labeled_data(
[str(self.features_id[count][-1])] +
self.features_file_id[count] +
[str(testname)])
(_, _, _, feat_times) = loaded
except BaseException:
return None
for key in feat_times:
LOG.info('Time:' + str(key) + ':' +
str(np.mean(feat_times[key])))
features_extraction_time += np.mean(feat_times[key])
try:
testing_time['Classification'] = self.time[
-1] / float(self.scores.shape[0])
except IndexError:
testing_time['Classification'] = (
co.file_oper.load_labeled_data(
['Testing'] + self.tests_ids[
self.available_tests.index(
testname)])['TestTime'][
'Classification'])
else:
testing_time['Classification'] = np.mean(self.time)
testing_time['Features Extraction'] = features_extraction_time
return testing_time
def add_action(self, name=None, data=None, visualize=False, offline_vis=False,
to_visualize=[], exit_after_visualization=False,
use_dexter=False,
action_type=None,
max_act_samples=None):
return self.action_recog.add_action(
name=name,
use_dexter=use_dexter,
action_type=self.action_type,
max_act_samples=max_act_samples,
data=data,
offline_vis=offline_vis,
to_visualize=to_visualize,
exit_after_visualization=exit_after_visualization)
def offline_testdata_processing(self, datapath):
'''
Offline testing data processing, using data in <datapath>.
'''
LOG.info('Processing test data..')
LOG.info('Extracting descriptors..')
(descriptors, _, mean_depths, test_name, _, _) = self.add_action(
name=None, data=datapath)
testdata = np.hstack(tuple(descriptors))
self.parameters['testing_params'][test_name] = test_name
self.parameters['testing_params']['current'] = test_name
return testdata
def save_plot(self, fig, lgd=None, display_all=False, info=None):
'''
<fig>: figure
<lgd>: legend of figure
<display_all>: whether to save as Total plot
Saves plot if the action resides in self.available_tests
'''
filename = None
if display_all:
testname = self.action_type.lower()
filename = os.path.join(*self.save_fold.split(os.sep)[:-1] +
['Total', testname + '.pdf'])
else:
if self.test_name is None:
self.test_name = (self.name + ' ' + self.classifiers_used).title()
if self.test_name in self.available_tests:
if self.save_fold is None:
if not self.online:
fold_name = co.file_oper.load_labeled_data(['Testing'],
just_catalog=True,
include_all_catalog=True)[
str(self.tests_ids[
self.available_tests.
index(self.test_name)])]
else:
fold_name = 'Online'
self.save_fold = os.path.join(
co.CONST['results_fold'], 'Classification', fold_name,
self.test_name)
if self.add_info is not None:
self.save_fold = os.path.join(
self.save_fold, self.add_info.replace(' ', '_').lower())
co.makedir(self.save_fold)
LOG.info('Saving to ' + self.save_fold)
if info is not None:
filename = os.path.join(
self.save_fold, (self.testname + ' ' + info +
'.pdf').replace(' ','_'))
else:
filename = os.path.join(
self.save_fold, self.testname.replace(' ','_') + '.pdf')
else:
LOG.warning('Requested figure to plot belongs to an' +
' action that does not reside in <self.'+
'available_tests> .Skipping..')
filename = None
import matplotlib.pyplot as plt
if filename is not None:
if lgd is None:
plt.savefig(filename)
else:
plt.savefig(filename,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_result(self, data, info=None, save=True, xlabel='Frames', ylabel='',
labels=None, colors=None, linewidths=None, alphas=None,
xticks_names=None, yticks_names=None, xticks_locs=None,
yticks_locs=None, markers=None, markers_sizes=None, zorders=None, ylim=None, xlim=None,
display_all=False, title=False):
'''
<data> is a numpy array dims (n_points, n_plots),
<labels> is a string list of dimension (n_plots)
<colors> ditto
'''
import matplotlib
from matplotlib import pyplot as plt
#matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['text.latex.unicode'] = True
    # plt.style.use('seaborn-ticks')
if len(data.shape) == 1:
data = np.atleast_2d(data).T
fig, axes = plt.subplots()
if xticks_locs is not None:
axes.set_xticks(xticks_locs, minor=True)
axes.xaxis.grid(True, which='minor')
if yticks_locs is not None:
axes.set_yticks(yticks_locs, minor=True)
axes.yaxis.grid(True, which='minor')
if xticks_names is not None:
plt.xticks(range(len(xticks_names)), xticks_names)
if yticks_names is not None:
plt.yticks(range(len(yticks_names)), yticks_names)
if markers is None:
markers = [','] * data.shape[1]
if markers_sizes is None:
markers_sizes = [10] * data.shape[1]
if colors is None:
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
if alphas is None:
alphas = data.shape[1] * [1]
if zorders is None:
zorders = data.shape[1] * [0]
while len(colors) < data.shape[1]:
colors += [tuple(np.random.random(3))]
if linewidths is None:
linewidths = [1] * data.shape[1]
lgd = None
for count in range(data.shape[1]):
if labels is not None:
axes.plot(data[:, count], label='%s' % labels[count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
lgd = co.plot_oper.put_legend_outside_plot(axes,
already_reshaped=True)
else:
axes.plot(data[:, count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
if title:
if info is not None:
plt.title(self.testname +
'\n Dataset: ' + self.testdataname +
'\n' + info.title())
else:
plt.title(self.testname +
'\n Dataset ' + self.testdataname)
info = ''
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
if save:
self.save_plot(fig, lgd, display_all=display_all, info=info)
return fig, lgd, axes
def init_testing(self, data=None, online=True, save=True, load=True,
testname=None, scores_savepath=None,
scores_filter_shape=5,
std_small_filter_shape=co.CONST['STD_small_filt_window'],
std_big_filter_shape=co.CONST['STD_big_filt_window'],
testdatapath=None, save_results=True):
'''
Initializes paths and names used in testing to save, load and visualize
data.
        Built as a convenience method, in case <self.run_testing> gets overridden.
'''
self.parameters['testing'] = True
self.parameters['testing_params']['online'] = online
if online:
self.reset_online_test()
else:
self.reset_offline_test()
self.scores_filter_shape = scores_filter_shape
self.std_small_filter_shape = std_small_filter_shape
self.std_big_filter_shape = std_big_filter_shape
self.online = online
if testname is not None:
self.testname = testname.title()
else:
self.testname = (self.name + ' ' + self.classifiers_used).title()
if self.add_info is not None:
self.testname += ' ' + self.add_info.title()
self.parameters['testing_params']['current'] = self.testname
if online:
if testdatapath is not None:
self.testdataname = ('online (using '
+ os.path.basename(testdatapath) + ')')
else:
self.testdataname = 'online'
else:
self.testdataname = os.path.basename(data)
if not self.online:
if self.test_ind is not None:
available_tests_ids = co.file_oper.load_labeled_data(['Testing'],
just_catalog=True,
include_all_catalog=True)
if available_tests_ids is None:
fold_name = '0'
else:
curr_test_id = self.tests_ids[self.available_tests.
index(self.test_name)]
if str(curr_test_id) in available_tests_ids:
fold_name = str(available_tests_ids[str(curr_test_id)])
else:
fold_name = str(len(available_tests_ids))
else:
self.test_name = 'Online'
try:
fold_name = os.path.join(*[co.CONST['results_fold'],
'Classification', 'Online'])
except OSError:
fold_name = '0'
if self.test_ind is not None:
self.save_fold = os.path.join(
co.CONST['results_fold'], 'Classification', self.test_name,
fold_name)
co.makedir(self.save_fold)
if save or load:
fold_name = self.classifier_folder
if scores_savepath is None:
self.scores_savepath = self.testdataname + '_scores_for_'
self.scores_savepath += self.full_info.replace(' ',
'_').lower()
self.scores_savepath += '.pkl'
else:
self.scores_savepath = scores_savepath
return True
def run_testing(self, data=None, derot_angle=None, derot_center=None,
online=True,
scores_filter_shape=5,
std_small_filter_shape=co.CONST['STD_small_filt_window'],
std_big_filter_shape=co.CONST['STD_big_filt_window'],
ground_truth_type=None,
img_count=None, save=True, scores_savepath=None,
load=False, testname=None, display_scores=True,
construct_gt=True, just_scores=False, testdatapath=None,
compute_perform=True,
save_results=True):
'''
Test Classifiers using data (.png files) located in <data>. If <online>, the
testing is online, with <data> being a numpy array, which has been
firstly processed by <hand_segmentation_alg>. The scores retrieved from
        testing are filtered using a box filter of shape <scores_filter_shape>.
        The running mean along a buffer
        of the data is computed with running windows of lengths
        <std_small_filter_shape> and <std_big_filter_shape>. The ground truth for the testing data is given by
        <ground_truth_type> (for further info about the variable refer to
        <co.gd_oper.construct_ground_truth>). If the testing is online, the count of
the frame is passed by <img_count>. If <save> is True,
testing results are saved to <scores_savepath>, or a path constructed
by the configuration. <testname> overrides the first line of the plots.
        If <load> is True and <scores_savepath> exists, testing is bypassed and all the
necessary results are loaded from memory. If <just_scores> is True, the
classification stage is not done and only scores are computed. If
<testdatapath> is not <None> and <online> is True, then it will be
        assumed that a pseudo-online testing is taking place
'''
loaded = False
if not online:
LOG.info('Testing:' + data)
try:
self.test_ind = self.available_tests.index(data)
self.test_name = data
except BaseException:
if data.split(os.sep)[-1] in self.available_tests:
self.test_ind = (
self.available_tests.index(data.split(os.sep)[-1]))
self.test_name = data.split(os.sep)[-1]
elif data in self.dynamic_actions or data in self.passive_actions:
self.test_ind = None
elif data.split(os.sep)[-1] in self.dynamic_actions or \
data.split(os.sep)[-1] in self.passive_actions:
self.test_ind = None
else:
raise Exception('test data must be inside test_save_path,' +
' check config.yaml')
if construct_gt and ground_truth_type is None:
ground_truth_type =os.path.join(
co.CONST['ground_truth_fold'],
self.test_name + '.csv')
elif isinstance(data, tuple):
derot_angle = data[1]
derot_center = data[2]
data = data[0]
if not self.testing_initialized or not online:
if not self.init_testing(data=data,
online=online,
save=save,
load=load,
testname=testname,
scores_savepath=scores_savepath,
scores_filter_shape=scores_filter_shape,
std_small_filter_shape=std_small_filter_shape,
std_big_filter_shape=std_big_filter_shape,
testdatapath=testdatapath,
save_results=save_results):
return False
if not online:
if self.test_ind is not None and (
load and self.accuracies[self.available_tests.index(self.test_name)]
is not None):
LOG.info('Tests already performed, loaded data')
try:
self.scores = self.results['Scores']
loaded = True
except:
pass
if not loaded:
if self.test_ind is not None:
testdata = self.offline_testdata_processing(
os.path.join(co.CONST['test_save_path'],
self.test_name))
else:
testdata = self.offline_testdata_processing(
data)
try:
self.test_ind = self.available_tests.index(data)
                    except BaseException:
                        self.test_ind = None
LOG.info(self.full_info + ':')
LOG.info('Testing Classifiers using testdata with size: '
+ str(testdata.shape))
fmask = np.prod(np.isfinite(testdata), axis=1).astype(bool)
fin_scores = self.unified_classifier.decide(
testdata[fmask, :])
self.scores = np.zeros(
(testdata.shape[0], fin_scores.shape[1]))
self.scores[:] = None
self.scores[fmask] = fin_scores
if self.test_ind is not None:
self.testdata[self.test_ind]['Results']['Scores'] = self.scores
if construct_gt:
LOG.info('Constructing ground truth vector..')
self.test_ground_truth, self.test_breakpoints = co.gd_oper.construct_ground_truth(
os.path.join(co.CONST['test_save_path'], self.test_name),
classes_namespace=self.train_classes,
length=self.scores.shape[0],
ground_truth_type=ground_truth_type,
ret_breakpoints=True)
utterances_inds = co.gd_oper.merge_utterances_vectors(
co.gd_oper.create_utterances_vectors(
self.test_breakpoints, len(self.test_ground_truth)),
self.train_classes)
if not just_scores:
self.classify_offline(save=save, display=display_scores,
compute_perform=compute_perform,
extraction_method=
self.parameters[
'testing_params']['post_scores_processing_method'])
self.correlate_with_ground_truth(save=save,
display=display_scores,
compute_perform=compute_perform,
utterances_inds=utterances_inds)
self.display_scores_and_time(save=save)
if self.test_ind is not None:
co.file_oper.save_labeled_data(['Testing'] +self.tests_ids[
self.test_ind], self.testdata[self.test_ind])
if not just_scores:
if display_scores:
if self.parameters['testing_params'][
'post_scores_processing_method'] == 'CSTD':
self.plot_result(np.concatenate((
self.less_filtered_scores_std[:, None],
self.high_filtered_scores_std[:, None]), axis=1),
info='Scores Statistics',
xlabel='Frames',
labels=['STD', 'STD Mean'],
colors=['r', 'g'],
save=save)
mean_diff = (np.array(self.high_filtered_scores_std) -
np.array(self.less_filtered_scores_std))
mean_diff = (mean_diff) / float(np.max(np.abs(mean_diff[
np.isfinite(mean_diff)])))
plots = [mean_diff]
labels = ['ScoresSTD - ScoresSTDMean']
if self.test_ground_truth is not None:
plots += [((self.test_ground_truth - np.mean(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])) / float(
np.max(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])))[:, None]]
labels += ['Ground Truth']
self.plot_result(np.concatenate(plots, axis=1), labels=labels,
info='Metric of actions starting and ending ' +
'points', xlabel='Frames', save=save)
if display_scores:
self.plot_result(self.scores,
labels=self.train_classes,
info='Scores',
xlabel='Frames',
save=save,
)
return True, self.scores
else:
'''
input is processed from hand_segmentation_alg (any data
processed in such way, that the result is the same with my processing,
is acceptable, eg. Dexter)
There must be a continuous data streaming (method called in every
loop), even if the result of the previous algorithm is None
'''
scores_exist, score = self.process_online_data(data, img_count,
derot_angle,
derot_center,
just_scores=just_scores)
return scores_exist, score
def visualize_action(self, action, save=True,
save_name=None, *args, **kwargs):
'''
Visualizes action or a testing dataset using predefined locations in
config.yaml and the method co.draw_oper.plot_utterances
'''
dataset_loc = '/media/vassilis/Thesis/Datasets/PersonalFarm/'
results_loc = '/home/vassilis/Thesis/KinectPainting/Results/DataVisualization'
ground_truth, breakpoints, labels = co.gd_oper.load_ground_truth(action, ret_labs=True,
ret_breakpoints=True)
testing =True
images_base_loc = os.path.join(dataset_loc, 'actions',
'sets' if not testing else 'whole_result')
images_loc = os.path.join(
images_base_loc, action.replace(
'_', ' ').title())
imgs, masks, sync, angles, centers, samples_indices = co.imfold_oper.load_frames_data(
images_loc, masks_needed=True)
import cv2
masks_centers = []
xdim = 0
ydim = 0
conts = []
tmp = []
for mask, img in zip(masks, imgs):
conts = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
conts_areas = [cv2.contourArea(cont) for cont in conts]
tmp.append(np.sum(mask*img >0))
if np.sum(mask*img >0) < 500:
masks_centers.append(None)
else:
                cont = conts[np.argmax(conts_areas)]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.utils import np_utils, plot_model
# https://keras.io/guides/sequential_model/
# https://appliedmachinelearning.blog/2019/07/29/transfer-learning-using-feature-extraction-from-trained-models-food-images-cmodellassification/
def feedForward(X,y,epoch, batch_size):
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Normalize the images.
train_images = X_train.astype('float32')
test_images = X_test.astype('float32')
train_images /= 255
test_images /= 255
train_labels = np_utils.to_categorical(Y_train, num_classes=10, dtype='uint8')
test_labels = np_utils.to_categorical(Y_test, num_classes=10, dtype='uint8')
# Build the model
model = Sequential()
model.add(Dense(28, activation='relu',input_shape=(784,)))
model.add(Dense(28, activation='relu'))
model.add(Dense(10, activation='softmax', name="Layer"))
# Compile the model
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
model.layers
plot_model(model, to_file='multilayer_graph.png')
# extracting the features from the last layer of the network
feature_extractor = Model(inputs=model.inputs, outputs=model.get_layer(name="Layer").output)
# Call feature extractor on test input.
x = test_images
print("x_shape: \n", test_images.shape)
print("x : \n", test_images)
features = feature_extractor.predict(x)
print("features shape: \n", features.shape)
print("features: \n", features)
X = np.arange(10)
plt.figure()
index = 0
while index<100:
plt.scatter(X , features[index])
index+=1
plt.show()
# Train the model
model.fit(train_images, train_labels, epochs=epoch, batch_size=batch_size, verbose=0)
# Evaluate the model
results = model.evaluate(test_images, test_labels, verbose=0)
print("test loss, test acc: \n", results)
# Save the model to disk
model.save_weights('model.h1')
# Load the model from disk later using:
# model.load_weights('model.h1')
    # Predict on the first 10 test images.
preds = np.argmax(model.predict(test_images[0:10]), axis=1)
print("Prediction Shape: \n", preds.shape)
# Print our model's predictions.
print("prediction: \n", preds)
# Check our predictions against the ground truths.
print("test_labels shape: \n", test_labels[0:10].shape)
print("test_labels: \n", test_labels[0:10])
print("test_labels argmax: \n", np.argmax(test_labels[0:10],axis=1))
print("\nAccuracy on Test Data: ", accuracy_score(np.argmax(test_labels[0:10], axis=1), preds))
    print("\nNumber of correctly identified images: ", accuracy_score(np.argmax(test_labels[0:10], axis=1), preds, normalize=False))
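# A minimal usage sketch (not part of the original script; names and values here are
# illustrative): it assumes MNIST-like input, i.e. X holds flattened 28x28 images of
# shape (n, 784) and y holds integer labels 0-9 matching the 10-unit softmax layer.
if __name__ == "__main__":
    from keras.datasets import mnist

    (digits, labels), _ = mnist.load_data()
    X = digits.reshape(-1, 784)   # flatten each 28x28 image into 784 features
    y = labels
    feedForward(X, y, epoch=5, batch_size=32)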
from unittest import TestCase
import numpy as np
from numpy.testing import assert_allclose
from statsmodels.regression.linear_model import WLS
from statsmodels.regression._tools import _MinimalWLS
class TestMinimalWLS(TestCase):
@classmethod
def setUpClass(cls):
rs = np.random.RandomState(1234)
cls.exog1 = rs.randn(200,5)
cls.endog1 = cls.exog1.sum(1) + rs.randn(200)
        cls.weights1 = 1.0 + np.sin(np.arange(200.0))
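    # A hedged sketch of the kind of check this fixture is built for (not from the
    # original file): it assumes _MinimalWLS accepts (endog, exog, weights) and that
    # its fit() result exposes .params and .fittedvalues like the public WLS results.
    #
    #    def test_equivalence_with_wls(self):
    #        full = WLS(self.endog1, self.exog1, weights=self.weights1).fit()
    #        mini = _MinimalWLS(self.endog1, self.exog1, weights=self.weights1).fit()
    #        assert_allclose(full.params, mini.params)
    #        assert_allclose(full.fittedvalues, mini.fittedvalues)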
"""
Functions used in Metabolite annotation
"""
import numpy as np
from scipy.interpolate import interpolate
from scipy.optimize import leastsq
def find(a,cond):
b=np.nonzero(cond)
return b
def cauchy(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
arg=((1/(p3[0]*3.14159))/((1/p3[0])**2+(X*X)))*((1/(p3[1]*3.14159))/((1/p3[1])**2+(Y*Y)))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
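# Illustrative example (not part of the original module): a 19x19 separable Cauchy
# kernel with width parameters 0.3 along each axis; the code above normalizes the
# kernel so that its peak value is 1.
#   h = cauchy(19, [0.3, 0.3])
#   h.shape -> (19, 19); h.max() -> 1.0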
def expon(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
    arg=(1/6.28)*(1/p3[0])*np.exp(-X*X/(2*p3[0]**2))*(1/p3[1])*np.exp(-Y*Y/(2*p3[1]**2))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def exponcauchy(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
arg=(1/3.14159)*(1/p3[0])*np.exp(-X*X/(2*p3[0]**2))*((1/(p3[1]*3.14159))/((1/p3[1])**2+(Y*Y)))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def cauchyexpon(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
    arg=((1/(p3[0]*3.14159))/((1/p3[0])**2+(X*X)))*(1/3.14159)*(1/p3[1])*np.exp(-Y*Y/(2*p3[1]**2))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def subpix2(z,ii,jj):
trange = np.arange(7)
ttrange = np.arange(7)
X,Y = np.meshgrid(trange,ttrange)
outgrid = interpolate.interp2d(X,Y,z,kind='quintic')
xx=yy=np.arange(61)/10.
l=outgrid(xx,yy)
l=l[30-9:30+10,30-9:30+10]
ind=find(l,l==np.amax(l))
#print l
#print ind[0][0],ind[1][0]
ni=ii+(ind[0][0]-9.)/10
nj=jj+(ind[1][0]-9.)/10
#print ii,jj
#print ni,nj
return[ni,nj]
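# Illustrative example (not part of the original module): refine an integer peak
# location (ii, jj) to tenth-of-pixel precision from the 7x7 intensity patch z
# centred on it; the quintic interpolation above searches roughly a +/-1 pixel
# window in 0.1-pixel steps.
#   ni, nj = subpix2(patch_7x7, ii, jj)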
def dephcl(z):
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*cauchy(19,v)),1)
vi=[0.3,0.3]
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if v[0]<0.001 or v[0]>2 or v[1]<0.001 or v[1]>2 :
v[0]=v[1]=0.3+np.random.normal(0, 0.05, 1)
return v
def dephcg(z):
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*expon(19,v)),1)
vi=[1,1]
#z[z<0]=0
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if v[0]<0.1 or v[0]>4 or v[1]<0.1 or v[1]>4 :
v[0]=v[1]=2+np.random.normal(0, 0.05, 1)
return v
def dephcaprio(z,a,b,c):
if c[0]=='g' and c[1]=='g':
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*expon(19,v)),1)
vi=[a,b]
#z[z<0]=0
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if np.abs(float(v[0]-a))>1 or v[0]<0.5 or v[0]>6:
v[0]=a+np.random.normal(0, 0.05, 1)
        if np.abs(float(v[1]-b))>1 or v[1]<0.5 or v[1]>6:
            v[1]=b+np.random.normal(0, 0.05, 1)
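# A minimal usage sketch (appended for illustration; the values are arbitrary): build
# a synthetic, peak-normalized 19x19 Cauchy-shaped peak with cauchy() and recover its
# width parameters with dephcl().
if __name__ == "__main__":
    true_widths = [0.4, 0.25]
    peak = cauchy(19, true_widths)
    estimated = dephcl(peak)
    print("estimated widths:", estimated)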
'''plotting ODS methods and utilities
-------
'''
# NOTEs: https://git.iter.org/projects/IMAS/repos/idstools/browse/bin has some plotting utilities that may be worth checking out
from .omas_utils import *
from .omas_physics import cocos_transform
from .omas_symbols import latexit
__all__ = []
__ods__ = []
def add_to__ODS__(f):
"""
anything wrapped here will be available as a ODS method with name 'plot_'+f.__name__
"""
__ods__.append(f.__name__)
return f
def add_to__ALL__(f):
__all__.append(f.__name__)
return f
# ================================
# plotting helper functions
# ================================
def uerrorbar(x, y, ax=None, **kwargs):
r"""
Given arguments y or x,y where x and/or y have uncertainties, feed the
appropriate terms to matplotlib's errorbar function.
If y or x is more than 1D, it is flattened along every dimension but the last.
:param x: array of independent axis values
:param y: array of values with uncertainties, for which shaded error band is plotted
:param ax: The axes instance into which to plot (default: gca())
:param \**kwargs: Passed to ax.errorbar
:return: list. A list of ErrorbarContainer objects containing the line, bars, and caps of each (x,y) along the last dimension.
"""
result = []
# set default key word arguments
if ax is None:
from matplotlib import pyplot
ax = pyplot.gca()
kwargs.setdefault('marker', 'o')
if 'linestyle' not in kwargs and 'ls' not in kwargs:
kwargs['linestyle'] = ''
if numpy.all(std_devs(y) == 0) and numpy.all(std_devs(x) == 0):
kwargs.setdefault('capsize', 0)
# enable combinations of 1D and 2D x's and y's
y = numpy.array(y)
y = y.reshape(-1, y.shape[-1])
x = numpy.array(x)
x = x.reshape(-1, x.shape[-1])
if x.shape[0] == 1 and y.shape[0] > 1: # one x for all y's
x = numpy.tile(x[0, :], y.shape[0]).reshape(-1, x.shape[-1])
# plot each (x,y) and collect container objects
for xi, yi in zip(x, y):
tmp = ax.errorbar(nominal_values(xi), nominal_values(yi), xerr=std_devs(xi), yerr=std_devs(yi), **kwargs)
result.append(tmp)
return result
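# Illustrative usage (a sketch, not from the original module): x,y data carrying
# uncertainties from the 'uncertainties' package, whose unumpy arrays feed the
# nominal_values/std_devs calls above.
#   from uncertainties import unumpy
#   x = numpy.linspace(0, 1, 20)
#   y = unumpy.uarray(numpy.sin(x), 0.05 * numpy.ones(20))
#   uerrorbar(x, y)   # one errorbar per point, with half-height 0.05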
class Uband(object):
"""
This class wraps the line and PollyCollection(s) associated with a banded
errorbar plot for use in the uband function.
"""
def __init__(self, line, bands):
"""
:param line: Line2D
A line of the x,y nominal values
:param bands: list of PolyCollections
The fill_between and/or fill_betweenx PollyCollections spanning the std_devs of the x,y data
"""
from matplotlib.cbook import flatten
self.line = line # matplotlib.lines.Line2D
self.bands = list(flatten([bands])) # matplotlib.collections.PolyCollection(s)
def __getattr__(self, attr):
if attr in ['set_color', 'set_lw', 'set_linewidth', 'set_dashes', 'set_linestyle']:
def _band_line_method(self, method, *args, **kw):
"""
Call the same method for line and band.
Returns Line2D method call result.
"""
for band in self.bands:
getattr(band, method)(*args, **kw)
return getattr(self.line, method)(*args, **kw)
return lambda method=attr, *args, **kw: _band_line_method(method, *args, **kw)
else:
return getattr(self.line, attr)
def uband(x, y, ax=None, fill_kw={'alpha': 0.25}, **kw):
r"""
Given arguments x,y where either or both have uncertainties, plot x,y using pyplt.plot
of the nominal values and surround it with with a shaded error band using matplotlib's
fill_between and/or fill_betweenx.
If y or x is more than 1D, it is flattened along every dimension but the last.
:param x: array of independent axis values
:param y: array of values with uncertainties, for which shaded error band is plotted
:param ax: axes instance into which to plot (default: gca())
:param fill_kw: dict. Passed to pyplot.fill_between
:param \**kw: Passed to pyplot.plot
:return: list. A list of Uband objects containing the line and bands of each (x,y) along the last dimension.
"""
from matplotlib import pyplot
result = []
if ax is None:
ax = pyplot.gca()
# enable combinations of 1D and 2D x's and y's
y = numpy.array(y)
y = y.reshape(-1, y.shape[-1])
x = numpy.array(x)
x = x.reshape(-1, x.shape[-1])
if x.shape[0] == 1 and y.shape[0] > 1: # one x for all y's
x = numpy.tile(x[0, :], y.shape[0]).reshape(-1, x.shape[-1])
# plot each (x,y) and collect the lines/bands into a single object
for xi, yi in zip(x, y):
xnom = numpy.atleast_1d(numpy.squeeze(nominal_values(xi)))
xerr = numpy.atleast_1d(numpy.squeeze(std_devs(xi)))
ynom = numpy.atleast_1d(numpy.squeeze(nominal_values(yi)))
yerr = numpy.atleast_1d(numpy.squeeze(std_devs(yi)))
(l,) = ax.plot(xnom, ynom, **kw)
fkw = copy.copy(fill_kw) # changes to fill_kw propagate to the next call of uband!
fkw.setdefault('color', l.get_color())
bands = []
if numpy.any(yerr != 0):
bandy = ax.fill_between(xnom, ynom - yerr, ynom + yerr, **fkw)
bands.append(bandy)
if numpy.any(xerr != 0):
bandx = ax.fill_betweenx(ynom, xnom - xerr, xnom + xerr, **fkw)
bands.append(bandx)
tmp = Uband(l, bands)
result.append(tmp)
return result
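# Illustrative usage (a sketch): the same kind of uncertain y data as for uerrorbar,
# but rendered as a nominal-value line with a shaded +/- one-standard-deviation band;
# fill_kw controls the band style.
#   uband(x, y, fill_kw={'alpha': 0.15})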
def imas_units_to_latex(unit):
"""
converts units to a nice latex format for plot labels
:param unit: string with unit in imas format
:return: string with unit in latex format
"""
unit = re.sub('(\-?[0-9]+)', r'{\1}', unit)
unit = re.sub('\.', r'\,', unit)
return f' [${unit}$]'
@add_to__ALL__
def get_channel_count(ods, hw_sys, check_loc=None, test_checker=None, channels_name='channel'):
"""
Utility function for CX hardware overlays.
Gets a channel count for some hardware systems.
Provide check_loc to make sure some data exist.
:param ods: OMAS ODS instance
:param hw_sys: string
Hardware system to check. Must be a valid top level IDS name, like 'thomson_scattering'
:param check_loc: [optional] string
If provided, an additional check will be made to ensure that some data exist.
If this check fails, channel count will be set to 0
Example: 'thomson_scattering.channel.0.position.r'
:param test_checker: [optional] string to evaluate into bool
Like "checker > 0", where checker = ods[check_loc]. If this test fails, nc will be set to 0
:param channels_name: string
Use if you need to generalize to something that doesn't have real channels but has something analogous,
like how 'gas_injection' has 'pipe' that's shaped like 'channel' is in 'thomson_scattering'.
:return: Number of channels for this hardware system. 0 indicates empty.
"""
try:
nc = len(ods[hw_sys][channels_name])
if check_loc is not None:
checker = ods[check_loc]
if test_checker is not None:
assert eval(test_checker)
except (TypeError, AssertionError, ValueError, IndexError, KeyError):
nc = 0
if nc == 0:
printd('{} overlay could not find sufficient data to make a plot'.format(hw_sys))
return nc
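# Illustrative usage (a sketch): count Thomson scattering channels, but only report a
# non-zero count if the first channel has a positive major radius stored.
#   nc = get_channel_count(ods, 'thomson_scattering',
#                          check_loc='thomson_scattering.channel.0.position.r',
#                          test_checker='checker > 0')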
def gas_filter(label, which_gas):
"""
Utility: processes the mask / which_gas selector for gas_injection_overlay
:param label: string
Label for a gas pipe / inlet to be tested
:param which_gas: string or list
See gas_injection_overlay docstring
:return: bool
Flag indicating whether or not a pipe with this label should be shown
"""
include = False
if isinstance(which_gas, str):
if which_gas == 'all':
include = True
elif isinstance(which_gas, list):
include = any(wg in label for wg in which_gas)
return include
def gas_arrow(ods, r, z, direction=None, r2=None, z2=None, snap_to=numpy.pi / 4.0, ax=None, color=None, pad=1.0, **kw):
"""
Draws an arrow pointing in from the gas inlet
:param ods: ODS instance
:param r: float
R position of gas injector (m)
:param z: float
Z position of gas injector (m)
:param r2: float [optional]
R coordinate of second point, at which the gas injector is aiming inside the vessel
:param z2: float [optional]
Z coordinate of second point, at which the gas injector is aiming inside the vessel
:param direction: float
Direction of injection (radians, COCOS should match ods.cocos). None = try to guess.
:param snap_to: float
Snap direction angle to nearest value. Set snap to pi/4 to snap to 0, pi/4, pi/2, 3pi/4, etc. No in-between.
:param ax: axes instance into which to plot (default: gca())
:param color: matplotlib color specification
:param pad: float
Padding between arrow tip and specified (r,z)
"""
from matplotlib import pyplot
def pick_direction():
"""Guesses the direction for the arrow (from injector toward machine) in case you don't know"""
dr = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['r'] - r
dz = ods['equilibrium']['time_slice'][0]['global_quantities']['magnetic_axis']['z'] - z
theta = numpy.arctan2(dz, -dr)
if snap_to > 0:
theta = snap_to * round(theta / snap_to)
return theta
if (r2 is not None) and (z2 is not None):
direction = numpy.arctan2(z2 - z, r - r2)
elif direction is None:
direction = pick_direction()
else:
direction = cocos_transform(ods.cocos, 11)['BP'] * direction
if ax is None:
ax = pyplot.gca()
shaft_len = 3.5 * (1 + pad) / 2.0
da = numpy.pi / 10 # Angular half width of the arrow head
x0 = numpy.cos(-direction) * pad
y0 = numpy.sin(-direction) * pad
head_mark = [
(x0, y0),
(x0 + numpy.cos(-direction + da), y0 + numpy.sin(-direction + da)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + shaft_len * numpy.cos(-direction), y0 + shaft_len * numpy.sin(-direction)),
(x0 + numpy.cos(-direction), y0 + numpy.sin(-direction)),
(x0 + numpy.cos(-direction - da), y0 + numpy.sin(-direction - da)),
]
kw.pop('marker', None) # Ignore this
return ax.plot(r, z, marker=head_mark, color=color, markersize=100 * (pad + shaft_len) / 5, **kw)
def geo_type_lookup(geometry_type, subsys, imas_version=omas_rcparams['default_imas_version'], reverse=False):
"""
Given a geometry type code
:param geometry_type: int (or string if reverse=True)
Geometry type code (or geometry name if reverse)
:param subsys: string
Name of subsystem or ODS, like 'pf_active'
:param imas_version: string
IMAS version to use when mapping
:param reverse: bool
Switches the roles of param geometry_type and return
:return: string (or int if reverse=True)
Name of the field indicated by geometry_type (or type code if reverse=True).
For example: In IMAS 3.19.0, `pf_active.coil[:].element[:].geometry.geometry_type = 1` means 'outline'.
In version 3.19.0 the following geometry types exist {1: 'outline', 2: 'rectangle', 4: 'arcs of circle'}
"""
# Fetch information from IMAS data description of geometry_type for the relevant subsys
lookup = {
'ic_antennas': 'ic_antennas.antenna.:.strap.:.geometry.geometry_type',
'pf_active': 'pf_active.coil.:.element.:.geometry.geometry_type',
}
if subsys not in lookup.keys():
printe('Warning: unrecognized IMAS substructure ({})'.format(subsys))
return None
try:
doc = omas_info_node(lookup[subsys], imas_version=imas_version)['documentation']
except ValueError as _excp:
printe(repr(_excp))
return None
geo_map = eval('{%s}' % doc.split('(')[-1][:-2])
if 3 not in geo_map:
geo_map[3] = 'oblique' # For backward compatibility
if reverse:
# https://stackoverflow.com/a/13149770/6605826
return list(geo_map.keys())[list(geo_map.values()).index(geometry_type)]
else:
return geo_map.get(geometry_type, None)
def padded_extension(values_in, n, fill_value):
"""
Forces values_in to be at least length n by appending copies of fill_value as needed
:param values_in: scalar or 1D iterable
:param n: int
:param fill_value: scalar
:return: 1D array with length >= n
"""
x = numpy.atleast_1d(values_in).tolist()
if len(x) < n:
x += [fill_value] * (n - len(x))
return numpy.array(x)
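# Illustrative behaviour (a sketch):
#   padded_extension([1, 2], 4, 0)  -> array([1, 2, 0, 0])
#   padded_extension(5, 3, 0)       -> array([5, 0, 0])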
def text_alignment_setup(n, default_ha='left', default_va='baseline', **kw):
"""
Interprets text alignment instructions
:param n: int
Number of labels that need alignment instructions
:param default_ha: string or list of n strings
Default horizontal alignment. If one is supplied, it will be copied n times.
:param default_va: string or list of n strings
Default vertical alignment. If one is supplied, it will be copied n times.
:param kw: keywords caught by overlay method
:return: (list of n strings, list of n strings, kw)
Horizontal alignment instructions
Vertical alignment instructions
Updated keywords
"""
label_ha = padded_extension(kw.pop('label_ha', None), n, None)
label_va = padded_extension(kw.pop('label_va', None), n, None)
default_ha = numpy.atleast_1d(default_ha).tolist()
default_va = numpy.atleast_1d(default_va).tolist()
if len(default_ha) == 1:
default_ha *= n
if len(default_va) == 1:
default_va *= n
for i in range(n):
label_ha[i] = default_ha[i] if label_ha[i] is None else label_ha[i]
label_va[i] = default_va[i] if label_va[i] is None else label_va[i]
return label_ha, label_va, kw
def label_shifter(n, kw):
"""
Interprets label shift instructions
:param n: int
Number of labels that need shift instructions
:param kw: dict
Keywords passed to main plot script; label shifting keywords will be removed
:return: (1D array with length >= n, 1D array with length >= n)
R shifts
Z shifts
"""
label_dr = kw.pop('label_r_shift', 0)
label_dz = kw.pop('label_z_shift', 0)
label_dr = padded_extension(label_dr, n, fill_value=label_dr if numpy.isscalar(label_dr) else 0)
label_dz = padded_extension(label_dz, n, fill_value=label_dz if numpy.isscalar(label_dz) else 0)
return label_dr, label_dz
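# Illustrative usage (a sketch; note the shift keywords are popped from kw in place):
#   label_dr, label_dz = label_shifter(3, {'label_r_shift': 0.05,
#                                          'label_z_shift': [0.0, 0.02, -0.02]})
#   -> label_dr = array([0.05, 0.05, 0.05]), label_dz = array([0.0, 0.02, -0.02])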
# hold last 100 references of matplotlib.widgets.Slider
_stimes = []
def ods_time_plot(ods_plot_function, ods, time_index, time, **kw):
r"""
Utility function for generating time dependent plots
:param ods_plot_function: ods plot function to be called
this function must accept ax (either a single or a list of axes)
and must return the axes (or list of axes) it used
:param ods: ods
:param time_index: time indexes to be scanned
:param time: array of times
:param \**kw: extra aruments to passed to ods_plot_function
:return: slider instance and list of axes used
"""
from matplotlib import pyplot
from matplotlib.widgets import Slider
time_index = numpy.atleast_1d(time_index)
time = time[time_index]
axs = {}
def do_clean(time0):
if axs is not None:
for ax in axs:
if axs[ax] is not None:
axs[ax].cla()
def update(time0):
if 'ax' in kw:
ax = kw.pop('ax')
elif not len(axs):
ax = None
elif len(axs) == 1:
ax = list(axs.values())[0]
else:
ax = axs
time_index0 = time_index[numpy.argmin(abs(time - time0))]
tmp = ods_plot_function(ods, time_index0, ax=ax, **kw)['ax']
if isinstance(tmp, dict):
axs.update(tmp)
else:
axs[1, 1, 1] = tmp
stime, axtime = kw.pop('stime', (None, None))
update(time[0])
if stime is None:
axtime = pyplot.axes([0.1, 0.96, 0.75, 0.03])
min_time = min(time)
max_time = max(time)
if min_time == max_time:
min_time = min_time - 1
max_time = max_time + 1
stime = Slider(axtime, 'Time[s]', min_time, max_time, valinit=min(time), valstep=min(numpy.diff(time)))
if stime not in _stimes:
_stimes.append(stime)
if len(_stimes) > 100:
_stimes.pop(0)
stime.on_changed(do_clean)
stime.on_changed(update)
for time0 in time:
axtime.axvline(time0, color=['r', 'y', 'c', 'm'][stime.cnt - 2])
return {'stime': (stime, axtime), 'ax': axs}
def cached_add_subplot(fig, ax_cache, *args, **kw):
r"""
Utility function that works like matplotlib add_subplot
but reuses axes if these were already used before
:param fig: matplotlib figure
:param ax_cache: caching dictionary
:param \*args: arguments passed to matplotlib add_subplot
:param \**kw: keywords arguments passed to matplotlib add_subplot
:return: matplotlib axes
"""
if args in ax_cache:
return ax_cache[args]
else:
ax = fig.add_subplot(*args, **kw)
ax_cache[args] = ax
return ax
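# Illustrative behaviour (a sketch): repeated calls with the same subplot
# specification reuse the cached axes instead of creating a new one.
#   axs = {}
#   ax1 = cached_add_subplot(fig, axs, 2, 1, 1)
#   ax1 is cached_add_subplot(fig, axs, 2, 1, 1)   # True: returned from the cache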
# ================================
# ODSs' plotting methods
# ================================
def handle_time(ods, time_location, time_index, time):
"""
Given either time_index or time returns both time_index and time consistent with one another
NOTE: time takes precedence over time_index
:param time_location: location of which to get the time
:param time_index: int or list of ints
:param time: float or list of floats
:return: time_index, time
"""
if time is not None:
tds = ods.time(time_location)
time_index = []
for t in numpy.atleast_1d(time):
time_index.append(numpy.argmin(abs(tds - t)))
if time_index is None:
time = ods.time(time_location)
if time is None:
time_index = 0
else:
time_index = numpy.arange(len(time))
return time_index, numpy.atleast_1d(time)
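# Illustrative usage (a sketch): ask for the equilibrium slice closest to t = 1.0 s;
# since time is given it takes precedence over time_index, and the index of the
# nearest entry of the 'equilibrium' time base is returned.
#   time_index, time = handle_time(ods, 'equilibrium', None, 1.0)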
@add_to__ODS__
def equilibrium_CX(
ods,
time_index=None,
time=None,
levels=None,
contour_quantity='rho_tor_norm',
allow_fallback=True,
ax=None,
sf=3,
label_contours=None,
show_wall=True,
xkw={},
ggd_points_triangles=None,
**kw,
):
r"""
Plot equilibrium cross-section
as per `ods['equilibrium']['time_slice'][time_index]`
:param ods: ODS instance
input ods containing equilibrium data
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
        time to plot. If None all timeslices are plotted.
if not None, it takes precedence over time_index
:param levels: sorted numeric iterable
values to pass to 2D plot as contour levels
:param contour_quantity: string
quantity to contour, anything in eq['profiles_1d'] or eq['profiles_2d'] or psi_norm
:param allow_fallback: bool
If rho/phi is requested but not available, plot on psi instead if allowed. Otherwise, raise ValueError.
:param ax: Axes instance
axes to plot in (active axes is generated if `ax is None`)
:param sf: int
Resample scaling factor. For example, set to 3 to resample to 3x higher resolution. Makes contours smoother.
:param label_contours: bool or None
True/False: do(n't) label contours
None: only label if contours are of q
:param show_wall: bool
Plot the inner wall or limiting surface, if available
:param xkw: dict
Keywords to pass to plot call to draw X-point(s). Disable X-points by setting xkw={'marker': ''}
:param ggd_points_triangles:
Caching of ggd data structure as generated by omas_physics.grids_ggd_points_triangles() method
:param \**kw: keywords passed to matplotlib plot statements
:return: Axes instance
"""
# caching of ggd data
if ggd_points_triangles is None and 'equilibrium.grids_ggd' in ods:
from .omas_physics import grids_ggd_points_triangles
ggd_points_triangles = grids_ggd_points_triangles(ods['equilibrium.grids_ggd[0].grid[0]'])
if allow_fallback is True:
allow_fallback = 'psi'
# time animation
time_index, time = handle_time(ods, 'equilibrium', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(
equilibrium_CX,
ods,
time_index,
time,
levels=levels,
contour_quantity=contour_quantity,
allow_fallback=allow_fallback,
ax=ax,
sf=sf,
label_contours=label_contours,
show_wall=show_wall,
xkw=xkw,
ggd_points_triangles=ggd_points_triangles,
**kw,
)
import matplotlib
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
return_dict = {'ax': ax}
wall = None
eq = ods['equilibrium']['time_slice'][time_index]
if 'wall' in ods:
if time_index in ods['wall']['description_2d']:
wall = ods['wall']['description_2d'][time_index]['limiter']['unit']
elif 0 in ods['wall']['description_2d']:
wall = ods['wall']['description_2d'][0]['limiter']['unit']
# Plotting style
kw.setdefault('linewidth', 1)
label = kw.pop('label', '')
kw1 = copy.deepcopy(kw)
kw1['linewidth'] = kw['linewidth'] + 1
for contour_only_keyword in ['linestyles', 'linewidths', 'colors']:
kw1.pop(contour_only_keyword, None) # Remove keywords that only affect contours so they don't offend plot
# Boundary
ax.plot(eq['boundary.outline.r'], eq['boundary.outline.z'], label=label, **kw1)
kw1.setdefault('color', ax.lines[-1].get_color())
# Magnetic axis
if 'global_quantities.magnetic_axis.r' in eq and 'global_quantities.magnetic_axis.z':
ax.plot(eq['global_quantities']['magnetic_axis']['r'], eq['global_quantities']['magnetic_axis']['z'], '+', **kw1)
# get 2d data either from grid or ggd
def get2d(contour_quantity):
pr2d = None
if 'profiles_2d' in eq and 'profiles_2d.0.%s' % contour_quantity in eq:
pr2d = eq['profiles_2d.0.%s' % contour_quantity]
elif 'ggd.0.%s.0.values' % contour_quantity in eq:
pr2d = eq['ggd.0.%s.0.values' % contour_quantity]
return pr2d
# Choose quantity to plot
for fallback in range(2):
# Most robust thing is to use PSI2D and interpolate 1D quantities over it
if (
get2d('psi') is not None
and 'psi' in eq['profiles_1d']
and contour_quantity in eq['profiles_1d']
or contour_quantity == 'psi_norm'
):
x_value_1d = eq['profiles_1d']['psi']
m = x_value_1d[0]
M = x_value_1d[-1]
x_value_1d = (x_value_1d - m) / (M - m)
x_value_2d = (get2d('psi') - m) / (M - m)
if contour_quantity == 'psi_norm':
value_1d = x_value_1d
else:
value_1d = eq['profiles_1d'][contour_quantity]
value_2d = omas_interp1d(x_value_2d, x_value_1d, value_1d)
break
# Next get 2D quantity
elif get2d(contour_quantity) is not None:
value_1d = None
value_2d = get2d(contour_quantity)
break
elif allow_fallback and not fallback:
print('No %s equilibrium CX data to plot. Fallback on %s.' % (contour_quantity, allow_fallback))
contour_quantity = allow_fallback
# allow fallback
elif fallback:
txt = 'No %s equilibrium CX data to plot. Aborting.' % contour_quantity
if allow_fallback:
print(txt)
return ax
else:
raise ValueError(txt)
return_dict['contour_quantity'] = contour_quantity
# handle levels
if levels is None and value_1d is not None:
if contour_quantity == 'q':
max_q = int(numpy.round(omas_interp1d(0.95, x_value_1d, value_1d)))
levels = numpy.arange(max_q)
else:
levels = numpy.linspace(numpy.min(value_1d), numpy.max(value_1d), 11)[1:-1]
levels = numpy.hstack((levels, levels[-1] + (levels[1] - levels[0]) * numpy.arange(100)[1:]))
# Wall clipping
if wall is not None:
path = matplotlib.path.Path(numpy.transpose(numpy.array([wall[0]['outline']['r'], wall[0]['outline']['z']])))
wall_path = matplotlib.patches.PathPatch(path, facecolor='none', edgecolor='none')
ax.add_patch(wall_path)
kw.setdefault('colors', kw1['color'])
kw.pop('color', '')
kw['linewidths'] = kw.pop('linewidth')
if 'profiles_2d.0' in eq:
# Contours
if 'r' in eq['profiles_2d.0'] and 'z' in eq['profiles_2d.0']:
r = eq['profiles_2d.0.r']
z = eq['profiles_2d.0.z']
else:
z, r = numpy.meshgrid(eq['profiles_2d.0.grid.dim2'], eq['profiles_2d.0.grid.dim1'])
# sanitize
value_2d = value_2d.copy()
value_2d[:, -1] = value_2d[:, -2]
value_2d[-1, :] = value_2d[-2, :]
value_2d[-1, -1] = value_2d[-2, -2]
# Resample
if sf > 1:
value_2d[numpy.isnan(value_2d)] = numpy.nanmean(value_2d)
import scipy.ndimage
r = scipy.ndimage.zoom(r, sf)
z = scipy.ndimage.zoom(z, sf)
value_2d = scipy.ndimage.zoom(value_2d, sf)
cs = ax.contour(r, z, value_2d, levels, **kw)
if label_contours or ((label_contours is None) and (contour_quantity == 'q')):
ax.clabel(cs)
elif 'ggd' in eq:
cs = ax.tricontour(
ggd_points_triangles[0][:, 0], ggd_points_triangles[0][:, 1], ggd_points_triangles[1], value_2d, levels=levels, **kw
)
else:
raise Exception('No 2D equilibrium data to plot')
if contour_quantity == 'q':
ax.clabel(cs, cs.levels, inline=True, fontsize=10)
# X-point(s)
xkw.setdefault('marker', 'x')
if xkw['marker'] not in ['', ' ']:
from matplotlib import rcParams
xkw.setdefault('color', cs.colors)
xkw.setdefault('linestyle', '')
xkw.setdefault('markersize', rcParams['lines.markersize'] * 1.5)
xkw.setdefault('mew', rcParams['lines.markeredgewidth'] * 1.25 + 1.25)
xp = eq['boundary']['x_point']
for i in range(len(xp)):
try:
xr, xz = xp[i]['r'], xp[i]['z']
except ValueError:
pass
else:
ax.plot(xr, xz, **xkw)
# Internal flux surfaces w/ or w/o masking
if wall is not None:
for collection in cs.collections:
collection.set_clip_path(wall_path)
# Wall
if wall is not None and show_wall:
ax.plot(wall[0]['outline']['r'], wall[0]['outline']['z'], 'k', linewidth=2)
ax.axis([min(wall[0]['outline']['r']), max(wall[0]['outline']['r']), min(wall[0]['outline']['z']), max(wall[0]['outline']['z'])])
# Axes
ax.set_aspect('equal')
ax.set_frame_on(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return return_dict
@add_to__ODS__
def equilibrium_CX_topview(ods, time_index=None, time=None, ax=None, **kw):
r"""
Plot equilibrium toroidal cross-section
as per `ods['equilibrium']['time_slice'][time_index]`
:param ods: ODS instance
input ods containing equilibrium data
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
        time to plot. If None all timeslices are plotted.
if not None, it takes precedence over time_index
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: arguments passed to matplotlib plot statements
:return: Axes instance
"""
# time animation
time_index, time = handle_time(ods, 'equilibrium', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
            return ods_time_plot(equilibrium_CX_topview, ods, time_index, time, ax=ax, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
wall = None
eq = ods['equilibrium']['time_slice'][time_index]
if 'wall' in ods:
if time_index in ods['wall']['description_2d']:
wall = ods['wall']['description_2d'][time_index]['limiter']['unit']
elif 0 in ods['wall']['description_2d']:
wall = ods['wall']['description_2d'][0]['limiter']['unit']
# Plotting style
kw.setdefault('linewidth', 1)
label = kw.pop('label', '')
kw1 = copy.deepcopy(kw)
t_angle = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
sint = numpy.sin(t_angle)
cost = numpy.cos(t_angle)
Rout = numpy.max(eq['boundary']['outline']['r'])
Rin = numpy.min(eq['boundary']['outline']['r'])
Xout = Rout * cost
Yout = Rout * sint
Xin = Rin * cost
Yin = Rin * sint
ax.plot(Xin, Yin, **kw1)
kw1.setdefault('color', ax.lines[-1].get_color())
ax.plot(Xout, Yout, **kw1)
# Wall
if wall is not None:
Rout = numpy.max(wall[0]['outline']['r'])
Rin = numpy.min(wall[0]['outline']['r'])
Xout = Rout * cost
Yout = Rout * sint
Xin = Rin * cost
Yin = Rin * sint
ax.plot(Xin, Yin, 'k', label=label, linewidth=2)
ax.plot(Xout, Yout, 'k', label=label, linewidth=2)
ax.axis('equal')
# Axes
ax.set_aspect('equal')
ax.set_frame_on(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return {'ax': ax}
nice_names = {
'rho_tor_norm': '$\\rho$',
'rho_tor': '$\\rho [m]$',
'rho_volume_norm': '$\\rho_{\\rm vol}$',
'psi_norm': '$\\psi$',
'psi': '$\\psi$ [Wb]',
'phi': '$\\phi$ [Wb]',
'phi_norm': '$\\phi$',
'q': '$q$',
}
@add_to__ODS__
def equilibrium_summary(ods, time_index=None, time=None, fig=None, ggd_points_triangles=None, **kw):
"""
Plot equilibrium cross-section and P, q, P', FF' profiles
as per `ods['equilibrium']['time_slice'][time_index]`
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
        time to plot. If None all timeslices are plotted.
if not None, it takes precedence over time_index
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param ggd_points_triangles:
Caching of ggd data structure as generated by omas_physics.grids_ggd_points_triangles() method
:param kw: arguments passed to matplotlib plot statements
:return: figure handler
"""
# caching of ggd data
if ggd_points_triangles is None and 'equilibrium.grids_ggd' in ods:
from .omas_physics import grids_ggd_points_triangles
ggd_points_triangles = grids_ggd_points_triangles(ods['equilibrium.grids_ggd[0].grid[0]'])
from matplotlib import pyplot
axs = kw.pop('ax', {})
if axs is None:
axs = {}
if not len(axs) and fig is None:
fig = pyplot.figure()
# time animation
time_index, time = handle_time(ods, 'equilibrium', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(
equilibrium_summary, ods, time_index, time, fig=fig, ggd_points_triangles=ggd_points_triangles, ax=axs, **kw
)
ax = cached_add_subplot(fig, axs, 1, 3, 1)
contour_quantity = kw.pop('contour_quantity', 'rho_tor_norm')
tmp = equilibrium_CX(
ods, time_index=time_index, ax=ax, contour_quantity=contour_quantity, ggd_points_triangles=ggd_points_triangles, **kw
)
eq = ods['equilibrium']['time_slice'][time_index]
# x
if tmp['contour_quantity'] in eq['profiles_1d']:
raw_xName = tmp['contour_quantity']
x = eq['profiles_1d'][raw_xName]
else:
raw_xName = 'psi'
x = eq['profiles_1d']['psi_norm']
x = (x - min(x)) / (max(x) - min(x))
xName = nice_names.get(raw_xName, raw_xName)
# pressure
ax = cached_add_subplot(fig, axs, 2, 3, 2)
ax.plot(x, eq['profiles_1d']['pressure'], **kw)
kw.setdefault('color', ax.lines[-1].get_color())
ax.set_title(r'$\,$ Pressure')
ax.ticklabel_format(style='sci', scilimits=(-1, 2), axis='y')
pyplot.setp(ax.get_xticklabels(), visible=False)
# q
ax = cached_add_subplot(fig, axs, 2, 3, 3, sharex=ax)
ax.plot(x, eq['profiles_1d']['q'], **kw)
ax.set_title('$q$ Safety factor')
ax.ticklabel_format(style='sci', scilimits=(-1, 2), axis='y')
if 'label' in kw:
leg = ax.legend(loc=0)
import matplotlib
if compare_version(matplotlib.__version__, '3.1.0') >= 0:
leg.set_draggable(True)
else:
leg.draggable(True)
pyplot.setp(ax.get_xticklabels(), visible=False)
# dP_dpsi
ax = cached_add_subplot(fig, axs, 2, 3, 5, sharex=ax)
ax.plot(x, eq['profiles_1d']['dpressure_dpsi'], **kw)
ax.set_title(r"$P\,^\prime$ source function")
ax.ticklabel_format(style='sci', scilimits=(-1, 2), axis='y')
pyplot.xlabel(xName)
# FdF_dpsi
ax = cached_add_subplot(fig, axs, 2, 3, 6, sharex=ax)
ax.plot(x, eq['profiles_1d']['f_df_dpsi'], **kw)
ax.set_title(r"$FF\,^\prime$ source function")
ax.ticklabel_format(style='sci', scilimits=(-1, 2), axis='y')
pyplot.xlabel(xName)
if raw_xName.endswith('norm'):
ax.set_xlim([0, 1])
return {'ax': axs}
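# A minimal usage sketch for equilibrium_summary (kept as a comment so nothing runs at import
# time); `ods` is assumed to be an ODS already populated with equilibrium data, and the
# contour_quantity keyword is forwarded to equilibrium_CX:
#
#     from matplotlib import pyplot
#     equilibrium_summary(ods, time_index=0, contour_quantity='rho_tor_norm')
#     pyplot.show()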
@add_to__ODS__
def core_profiles_currents_summary(ods, time_index=None, time=None, ax=None, **kw):
"""
Plot currents in core_profiles_1d
:param ods: input ods
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# time animation
time_index, time = handle_time(ods, 'core_profiles', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(core_profiles_currents_summary, ods, time_index, time, ax=ax, **kw)
assert 'j_total' in ods['core_profiles.profiles_1d'][time_index], "j_total not in core profiles"
ax.plot(
ods[f'core_profiles.profiles_1d[{time_index}].grid.rho_tor_norm'],
ods[f'core_profiles.profiles_1d[{time_index}]']['j_total'],
label='total current',
ls='--',
)
for item in ods['core_profiles.profiles_1d'][time_index]:
if 'j_' in item and item not in ['j_tor', 'j_total']:
ax.plot(
ods[f'core_profiles.profiles_1d[{time_index}].grid.rho_tor_norm'],
ods[f'core_profiles.profiles_1d[{time_index}]'][item],
label=' '.join(item[2:].split(sep='_')),
)
ax.legend(loc=0)
ax.set_ylabel(r'Parallel current density $[A\,m^{-2}]$')
ax.set_xlabel(r'$\rho_{tor}$')
return {'ax': ax}
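# Usage sketch: core_profiles_currents_summary asserts that `j_total` is present in
# core_profiles.profiles_1d, so an ODS with that field filled is assumed here:
#
#     core_profiles_currents_summary(ods, time_index=0)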
@add_to__ODS__
def core_profiles_summary(ods, time_index=None, time=None, fig=None, ods_species=None, quantities=['density_thermal', 'temperature'], **kw):
"""
Plot densities and temperature profiles for electrons and all ion species
as per `ods['core_profiles']['profiles_1d'][time_index]`
:param ods: input ods
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ods_species: list of ion species indices as listed in the core_profiles ods (electron index = -1)
if None, plot all ion species
:param quantities: list of strings of quantities to plot from the profiles_1d ods (e.g. zeff, temperature, rotation_frequency_tor_sonic)
:param kw: arguments passed to matplotlib plot statements
:return: figure handler
"""
from matplotlib import pyplot
axs = kw.pop('ax', {})
if axs is None:
axs = {}
if not len(axs) and fig is None:
fig = pyplot.figure()
# time animation
time_index, time = handle_time(ods, 'core_profiles', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(
core_profiles_summary, ods, time_index, time, fig=fig, ods_species=ods_species, quantities=quantities, ax=axs, **kw
)
prof1d = ods['core_profiles']['profiles_1d'][time_index]
rho = prof1d['grid.rho_tor_norm']
# Determine subplot rows x colls
if ods_species is None:
ncols = len(prof1d['ion']) + 1
ods_species = [-1] + list(prof1d['ion'])
else:
ncols = len(ods_species)
nplots = sum([ncols if 'density' in i or 'temperature' in i else 1 for i in quantities])
nrows = int(numpy.ceil(nplots / ncols))
# Generate species with corresponding name
species_in_tree = [f"ion.{i}" if i >= 0 else 'electrons' for i in ods_species]
names = [f"{prof1d[i]['label']} ion" if i != 'electrons' else "electron" for i in species_in_tree]
plotting_list = []
label_name = []
label_name_z = []
unit_list = []
for q in quantities:
if 'density' in q or 'temperature' in q:
for index, specie in enumerate(species_in_tree):
unit_list.append(omas_info_node(o2u(f"core_profiles.profiles_1d.0.{specie}.{q}"))['units'])
if q in prof1d[specie]:
if 'density' in q and 'ion' in specie and prof1d[specie]['element[0].z_n'] != 1.0:
plotting_list.append(prof1d[specie][q] * prof1d[specie]['element[0].z_n'])
label_name_z.append(r'$\times$' + f" {int(prof1d[specie]['element[0].z_n'])}")
else:
plotting_list.append(prof1d[specie][q])
label_name_z.append("")
label_name.append(f'{names[index]} {q.capitalize()}')
else:
plotting_list.append(numpy.zeros(len(rho)))
label_name.append(f'{names[index]} {q.capitalize()}')
label_name_z.append("")
else:
unit_list.append(omas_info_node(o2u(f"core_profiles.profiles_1d.0.{q}"))['units'])
plotting_list.append(prof1d[q])
label_name.append(q.capitalize())
for index, y in enumerate(plotting_list):
plot = index + 1
if index % ncols == 0:
sharey = None
sharex = None
elif 'Density' in label_name[index] or 'Temperature' in label_name[index]:
sharey = ax
sharex = ax
ax = cached_add_subplot(fig, axs, nrows, ncols, plot, sharex=sharex, sharey=sharey)
uband(rho, y, ax=ax, **kw)
if "Temp" in label_name[index]:
ax.set_ylabel(r'$T_{}$'.format(label_name[index][0]) + imas_units_to_latex(unit_list[index]))
elif "Density" in label_name[index]:
ax.set_ylabel(r'$n_{}$'.format(label_name[index][0]) + imas_units_to_latex(unit_list[index]) + label_name_z[index])
else:
ax.set_ylabel(label_name[index][:10] + imas_units_to_latex(unit_list[index]))
if (nplots - plot) < ncols:
ax.set_xlabel('$\\rho$')
if 'label' in kw:
ax.legend(loc='lower center')
ax.set_xlim([0, 1])
return {'ax': axs, 'fig': fig}
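# Usage sketch (assuming `ods` carries core_profiles data): additional 1D quantities such as
# zeff can be appended to the default density/temperature panels via the `quantities` argument:
#
#     core_profiles_summary(ods, time_index=0, quantities=['density_thermal', 'temperature', 'zeff'])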
@add_to__ODS__
def core_profiles_pressures(ods, time_index=None, time=None, ax=None, **kw):
"""
Plot pressures in `ods['core_profiles']['profiles_1d'][time_index]`
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'core_profiles', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(core_profiles_pressures, ods, time_index, time, ax=ax)
import matplotlib
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
prof1d = ods['core_profiles']['profiles_1d'][time_index]
x = prof1d['grid.rho_tor_norm']
for item in prof1d.paths():
item = l2o(item)
if 'pressure' in item:
if 'ion' in item:
try:
i = int(item.split("ion.")[-1].split('.')[0])
label = prof1d['ion'][i]['label']
except ValueError:
label = item
elif 'electrons' in item:
label = 'e$^-$'
else:
label = item
if item != label:
label += ' (thermal)' if 'thermal' in item else ''
label += ' (fast)' if 'fast' in item else ''
uband(x, prof1d[item], ax=ax, label=label)
ax.set_xlim([0, 1])
ax.set_ylabel('Pressure (Pa)')
ax.set_xlabel('$\\rho_N$')
leg = ax.legend(loc=0)
if compare_version(matplotlib.__version__, '3.1.0') >= 0:
leg.set_draggable(True)
else:
leg.draggable(True)
return {'ax': ax}
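# Usage sketch (assuming `ods` carries thermal/fast pressure data in core_profiles): plot the
# pressure breakdown for the first time slice on the active axes:
#
#     core_profiles_pressures(ods, time_index=0)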
@add_to__ODS__
def core_transport_fluxes(ods, time_index=None, time=None, fig=None, show_total_density=True, plot_zeff=False, **kw):
"""
Plot densities and temperature profiles for all species, rotation profile, TGYRO fluxes and fluxes from power_balance per STEP state.
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param show_total_density: bool
Show total thermal+fast in addition to thermal/fast breakdown if available
:param plot_zeff: if True, plot zeff below the plasma rotation
:kw: matplotlib plot parameters
:return: axes
"""
from matplotlib import pyplot
axs = kw.pop('ax', {})
if axs is None:
axs = {}
if not len(axs) and fig is None:
fig = pyplot.figure()
# time animation
time_index, time = handle_time(ods, 'core_profiles', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(
core_transport_fluxes,
ods,
time_index,
time,
fig=fig,
ax=axs,
show_total_density=show_total_density,
plot_zeff=plot_zeff,
**kw,
)
def sum_density_types(specie_index):
final_density = numpy.zeros(len(prof1d['grid.rho_tor_norm']))
for therm_fast in ['_thermal', '_fast']:
if not show_total_density and therm_fast != "_thermal":
continue # Skip total thermal+fast because the flag turned it off
density = ods_species[specie_index] + '.density' + therm_fast
if density not in prof1d:
continue
final_density += prof1d[density]
return final_density
def plot_function(x, y, plot_num, ylabel, sharex=None, sharey=None):
ax = cached_add_subplot(fig, axs, nrows, ncols, plot_num, sharex=sharex, sharey=sharey)
uband(x, y, ax=ax, **kw)
ax.set_ylabel(ylabel)
return ax
if plot_zeff:
nrows = 5
else:
nrows = 4
ncols = 2
if "core_profiles" in ods:
prof1d = ods['core_profiles']['profiles_1d'][time_index]
equilibrium = ods['equilibrium']['time_slice'][time_index]
rho_core_prof = prof1d['grid.rho_tor_norm']
ods_species = ['electrons'] + ['ion[%d]' % k for k in range(len(prof1d['ion']))]
species_name = ['Electrons'] + [prof1d['ion[%d].label' % k] + ' ion' for k in range(len(prof1d['ion']))]
# Temperature electrons
ax = plot_function(x=rho_core_prof, y=prof1d[ods_species[0]]['temperature'] / 1e3, plot_num=1, ylabel=r'$T_{e}\,[keV]$')
pyplot.setp(ax.get_xticklabels(), visible=False)
# Temperature main ion species
ax = plot_function(
x=rho_core_prof, y=prof1d[ods_species[1]]['temperature'] / 1e3, plot_num=3, ylabel=r'$T_{i}\,[keV]$', sharey=ax, sharex=ax
)
pyplot.setp(ax.get_xticklabels(), visible=False)
# Density electrons
ax = plot_function(x=rho_core_prof, y=sum_density_types(specie_index=0), plot_num=5, ylabel=r'$n_{e}\,[m^{-3}]$', sharex=ax)
pyplot.setp(ax.get_xticklabels(), visible=False)
# Rotation
if 'rotation_frequency_tor_sonic' in prof1d and 'psi' in prof1d['grid']:
from .omas_physics import omas_environment
with omas_environment(
ods,
coordsio={
f'equilibrium.time_slice.{k}.profiles_1d.psi': prof1d['grid']['psi'] for k in range(len(ods['equilibrium.time_slice']))
},
):
rotation = (equilibrium['profiles_1d']['r_outboard'] - equilibrium['profiles_1d']['r_inboard']) / 2 + equilibrium[
'profiles_1d'
]['geometric_axis']['r'] * -prof1d['rotation_frequency_tor_sonic']
ax = plot_function(x=rho_core_prof, y=rotation, plot_num=7, ylabel=r'R*$\Omega_0$ (m/s)', sharex=ax)
if not plot_zeff:
ax.set_xlabel('$\\rho$')
# Zeff
if plot_zeff:
pyplot.setp(ax.get_xticklabels(), visible=False)
ax = plot_function(x=rho_core_prof, y=prof1d['zeff'], plot_num=9, ylabel='$Z_{eff}$', sharex=ax)
ax.set_xlabel('$\\rho$')
# Fluxes
if "core_transport" in ods:
core_transport = ods['core_transport']['model']
rho_transport_model = core_transport[0]['profiles_1d'][time_index]['grid_d']['rho_tor']
# Qe
ax = plot_function(
x=rho_transport_model,
y=core_transport[2]['profiles_1d'][time_index]['electrons']['energy']['flux'],
plot_num=2,
ylabel='$Q_e$ [W/$m^2$]',
sharex=ax,
)
color = ax.lines[-1].get_color()
uband(
x=rho_transport_model,
y=core_transport[3]['profiles_1d'][time_index]['electrons']['energy']['flux'],
ax=ax,
marker='o',
ls='None',
color=color,
)
uband(
x=rho_core_prof, y=core_transport[4]['profiles_1d'][time_index]['electrons']['energy']['flux'], ax=ax, ls='--', color=color
)
pyplot.setp(ax.get_xticklabels(), visible=False)
# Add legend on top (black) as it applies to all lines
from matplotlib.lines import Line2D
legend_elements = [
Line2D([0], [0], color='k', ls='--', label='Power Balance'),
Line2D([0], [0], color='k', label='Model total'),
Line2D([0], [0], marker='o', ls='None', color='k', label='Model target', markersize=6),
]
fig.legend(handles=legend_elements).set_draggable(True)
# Qi
ax = plot_function(
x=rho_transport_model,
y=core_transport[2]['profiles_1d'][time_index]['total_ion_energy']['flux'],
plot_num=4,
ylabel='$Q_i$ [W/$m^2$]',
sharex=ax,
sharey=ax,
)
uband(
x=rho_transport_model,
y=core_transport[3]['profiles_1d'][time_index]['total_ion_energy']['flux'],
ax=ax,
marker='o',
ls='None',
color=color,
)
uband(x=rho_core_prof, y=core_transport[4]['profiles_1d'][time_index]['total_ion_energy']['flux'], ax=ax, ls='--', color=color)
pyplot.setp(ax.get_xticklabels(), visible=False)
# Particle flux (electron particle source)
ax = plot_function(
x=rho_transport_model,
y=3 / 2 * core_transport[2]['profiles_1d'][time_index]['electrons']['particles']['flux'],
plot_num=6,
ylabel=r'$ \frac{3}{2}T_{e}\Gamma_{e}$ [W/$m^2$]',
sharex=ax,
)
uband(
x=rho_transport_model,
y=3 / 2 * core_transport[3]['profiles_1d'][time_index]['electrons']['particles']['flux'],
ax=ax,
marker='o',
ls='None',
color=color,
)
pyplot.setp(ax.get_xticklabels(), visible=False)
# Pi (toroidal momentum flux)
ax = plot_function(
x=rho_transport_model,
y=core_transport[2]['profiles_1d'][time_index]['momentum_tor']['flux'],
plot_num=8,
ylabel=r'$\Pi_{i}$ [N/$m$]',
sharex=ax,
)
ax.set_xlabel('$\\rho$')
uband(
x=rho_transport_model,
y=core_transport[3]['profiles_1d'][time_index]['momentum_tor']['flux'],
ax=ax,
marker='o',
ls='None',
color=color,
)
uband(x=rho_core_prof, y=core_transport[4]['profiles_1d'][time_index]['momentum_tor']['flux'], ax=ax, ls='--', color=color)
ax.set_xlim(0, 1)
return {'ax': axs, 'fig': fig}
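# Usage sketch (assuming `ods` has both core_profiles and the core_transport model indices
# used above): show profiles and fluxes for one state, adding the Zeff panel and hiding the
# fast-particle contribution to the density:
#
#     core_transport_fluxes(ods, time_index=0, plot_zeff=True, show_total_density=False)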
@add_to__ODS__
def core_sources_summary(ods, time_index=None, time=None, fig=None, **kw):
"""
Plot sources for electrons and all ion species
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes
"""
import matplotlib
from matplotlib import pyplot
axs = kw.pop('ax', {})
if axs is None:
axs = {}
if not len(axs) and fig is None:
fig = pyplot.figure()
# time animation
time_index, time = handle_time(ods, 'core_sources', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(core_sources_summary, ods, time_index, time, fig=fig, ax=axs, **kw)
colors = [k['color'] for k in list(matplotlib.rcParams['axes.prop_cycle'])]
lss = ['-', '--', 'dotted']
colors, lss = numpy.meshgrid(colors, lss)
if len(ods[f'core_sources.source']) > len(colors):
colors = colors.T
lss = lss.T
colors = colors.flatten()
lss = lss.flatten()
# if list is too small use all colors
if len(ods[f'core_sources.source']) > len(colors):
import matplotlib.colors as mcolors
colors = list(mcolors.CSS4_COLORS.keys())
for k, s in enumerate(ods['core_sources.source']):
rho = ods[f'core_sources.source.{s}.profiles_1d.{time_index}.grid.rho_tor_norm']
label = ods[f'core_sources.source.{s}.identifier.name']
tmp = {}
tmp[f'core_sources.source.{s}.profiles_1d.{time_index}.electrons.energy'] = ('$q_e$', 'linear')
tmp[f'core_sources.source.{s}.profiles_1d.{time_index}.total_ion_energy'] = ('$q_i$', 'linear')
tmp[None] = None
tmp[f'core_sources.source.{s}.profiles_1d.{time_index}.electrons.particles'] = ('$p_e$', 'linear')
tmp[f'core_sources.source.{s}.profiles_1d.{time_index}.j_parallel'] = (r'$J_\parallel$', 'linear')
tmp[f'core_sources.source.{s}.profiles_1d.{time_index}.momentum_tor'] = (r'$\pi_i$', 'linear')
ax = None
for kp, item in enumerate(tmp):
if item is None:
continue
ax = cached_add_subplot(fig, axs, 2, 3, kp + 1, sharex=ax)
if item in ods:
ax.plot(rho, ods[item], label=label, color=colors[k], ls=lss[k])
else:
ax.plot(numpy.nan, numpy.nan, label=label, color=colors[k], ls=lss[k])
ax.set_title(tmp[item][0])
ax.set_yscale(tmp[item][1])
ax.legend(loc=0)
return {'ax': axs, 'fig': fig}
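# Usage sketch (assuming `ods` carries core_sources data): one panel per source quantity,
# with one line per source:
#
#     core_sources_summary(ods, time_index=0)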
@add_to__ODS__
def pf_active_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot pf_active time traces
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# time traces
for channel in ods['pf_active.coil']:
label = ods[f'pf_active.coil.{channel}.element[0].identifier']
turns = ods[f'pf_active.coil.{channel}.element[0].turns_with_sign']
data = ods[f'pf_active.coil.{channel}.current.data']
time = ods[f'pf_active.coil.{channel}.current.time']
ax.plot(time, data * turns, label=label, **kw)
# equilibrium constraints
if equilibrium_constraints:
for channel in ods['pf_active.coil']:
if f'equilibrium.time_slice.0.constraints.pf_current.{channel}.measured' in ods:
ax.plot(
ods[f'equilibrium.time'],
ods[f'equilibrium.time_slice.:.constraints.pf_current.{channel}.measured'],
marker='o',
color='k',
mec='none',
)
return ax
@add_to__ODS__
def magnetics_bpol_probe_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot bpol_probe time traces and equilibrium constraints
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# time traces
for channel in ods['magnetics.b_field_pol_probe']:
valid = ods.get(f'magnetics.b_field_pol_probe.{channel}.field.validity', 0)
if valid == 0:
label = ods[f'magnetics.b_field_pol_probe.{channel}.identifier']
data = ods[f'magnetics.b_field_pol_probe.{channel}.field.data']
time = ods[f'magnetics.b_field_pol_probe.{channel}.field.time']
ax.plot(time, data, label=label, **kw)
# equilibrium constraints
if equilibrium_constraints:
for channel in ods['magnetics.b_field_pol_probe']:
valid = ods.get(f'magnetics.b_field_pol_probe.{channel}.field.validity', 0)
if valid == 0:
if f'equilibrium.time_slice.0.constraints.bpol_probe.{channel}.measured' in ods:
ax.plot(
ods[f'equilibrium.time'],
ods[f'equilibrium.time_slice.:.constraints.bpol_probe.{channel}.measured'],
marker='o',
color='k',
mec='none',
)
return ax
@add_to__ODS__
def magnetics_flux_loop_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot flux_loop time traces and equilibrium constraints
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# time traces
for channel in ods['magnetics.flux_loop']:
valid = ods.get(f'magnetics.flux_loop.{channel}.flux.validity', 0)
if valid == 0:
label = ods[f'magnetics.flux_loop.{channel}.identifier']
data = ods[f'magnetics.flux_loop.{channel}.flux.data']
time = ods[f'magnetics.flux_loop.{channel}.flux.time']
ax.plot(time, data, label=label, **kw)
# equilibrium constraints
if equilibrium_constraints:
for channel in ods['magnetics.flux_loop']:
valid = ods.get(f'magnetics.flux_loop.{channel}.flux.validity', 0)
if valid == 0:
if f'equilibrium.time_slice.0.constraints.flux_loop.{channel}.measured' in ods:
ax.plot(
ods[f'equilibrium.time'],
ods[f'equilibrium.time_slice.:.constraints.flux_loop.{channel}.measured'],
marker='o',
color='k',
mec='none',
)
return ax
@add_to__ODS__
def magnetics_ip_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot ip time trace and equilibrium constraint
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
return _plot_signal_eq_constraint(
ods,
'magnetics.ip.0.time',
'magnetics.ip.0.data',
'equilibrium.time_slice.:.constraints.ip.measured',
equilibrium_constraints,
ax,
label='ip',
**kw,
)
@add_to__ODS__
def magnetics_diamagnetic_flux_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot diamagnetic_flux time trace and equilibrium constraint
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
return _plot_signal_eq_constraint(
ods,
'magnetics.diamagnetic_flux.0.time',
'magnetics.diamagnetic_flux.0.data',
'equilibrium.time_slice.:.constraints.diamagnetic_flux.measured',
equilibrium_constraints,
ax,
label='dflux',
**kw,
)
@add_to__ODS__
def tf_b_field_tor_vacuum_r_data(ods, equilibrium_constraints=True, ax=None, **kw):
"""
plot b_field_tor_vacuum_r time trace and equilibrium constraint
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: Axes instance [optional]
axes to plot in (active axes is generated if `ax is None`)
:param \**kw: Additional keywords for plot
:return: axes instance
"""
return _plot_signal_eq_constraint(
ods,
'tf.b_field_tor_vacuum_r.time',
'tf.b_field_tor_vacuum_r.data',
'equilibrium.time_slice.:.constraints.b_field_tor_vacuum_r.measured',
equilibrium_constraints,
ax,
label='bt',
**kw,
)
def _plot_signal_eq_constraint(ods, time, data, constraint, equilibrium_constraints, ax, **kw):
"""
Utility function to plot an individual signal and its constraint in the equilibrium IDS
:param ods: input ods
:param time: ods location for time
:param data: ods location for data
:param constraint: ods location for equilibrium constraint
:param equilibrium_constraints: plot equilibrium constraints if present
:param ax: axes where to plot
:param kw: extra arguments passed to the plot call
:return: axes handler
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# time traces
time = ods[time]
data = ods[data]
ax.plot(time, data, **kw)
# equilibrium constraints
if equilibrium_constraints and constraint in ods:
ax.plot(ods['equilibrium.time'], ods[constraint], ls='', marker='o', color='k', mec='none')
return ax
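# Sketch of how this helper is intended to be called for a scalar signal, mirroring
# magnetics_ip_data above (the ods locations are illustrative and must exist in `ods`):
#
#     _plot_signal_eq_constraint(
#         ods,
#         'magnetics.ip.0.time',
#         'magnetics.ip.0.data',
#         'equilibrium.time_slice.:.constraints.ip.measured',
#         True,
#         None,
#         label='ip',
#     )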
# ================================
# actuator aimings
# ================================
@add_to__ODS__
def pellets_trajectory_CX(ods, time_index=None, time=None, ax=None, **kw):
"""
Plot pellets trajectory in poloidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'pellets', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(pellets_trajectory_CX, ods, time_index, time, ax=ax, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
pellets = ods['pellets']['time_slice'][time_index]['pellet']
for pellet in pellets:
R0 = pellets[pellet]['path_geometry.first_point.r']
R1 = pellets[pellet]['path_geometry.second_point.r']
Z0 = pellets[pellet]['path_geometry.first_point.z']
Z1 = pellets[pellet]['path_geometry.second_point.z']
ax.plot([R0, R1], [Z0, Z1], '--', **kw)
return {'ax': ax}
@add_to__ODS__
def pellets_trajectory_CX_topview(ods, time_index=None, time=None, ax=None, **kw):
"""
Plot pellet trajectory in toroidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'pellets', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(pellets_trajectory_CX_topview, ods, time_index, time, ax=ax, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
pellets = ods['pellets']['time_slice'][time_index]['pellet']
for pellet in pellets:
R0 = pellets[pellet]['path_geometry.first_point.r']
R1 = pellets[pellet]['path_geometry.second_point.r']
phi0 = pellets[pellet]['path_geometry.first_point.phi']
phi1 = pellets[pellet]['path_geometry.second_point.phi']
x0 = R0 * numpy.cos(phi0)
y0 = R0 * numpy.sin(phi0)
x1 = R1 * numpy.cos(phi1)
y1 = R1 * numpy.sin(phi1)
ax.plot([x0, x1], [y0, y1], '--', **kw)
return {'ax': ax}
@add_to__ODS__
def lh_antennas_CX(ods, time_index=None, time=None, ax=None, antenna_trajectory=None, **kw):
"""
Plot LH antenna position in poloidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param antenna_trajectory: length of antenna on plot
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'lh_antennas', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(lh_antennas_CX, ods, time_index, time, ax=ax, antenna_trajectory=antenna_trajectory, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
equilibrium = ods['equilibrium']['time_slice'][time_index]
antennas = ods['lh_antennas']['antenna']
if antenna_trajectory is None:
antenna_trajectory = 0.1 * ods['equilibrium']['vacuum_toroidal_field.r0']
for antenna in antennas:
R = antennas[antenna]['position.r.data']
Z = antennas[antenna]['position.z.data']
# just point to magnetic axis for now (is there a better way?)
Raxis = equilibrium['global_quantities.magnetic_axis.r']
Zaxis = equilibrium['global_quantities.magnetic_axis.z']
Rvec = Raxis - R
Zvec = Zaxis - Z
R1 = R + Rvec * antenna_trajectory / numpy.sqrt(Rvec ** 2 + Zvec ** 2)
Z1 = Z + Zvec * antenna_trajectory / numpy.sqrt(Rvec ** 2 + Zvec ** 2)
ax.plot([R, R1], [Z, Z1], 's-', markevery=2, **kw)
return {'ax': ax}
@add_to__ODS__
def lh_antennas_CX_topview(ods, time_index=None, time=None, ax=None, antenna_trajectory=None, **kw):
"""
Plot LH antenna in toroidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:param antenna_trajectory: length of antenna on plot
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'lh_antennas', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(lh_antennas_CX_topview, ods, time_index, time, ax=ax, antenna_trajectory=antenna_trajectory, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
equilibrium = ods['equilibrium']
antennas = ods['lh_antennas']['antenna']
if antenna_trajectory is None:
antenna_trajectory = 0.1 * equilibrium['vacuum_toroidal_field.r0']
for antenna in antennas:
R = antennas[antenna]['position.r.data']
phi = antennas[antenna]['position.phi.data']
x0 = R * numpy.cos(phi)
y0 = R * numpy.sin(phi)
x1 = (R - antenna_trajectory) * numpy.cos(phi)
y1 = (R - antenna_trajectory) * numpy.sin(phi)
ax.plot([x0, x1], [y0, y1], 's-', markevery=2, **kw)
return {'ax': ax}
@add_to__ODS__
def ec_launchers_CX(ods, time_index=None, time=None, ax=None, launcher_trajectory=None, **kw):
"""
Plot EC launchers in poloidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:param launcher_trajectory: length of launcher on plot
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'ec_launchers', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(ec_launchers_CX, ods, time_index, time, ax=ax, launcher_trajectory=launcher_trajectory, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
equilibrium = ods['equilibrium']
launchers = ods['ec_launchers.launcher']
if launcher_trajectory is None:
launcher_trajectory = 0.1 * equilibrium['vacuum_toroidal_field.r0']
for launcher in launchers:
R0 = launchers[launcher]['launching_position.r']
Z0 = launchers[launcher]['launching_position.z']
ang_tor = launchers[launcher]['steering_angle_tor.data']
ang_pol = launchers[launcher]['steering_angle_pol.data']
ang_pol_proj = 0.5 * numpy.pi - numpy.arctan2(numpy.tan(ang_pol), numpy.cos(ang_tor))
R1 = R0 - launcher_trajectory * numpy.cos(ang_pol_proj)
Z1 = Z0 - launcher_trajectory * numpy.sin(ang_pol_proj)
ax.plot([R0, R1], [Z0, Z1], 'o-', markevery=2, **kw)
R1 = R0 - launcher_trajectory * numpy.cos(ang_pol)
Z1 = Z0 - launcher_trajectory * numpy.sin(ang_pol)
ax.plot([R0, R1], [Z0, Z1], 'o-', markevery=2, **kw)
return {'ax': ax}
@add_to__ODS__
def ec_launchers_CX_topview(ods, time_index=None, time=None, ax=None, launcher_trajectory=None, **kw):
"""
Plot EC launchers in toroidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:param launcher_trajectory: length of launcher on plot
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'ec_launchers', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(ec_launchers_CX_topview, ods, time_index, time, ax=ax, launcher_trajectory=launcher_trajectory, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
equilibrium = ods['equilibrium']
launchers = ods['ec_launchers.launcher']
if launcher_trajectory is None:
launcher_trajectory = 0.1 * equilibrium['vacuum_toroidal_field.r0']
for launcher in launchers:
R = launchers[launcher]['launching_position.r']
phi = launchers[launcher]['launching_position.phi']
ang_tor = launchers[launcher]['steering_angle_tor.data']
x0 = R * numpy.cos(phi)
y0 = R * numpy.sin(phi)
x1 = x0 - launcher_trajectory * numpy.cos(ang_tor + phi)
y1 = y0 - launcher_trajectory * numpy.sin(ang_tor + phi)
ax.plot([x0, x1], [y0, y1], 'o-', markevery=2, **kw)
return {'ax': ax}
# ================================
# Heating and current drive
# ================================
@add_to__ODS__
def waves_beam_CX(ods, time_index=None, time=None, ax=None, **kw):
"""
Plot waves beams in poloidal cross-section
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'waves', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(waves_beam_CX, ods, time_index, time, ax=ax, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
coherent_wave = ods['waves.coherent_wave']
for cw in coherent_wave:
bt = coherent_wave[cw]['beam_tracing'][time_index]
for b in bt['beam'].values():
ax.plot(b['position.r'], b['position.z'], **kw)
# plotc(b['position.r'], b['position.z'], b['electrons.power']/max(b['electrons.power']), ax=ax, **kw)
return {'ax': ax}
@add_to__ODS__
def waves_beam_profile(ods, time_index=None, time=None, what=['power_density', 'current_parallel_density'][0], ax=None, **kw):
"""
Plot 1d profiles of waves beams given quantity
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param what: quantity to plot ('power_density' or 'current_parallel_density')
:param ax: axes to plot in (active axes is generated if `ax is None`)
:param kw: arguments passed to matplotlib plot statements
:return: axes handler
"""
# time animation
time_index, time = handle_time(ods, 'waves', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(waves_beam_profile, ods, time_index, time, what=what, ax=ax, **kw)
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
coherent_wave = ods['waves.coherent_wave']
for cw in coherent_wave:
b = coherent_wave[cw]['profiles_1d'][time_index]
ax.plot(b['grid.rho_tor_norm'], b[what], **kw)
ax.set_title(what.replace('_', ' ').capitalize())
ax.set_ylabel('[%s]' % omas_info_node(b.ulocation + '.' + what)['units'])
ax.set_xlabel('rho')
return {'ax': ax}
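# Usage sketch (assuming `ods` carries waves.coherent_wave profiles_1d data): select the
# quantity with the `what` argument:
#
#     waves_beam_profile(ods, time_index=0, what='current_parallel_density')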
@add_to__ODS__
def waves_beam_summary(ods, time_index=None, time=None, fig=None, **kw):
"""
Plot waves beam summary: CX, power_density, and current_parallel_density
:param ods: input ods
:param time_index: int, list of ints, or None
time slice to plot. If None all timeslices are plotted.
:param time: float, list of floats, or None
time to plot. If None all time slices are plotted.
if not None, it takes precedence over time_index
:param fig: figure to plot in (a new figure is generated if `fig is None`)
:param kw: arguments passed to matplotlib plot statements
:return: figure handler
"""
from matplotlib import pyplot
axs = kw.pop('ax', {})
if axs is None:
axs = {}
if not len(axs) and fig is None:
fig = pyplot.figure()
# time animation
time_index, time = handle_time(ods, 'waves', time_index, time)
if isinstance(time_index, (list, numpy.ndarray)):
if len(time) == 1:
time_index = time_index[0]
else:
return ods_time_plot(waves_beam_summary, ods, time_index, time, fig=fig, ax={}, **kw)
ax = cached_add_subplot(fig, axs, 1, 2, 1)
waves_beam_CX(ods, time_index=time_index, ax=ax, **kw)
ax = cached_add_subplot(fig, axs, 2, 2, 2)
waves_beam_profile(ods, time_index=time_index, what='power_density', ax=ax, **kw)
ax.set_xlabel('')
ax = cached_add_subplot(fig, axs, 2, 2, 4, sharex=ax)
waves_beam_profile(ods, time_index=time_index, what='current_parallel_density', ax=ax, **kw)
ax.set_xlim([0, 1])
return {'ax': axs}
@add_to__ODS__
def nbi_summary(ods, ax=None):
"""
Plot summary of NBI power time traces
:param ods: input ods
:param ax: axes to plot in (active axes is generated if `ax is None`)
:return: axes handler
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
time = ods['nbi.time']
nbi = ods['nbi.unit']
tmp = []
for beam in nbi:
tmp.append(nbi[beam]['power_launched.data'])
ax.plot(time, tmp[-1], label=nbi[beam]['identifier'])
ax.plot(time, numpy.sum(tmp, 0), 'k', lw=2, label='Total')
ax.set_title('Neutral Beam Injectors power')
ax.set_xlabel('Time [s]')
ax.set_ylabel('Power [W]')
ax.legend()
return {'ax': ax}
# ================================
# Hardware overlays
# ================================
@add_to__ODS__
def overlay(ods, ax=None, allow_autoscale=True, debug_all_plots=False, return_overlay_list=False, **kw):
r"""
Plots overlays of hardware/diagnostic locations on a tokamak cross section plot
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param allow_autoscale: bool
Certain overlays will be allowed to unlock xlim and ylim, assuming that they have been locked by equilibrium_CX.
If this option is disabled, then hardware systems like PF-coils will be off the plot and mostly invisible.
:param debug_all_plots: bool
Individual hardware systems are on by default instead of off by default.
:param return_overlay_list:
Return list of possible overlays that could be plotted
:param \**kw: additional keywords for selecting plots.
- Select plots by setting their names to True; e.g.: if you want the gas_injection plot, set gas_injection=True
as a keyword.
If debug_all_plots is True, then you can turn off individual plots by setting, for example, gas_injection=False.
- Instead of True to simply turn on an overlay, you can pass a dict of keywords to pass to a particular overlay
method, as in thomson={'labelevery': 5}. After an overlay pops off its keywords, remaining keywords are passed
to plot, so you can set linestyle, color, etc.
- Overlay functions accept these standard keywords:
* mask: bool array
Set of flags for switching plot elements on/off. Must be equal to the number of channels or items to be
plotted.
* labelevery: int
Sets how often to add labels to the plot. A setting of 0 disables labels, 1 labels every element,
2 labels every other element, 3 labels every third element, etc.
* notesize: matplotlib font size specification
Applies to annotations drawn on the plot. Examples: 'xx-small', 'medium', 16
* label_ha: None or string or list of (None or string) instances
Descriptions of how labels should be aligned horizontally. Either provide a single specification or a
list of specs matching or exceeding the number of labels expected.
Each spec should be: 'right', 'left', or 'center'. None (either as a scalar or an item in the list) will
give default alignment for the affected item(s).
* label_va: None or string or list of (None or string) instances
Descriptions of how labels should be aligned vertically. Either provide a single specification or a
list of specs matching or exceeding the number of labels expected.
Each spec should be: 'top', 'bottom', 'center', 'baseline', or 'center_baseline'.
None (either as a scalar or an item in the list) will give default alignment for the affected item(s).
* label_r_shift: float or float array/list.
Add an offset to the R coordinates of all text labels for the current hardware system.
(in data units, which would normally be m)
Scalar: add the same offset to all labels.
Iterable: Each label can have its own offset.
If the list/array of offsets is too short, it will be padded with 0s.
* label_z_shift: float or float array/list
Add an offset to the Z coordinates of all text labels for the current hardware system
(in data units, which would normally be m)
Scalar: add the same offset to all labels.
Iterable: Each label can have its own offset.
If the list/array of offsets is too short, it will be padded with 0s.
* Additional keywords are passed to the function that does the drawing; usually matplotlib.axes.Axes.plot().
:return: axes handler
"""
if return_overlay_list:
return [k.replace('_overlay', '') for k in __ods__ if k.endswith('_overlay') and k.replace('_overlay', '') in ods]
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
special_subs = ['position_control']
for hw_sys in list_structures(ods.imas_version) + special_subs:
if kw.get(hw_sys, debug_all_plots):
try:
overlay_function = eval('{}_overlay'.format(hw_sys))
except NameError:
continue
overlay_kw = kw.get(hw_sys, {}) if isinstance(kw.get(hw_sys, {}), dict) else {}
for k in ['mask', 'labelevery', 'notesize', 'label_ha', 'label_va', 'label_r_shift', 'label_z_shift']:
if k in kw and k not in overlay_kw:
overlay_kw[k] = kw[k]
if allow_autoscale and hw_sys in ['pf_active', 'gas_injection']: # Not all systems need expanded range to fit everything
ax.set_xlim(auto=True)
ax.set_ylim(auto=True)
overlay_function(ods, ax, **overlay_kw)
return {'ax': ax}
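# Usage sketch of the keyword-selection mechanism described in the docstring above: overlays
# are selected by the name of their hardware system, and a dict value forwards options to that
# overlay (the systems shown are examples and are assumed to be present in `ods`):
#
#     overlay(ods, thomson_scattering={'labelevery': 5, 'color': 'r'}, gas_injection=True)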
@add_to__ODS__
def wall_overlay(ods, ax=None, component_index=None, types=['limiter', 'mobile', 'vessel'], unit_index=None, **kw):
"""
Plot walls on a tokamak cross section plot
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param component_index: list of index of components to plot
:param types: list with one or more of ['limiter','mobile','vessel']
:param unit_index: list of index of units of the component to plot
:return: axes handler
"""
from matplotlib import pyplot
for k in ['mask', 'labelevery', 'notesize', 'label_ha', 'label_va', 'label_r_shift', 'label_z_shift']:
kw.pop(k, None)
kw.setdefault('color', 'k')
if ax is None:
ax = pyplot.gca()
if component_index is None:
component_index = ods['wall.description_2d'].keys()
elif isinstance(component_index, int):
component_index = [component_index]
elif isinstance(component_index, str):
component_index = [ods['wall.description_2d[:].limiter.type.name'].index(component_index)]
for component in component_index:
for type in types:
if type not in ods[f'wall.description_2d[{component}]']:
continue
if unit_index is None:
unit_index = ods[f'wall.description_2d[{component}].{type}.unit'].keys()
elif isinstance(unit_index, int):
unit_index = [unit_index]
elif isinstance(unit_index, str):
unit_index = [ods[f'wall.description_2d[{component}].{type}.unit[:].type.name'].index(unit_index)]
for unit in unit_index:
ax.plot(
ods[f'wall.description_2d[{component}].{type}.unit[{unit}].outline.r'],
ods[f'wall.description_2d[{component}].{type}.unit[{unit}].outline.z'],
**kw,
)
ax.set_aspect('equal')
return {'ax': ax}
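# Usage sketch: restrict the wall drawing to the limiter contour of the first 2D description
# (assumes `ods['wall.description_2d[0].limiter']` is filled):
#
#     wall_overlay(ods, component_index=0, types=['limiter'])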
@add_to__ODS__
def gas_injection_overlay(
ods,
ax=None,
angle_not_in_pipe_name=False,
which_gas='all',
show_all_pipes_in_group=True,
simple_labels=False,
label_spacer=0,
colors=None,
draw_arrow=True,
**kw,
):
r"""
Plots overlays of gas injectors
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param angle_not_in_pipe_name: bool
Set this to include (Angle) at the end of injector labels. Useful if injector/pipe names don't already
include angles in them.
:param which_gas: string or list
Filter for selecting which gas pipes to display.
- If string: get a preset group, like 'all'.
- If list: only pipes in the list will be shown. Abbreviations are tolerated; e.g. GASA is recognized as
GASA_300. One abbreviation can turn on several pipes. There are several injection location names
starting with RF_ on DIII-D, for example.
:param show_all_pipes_in_group: bool
Some pipes have the same R,Z coordinates of their exit positions (but different phi locations) and will
appear at the same location on the plot. If this keyword is True, labels for all the pipes in such a group
will be displayed together. If it is False, only the first one in the group will be labeled.
:param simple_labels: bool
Simplify labels by removing suffix after the last underscore.
:param label_spacer: int
Number of blank lines and spaces to insert between labels and symbol
:param colors: list of matplotlib color specifications.
These colors control the display of various gas ports. The list will be repeated to make sure it is long enough.
Do not specify a single RGB tuple by itself. However, a single tuple inside list is okay [(0.9, 0, 0, 0.9)].
If the color keyword is used (See \**kw), then color will be popped to set the default for colors in case colors
is None.
:param draw_arrow: bool or dict
Draw an arrow toward the machine at the location of the gas inlet. If dict, pass keywords to arrow drawing func.
:param \**kw: Additional keywords for gas plot:
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to plot call for drawing markers at the gas locations.
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
npipes = get_channel_count(
ods, 'gas_injection', check_loc='gas_injection.pipe.0.exit_position.r', channels_name='pipe', test_checker='~numpy.isnan(checker)'
)
if npipes == 0:
return {'ax': ax}
mask = kw.pop('mask', numpy.ones(npipes, bool))
pipes = ods['gas_injection']['pipe'] # Shortcut
# Identify gas injectors with the same poloidal location and group them so that their labels won't overlap.
locations = {}
for i in pipes:
if mask[i]:
pipe = pipes[i]
label = pipe['name']
if not gas_filter(label, which_gas):
continue # Skip this pipe because it's not active
r, z = pipe['exit_position']['r'], pipe['exit_position']['z']
location_name = f'{r:0.3f}_{z:0.3f}'
if simple_labels:
label = '_'.join(label.split('_')[:-1])
locations.setdefault(location_name, [])
locations[location_name] += [label]
if angle_not_in_pipe_name:
try:
label += ' ({:0d})'.format(int(round(pipe['exit_position']['phi'] * 180 / numpy.pi)))
except (TypeError, ValueError):
pass
try:
r2, z2 = pipe['second_point']['r'], pipe['second_point']['z']
except (LookupError, ValueError):
if len(locations[location_name]) > 3:
# If an item has already been added at this location, use its r2, z2 to fill in missing values
r2 = locations[location_name][-3]
z2 = locations[location_name][-2]
else:
r2 = z2 = None
locations[location_name] += [r2, z2]
try:
rsplit = ods['equilibrium.time_slice'][0]['global_quantities.magnetic_axis.r']
except ValueError:
draw_arrow = False # This won't work without magnetic axis data, either.
rsplit = numpy.mean([float(loc.split('_')[0]) for loc in locations])
kw.setdefault('marker', 'd')
kw.setdefault('linestyle', ' ')
labelevery = kw.pop('labelevery', 1)
notesize = kw.pop('notesize', 'xx-small')
default_ha = [['left', 'right'][int(float(loc.split('_')[0]) < rsplit)] for loc in locations]
default_va = [['top', 'bottom'][int(float(loc.split('_')[1]) > 0)] for loc in locations]
label_ha, label_va, kw = text_alignment_setup(len(locations), default_ha=default_ha, default_va=default_va, **kw)
label_dr, label_dz = label_shifter(len(locations), kw)
# For each unique poloidal location, draw a marker and write a label describing all the injectors at this location.
default_color = kw.pop('color', None)
colors = numpy.atleast_1d(default_color if colors is None else colors).tolist()
colors2 = colors * int(numpy.ceil(len(locations) / float(len(colors)))) # Make sure the list is long enough.
for i, loc in enumerate(locations):
r, z = numpy.array(loc.split('_')).astype(float)
if show_all_pipes_in_group:
show_locs = list(set(locations[loc][::3]))  # Each pipe has ['label', r2, z2], so [::3] selects just labels.
else:
show_locs = [locations[loc][0]]
label = '{spacer:}\n{spacer:}'.format(spacer=' ' * label_spacer).join([''] + show_locs + [''])
if draw_arrow:
kw.update(draw_arrow if isinstance(draw_arrow, dict) else {})
gas_mark = gas_arrow(ods, r, z, r2=locations[loc][-2], z2=locations[loc][-1], ax=ax, color=colors2[i], **kw)
else:
gas_mark = ax.plot(r, z, color=colors2[i], **kw)
kw.pop('label', None) # Prevent label from being applied every time through the loop to avoid spammy legend
if (labelevery > 0) and ((i % labelevery) == 0):
label = '\n' * label_spacer + label if label_va[i] == 'top' else label + '\n' * label_spacer
ax.text(
r + label_dr[i], z + label_dz[i], label, color=gas_mark[0].get_color(), va=label_va[i], ha=label_ha[i], fontsize=notesize
)
return {'ax': ax}
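# Usage sketch (pipe names are illustrative, DIII-D style abbreviations are resolved by
# gas_filter): label every location and strip the trailing angle suffix from the names:
#
#     gas_injection_overlay(ods, which_gas=['GASA', 'GASB'], simple_labels=True, labelevery=1)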
@add_to__ODS__
def pf_active_overlay(ods, ax=None, **kw):
r"""
Plots overlays of active PF coils.
INCOMPLETE: only the rectangle and outline geometry definitions are treated so far. More should be added later.
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param \**kw: Additional keywords
scalex, scaley: passed to ax.autoscale_view() call at the end
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to matplotlib.patches.Polygon call
Hint: you may want to set facecolor instead of just color
"""
import matplotlib
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
nc = get_channel_count(
ods, 'pf_active', check_loc='pf_active.coil.0.element.0.geometry.geometry_type', channels_name='coil', test_checker='checker > -1'
)
if nc == 0:
return {'ax': ax}
kw.setdefault('label', 'Active PF coils')
kw.setdefault('facecolor', 'gray')
kw.setdefault('edgecolor', 'k')
kw.setdefault('alpha', 0.7)
labelevery = kw.pop('labelevery', 0)
notesize = kw.pop('notesize', 'xx-small')
mask = kw.pop('mask', numpy.ones(nc, bool))
scalex, scaley = kw.pop('scalex', True), kw.pop('scaley', True)
label_ha, label_va, kw = text_alignment_setup(nc, default_ha='center', default_va='center', **kw)
label_dr, label_dz = label_shifter(nc, kw)
def path_rectangle(rectangle):
"""
:param rectangle: ODS sub-folder: element.*.geometry.rectangle
:return: n x 2 array giving the path around the outline of the coil element, suitable for input to Polygon()
"""
x = rectangle['r']
y = rectangle['z']
dx = rectangle['width']
dy = rectangle['height']
return numpy.array(
[[x - dx / 2.0, x - dx / 2.0, x + dx / 2.0, x + dx / 2.0], [y - dy / 2.0, y + dy / 2.0, y + dy / 2.0, y - dy / 2.0]]
).T
def path_outline(outline):
"""
:param outline: ODS sub-folder: element.*.geometry.outline
:return: n x 2 array giving the path around the outline of the coil element, suitable for input to Polygon()
"""
return numpy.array([outline['r'], outline['z']]).T
patches = []
for c in range(nc):
if mask[c]:
for e in ods['pf_active.coil'][c]['element']:
try:
geometry_type = geo_type_lookup(
ods['pf_active.coil'][c]['element'][e]['geometry.geometry_type'], 'pf_active', ods.imas_version
)
except (IndexError, ValueError):
geometry_type = 'unrecognized'
try:
path = eval('path_{}'.format(geometry_type))(ods['pf_active.coil'][c]['element'][e]['geometry'][geometry_type])
except NameError:
print('Warning: unrecognized geometry type for pf_active coil {}: {}'.format(c, geometry_type))
continue
patches.append(matplotlib.patches.Polygon(path, closed=True, **kw))
kw.pop('label', None) # Prevent label from being placed on more than one patch
try:
pf_id = ods['pf_active.coil'][c]['element'][e]['identifier']
except ValueError:
pf_id = None
if labelevery > 0 and c % labelevery == 0 and pf_id is not None:
ax.text(
numpy.mean(path[:, 0]) + label_dr[c],
numpy.mean(path[:, 1]) + label_dz[c],
pf_id,
ha=label_ha[c],
va=label_va[c],
fontsize=notesize,
)
for p in patches:
ax.add_patch(p) # Using patch collection breaks auto legend labeling, so add patches individually.
ax.autoscale_view(scalex=scalex, scaley=scaley) # add_patch doesn't include this
ax.set_aspect('equal')
return {'ax': ax}
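# Usage sketch, following the docstring hint about facecolor (assumes pf_active coil element
# geometries are filled in `ods`):
#
#     pf_active_overlay(ods, facecolor='lightgray', labelevery=1, notesize='x-small')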
@add_to__ODS__
def magnetics_overlay(
ods,
ax=None,
show_flux_loop=True,
show_bpol_probe=True,
show_btor_probe=True,
flux_loop_style={'marker': 's'},
pol_probe_style={},
tor_probe_style={'marker': '.'},
**kw,
):
"""
Plot magnetics on a tokamak cross section plot
:param ods: OMAS ODS instance
:param flux_loop_style: dictionary with matplotlib options to render flux loops
:param pol_probe_style: dictionary with matplotlib options to render poloidal magnetic probes
:param tor_probe_style: dictionary with matplotlib options to render toroidal magnetic probes
:param ax: axes to plot in (active axes is generated if `ax is None`)
:return: axes handler
"""
from matplotlib import pyplot
kw0 = copy.copy(kw)
if ax is None:
ax = pyplot.gca()
# flux loops
nfl = get_channel_count(
ods, 'magnetics', check_loc='magnetics.flux_loop.0.position.0.r', channels_name='flux_loop', test_checker='~numpy.isnan(checker)'
)
if show_flux_loop and nfl:
kw = copy.copy(kw0)
labelevery = kw.pop('labelevery', 0)
notesize = kw.pop('notesize', 'xx-small')
label_ha, label_va, kw = text_alignment_setup(nfl, **kw)
label_dr, label_dz = label_shifter(nfl, kw)
for k, (r, z) in enumerate(zip(ods[f'magnetics.flux_loop.:.position[0].r'], ods[f'magnetics.flux_loop.:.position[0].z'])):
ax.plot(r, z, **flux_loop_style)
flux_loop_style.setdefault('color', ax.lines[-1].get_color())
if labelevery > 0 and k % labelevery == 0:
ax.text(
r + label_dr[k],
z + label_dz[k],
ods.get(f'magnetics.flux_loop.{k}.identifier', str(k)),
color=flux_loop_style['color'],
fontsize=notesize,
ha=label_ha[k],
va=label_va[k],
)
# poloidal magnetic probes
nbp = get_channel_count(
ods,
'magnetics',
check_loc='magnetics.b_field_pol_probe.0.position.r',
channels_name='b_field_pol_probe',
test_checker='~numpy.isnan(checker)',
)
if show_bpol_probe and nbp:
kw = copy.copy(kw0)
labelevery = kw.pop('labelevery', 0)
notesize = kw.pop('notesize', 'xx-small')
label_ha, label_va, kw = text_alignment_setup(nbp, **kw)
label_dr, label_dz = label_shifter(nbp, kw)
from .omas_physics import probe_endpoints
PX, PY = probe_endpoints(
ods['magnetics.b_field_pol_probe[:].position.r'],
ods['magnetics.b_field_pol_probe[:].position.z'],
ods['magnetics.b_field_pol_probe[:].poloidal_angle'],
ods['magnetics.b_field_pol_probe[:].length'],
ods.cocosio,
)
for k, (px, py) in enumerate(zip(PX, PY)):
r = numpy.mean(px)
z = numpy.mean(py)
if show_bpol_probe:
ax.plot(px, py, label='_' + ods.get(f'magnetics.b_field_pol_probe[{k}].identifier', str(k)), **pol_probe_style, **kw)
pol_probe_style.setdefault('color', ax.lines[-1].get_color())
if labelevery > 0 and k % labelevery == 0:
ax.text(
r + label_dr[k],
z + label_dz[k],
ods.get(f'magnetics.b_field_pol_probe[{k}].identifier', str(k)),
color=pol_probe_style['color'],
fontsize=notesize,
ha=label_ha[k],
va=label_va[k],
)
# toroidal magnetic probes
nbt = get_channel_count(
ods,
'magnetics',
check_loc='magnetics.b_field_tor_probe.0.position.r',
channels_name='b_field_tor_probe',
test_checker='~numpy.isnan(checker)',
)
if show_btor_probe and nbt:
kw = copy.copy(kw0)
labelevery = kw.pop('labelevery', 0)
notesize = kw.pop('notesize', 'xx-small')
label_ha, label_va, kw = text_alignment_setup(nbt, **kw)
label_dr, label_dz = label_shifter(nbt, kw)
for k, (r, z) in enumerate(zip(ods['magnetics.b_field_tor_probe[:].position.r'], ods['magnetics.b_field_tor_probe[:].position.z'])):
ax.plot(r, z, '.m', label='_' + ods.get(f'magnetics.b_field_tor_probe[{k}].identifier', str(k)), **tor_probe_style, **kw)
tor_probe_style.setdefault('color', ax.lines[-1].get_color())
if labelevery > 0 and k % labelevery == 0:
ax.text(
r + label_dr[k],
z + label_dz[k],
ods.get(f'magnetics.b_field_tor_probe[{k}].identifier', str(k)),
color=tor_probe_style['color'],
fontsize=notesize,
ha=label_ha[k],
va=label_va[k],
)
ax.set_aspect('equal')
return {'ax': ax}
@add_to__ODS__
def interferometer_overlay(ods, ax=None, **kw):
r"""
Plots overlays of interferometer chords.
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param \**kw: Additional keywords
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to plot call
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
nc = get_channel_count(
ods, 'interferometer', check_loc='interferometer.channel.0.line_of_sight.first_point.r', test_checker='~numpy.isnan(checker)'
)
if nc == 0:
return {'ax': ax}
color = kw.pop('color', None)
labelevery = kw.pop('labelevery', 1)
mask = kw.pop('mask', numpy.ones(nc, bool))
notesize = kw.pop('notesize', 'medium')
label_ha, label_va, kw = text_alignment_setup(nc, default_ha='left', default_va='top', **kw)
label_dr, label_dz = label_shifter(nc, kw)
j = 0
for i in range(nc):
if mask[i]:
ch = ods['interferometer.channel'][i]
los = ch['line_of_sight']
r1, z1, r2, z2 = los['first_point.r'], los['first_point.z'], los['second_point.r'], los['second_point.z']
line = ax.plot([r1, r2], [z1, z2], color=color, label='interferometer' if i == 0 else '', **kw)
color = line[0].get_color() # If this was None before, the cycler will have given us something. Lock it in.
if (labelevery > 0) and ((i % labelevery) == 0):
ax.text(
max([r1, r2]) + label_dr[j],
min([z1, z2]) + label_dz[j],
ch['identifier'],
color=color,
va=label_va[i],
ha=label_ha[i],
fontsize=notesize,
)
j += 1
return {'ax': ax}
@add_to__ODS__
def thomson_scattering_overlay(ods, ax=None, **kw):
r"""
Overlays Thomson channel locations
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param \**kw: Additional keywords for Thomson plot:
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to plot call
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
nc = get_channel_count(
ods, 'thomson_scattering', check_loc='thomson_scattering.channel.0.position.r', test_checker='~numpy.isnan(checker)'
)
if nc == 0:
return {'ax': ax}
labelevery = kw.pop('labelevery', 5)
notesize = kw.pop('notesize', 'xx-small')
mask = kw.pop('mask', numpy.ones(nc, bool))
kw.setdefault('marker', '+')
kw.setdefault('label', 'Thomson scattering')
kw.setdefault('linestyle', ' ')
label_ha, label_va, kw = text_alignment_setup(nc, **kw)
label_dr, label_dz = label_shifter(nc, kw)
r = numpy.array([ods['thomson_scattering']['channel'][i]['position']['r'] for i in range(nc)])[mask]
z = numpy.array([ods['thomson_scattering']['channel'][i]['position']['z'] for i in range(nc)])[mask]
ts_id = numpy.array([ods['thomson_scattering']['channel'][i]['identifier'] for i in range(nc)])[mask]
ts_mark = ax.plot(r, z, **kw)
for i in range(sum(mask)):
if (labelevery > 0) and ((i % labelevery) == 0):
ax.text(
r[i] + label_dr[i],
z[i] + label_dz[i],
ts_id[i],
color=ts_mark[0].get_color(),
fontsize=notesize,
ha=label_ha[i],
va=label_va[i],
)
return {'ax': ax}
@add_to__ODS__
def charge_exchange_overlay(ods, ax=None, which_pos='closest', **kw):
r"""
Overlays Charge Exchange Recombination (CER) spectroscopy channel locations
:param ods: OMAS ODS instance
:param ax: axes instance into which to plot (default: gca())
:param which_pos: string
'all': plot all valid positions this channel uses. This can vary in time depending on which beams are on.
'closest': for each channel, pick the time slice with valid data closest to the time used for the
equilibrium contours and show position at this time. Falls back to all if equilibrium time cannot be
read from time_slice 0 of equilibrium in the ODS.
:param \**kw: Additional keywords for CER plot:
color_tangential: color to use for tangentially-viewing channels
color_vertical: color to use for vertically-viewing channels
color_radial: color to use for radially-viewing channels
marker_tangential, marker_vertical, marker_radial: plot symbols to use for T, V, R viewing channels
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to plot call
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
nc = get_channel_count(
ods, 'charge_exchange', check_loc='charge_exchange.channel.0.position.r.data', test_checker='any(~numpy.isnan(checker))'
)
if nc == 0:
return {'ax': ax}
try:
eq_time = ods['equilibrium.time_slice.0.time']
except ValueError:
eq_time = None
# Resolve keywords
mask = kw.pop('mask', numpy.ones(nc, bool))
labelevery = kw.pop('labelevery', 5)
if eq_time is None:
which_pos = 'all'
colors = {}
for colorkw in ['color_tangential', 'color_vertical', 'color_radial']:
ckw = kw.pop(colorkw, kw.get('color', None))
if ckw is not None:
colors[colorkw.split('_')[-1][0].upper()] = ckw
kw.pop('color', None)
marker = kw.pop('marker', None)
markers = {
'T': kw.pop('marker_tangential', 's' if marker is None else marker),
'V': kw.pop('marker_vertical', 'd' if marker is None else marker),
'R': kw.pop('marker_radial', '*' if marker is None else marker),
}
notesize = kw.pop('notesize', 'xx-small')
ha, va, kw = text_alignment_setup(nc, **kw)
label_dr, label_dz = label_shifter(nc, kw)
# Get channel positions; each channel has a list of positions as it can vary with time as beams switch on/off.
r = [[numpy.NaN]] * nc
z = [[numpy.NaN]] * nc
for i in range(nc):
rs = ods['charge_exchange.channel'][i]['position.r.data']
zs = ods['charge_exchange.channel'][i]['position.z.data']
w = (rs > 0) & (~numpy.isnan(rs)) & (~numpy.isnan(zs)) # Validity mask: remove zero and NaN
ts = ods['charge_exchange.channel'][i]['position.r.time'][w]
rs = rs[w]
zs = zs[w]
if which_pos == 'all': # Show the set of all valid positions measured by this channel.
rz = list(set(zip(rs, zs)))
r[i] = [rz[j][0] for j in range(len(rz))]
z[i] = [rz[j][1] for j in range(len(rz))]
else: # 'closest': pick just the closest time. The list of positions will only have one element.
w = closest_index(ts, eq_time)
r[i] = [rs[w]]
z[i] = [zs[w]]
cer_id = numpy.array([ods['charge_exchange.channel'][i]['identifier'] for i in range(nc)])
# Plot
label_bank = {'T': 'Tang. CER', 'V': 'Vert. CER', 'R': 'Rad. CER'} # These get popped so only one each in legend
j = 0
for i in range(nc):
if mask[i]:
ch_type = cer_id[i][0].upper()
color = colors.get(ch_type, None) # See if a color has been specified for this view direction
cer_mark = ax.plot(
r[i], z[i], marker=markers.get(ch_type, 'x'), linestyle=' ', color=color, label=label_bank.pop(ch_type, ''), **kw
)
colors[ch_type] = color = cer_mark[0].get_color() # Save color for this view dir in case it was None
if (labelevery > 0) and ((i % labelevery) == 0):
ax.text(
numpy.mean(r[i]) + label_dr[j],
numpy.mean(z[i]) + label_dz[j],
cer_id[i],
color=color,
fontsize=notesize,
ha=ha[i],
va=va[i],
)
j += 1
return {'ax': ax}
@add_to__ODS__
def bolometer_overlay(ods, ax=None, reset_fan_color=True, colors=None, **kw):
r"""
Overlays bolometer chords
:param ods: ODS instance
:param ax: axes instance into which to plot (default: gca())
:param reset_fan_color: bool
At the start of each bolometer fan (group of channels), set color to None to let a new one be picked by the
cycler. This will override manually specified color.
:param colors: list of matplotlib color specifications. Do not use a single RGBA style spec.
:param \**kw: Additional keywords for bolometer plot
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Remaining keywords are passed to plot call for drawing lines for the bolometer sightlines
"""
from matplotlib import pyplot
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
nc = get_channel_count(
ods, 'bolometer', check_loc='bolometer.channel.0.line_of_sight.first_point.r', test_checker='~numpy.isnan(checker)'
)
if nc == 0:
return {'ax': ax}
mask = kw.pop('mask', numpy.ones(nc, bool))
r1 = ods['bolometer.channel.:.line_of_sight.first_point.r'][mask]
z1 = ods['bolometer.channel.:.line_of_sight.first_point.z'][mask]
r2 = ods['bolometer.channel.:.line_of_sight.second_point.r'][mask]
z2 = ods['bolometer.channel.:.line_of_sight.second_point.z'][mask]
bolo_id = ods['bolometer.channel.:.identifier'][mask]
ncm = len(r1)
if colors is None:
colors = [kw.pop('color', None)]
ci = 0
colors2 = colors * nc
color = colors2[ci] # Multiplying list by nc makes sure it's always long enough.
kw.setdefault('alpha', 0.8)
default_label = kw.pop('label', None)
labelevery = kw.pop('labelevery', 2)
notesize = kw.pop('notesize', 'xx-small')
default_ha = [['right', 'left'][int(z1[i] > 0)] for i in range(ncm)]
label_ha, label_va, kw = text_alignment_setup(ncm, default_ha=default_ha, default_va='top', **kw)
label_dr, label_dz = label_shifter(ncm, kw)
for i in range(ncm):
if (i > 0) and (bolo_id[i][0] != bolo_id[i - 1][0]) and reset_fan_color:
ci += 1
color = colors2[ci] # Allow color to reset when changing fans
new_label = True
else:
new_label = False
label = 'Bolometers {}'.format(bolo_id[i][0]) if default_label is None else default_label
bolo_line = ax.plot([r1[i], r2[i]], [z1[i], z2[i]], color=color, label=label if new_label or (i == 0) else '', **kw)
if color is None:
color = bolo_line[0].get_color() # Make subsequent lines the same color
if (labelevery > 0) and ((i % labelevery) == 0):
ax.text(
r2[i] + label_dr[i],
z2[i] + label_dz[i],
'{}{}'.format(['\n', ''][int(z1[i] > 0)], bolo_id[i]),
color=color,
ha=label_ha[i],
va=label_va[i],
fontsize=notesize,
)
return {'ax': ax}
@add_to__ODS__
def langmuir_probes_overlay(ods, ax=None, embedded_probes=None, colors=None, show_embedded=True, show_reciprocating=False, **kw):
r"""
Overlays Langmuir probe locations
:param ods: ODS instance
Must contain langmuir_probes with embedded position data
:param ax: Axes instance
:param embedded_probes: list of strings
Specify probe names to use. Only the embedded probes listed will be plotted. Set to None to plot all probes.
Probe names are like 'F11' or 'P-6' (the same as appear on the overlay).
:param colors: list of matplotlib color specifications. Do not use a single RGBA style spec.
:param show_embedded: bool
Recommended: don't enable both embedded and reciprocating plots at the same time; make two calls instead.
It will be easier to handle mapping of masks, colors, etc.
:param show_reciprocating: bool
:param \**kw: Additional keywords.
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Others will be passed to the plot() call for drawing the probes.
"""
from matplotlib import pyplot
# Get a handle on the axes
if ax is None:
ax = pyplot.gca()
# Make sure there is something to plot or else just give up and return
if show_embedded:
if embedded_probes is not None:
embedded_probes = numpy.atleast_1d(embedded_probes)
embedded_indices = []
for probe in ods['langmuir_probes.embedded']:
if ods['langmuir_probes.embedded'][probe]['name'] in embedded_probes:
embedded_indices += [probe]
nce = len(embedded_indices)
else:
nce = get_channel_count(
ods,
'langmuir_probes',
check_loc='langmuir_probes.embedded.0.position.r',
test_checker='~numpy.isnan(checker)',
channels_name='embedded',
)
embedded_indices = range(nce)
else:
nce = 0
embedded_indices = []
if show_reciprocating:
ncr = get_channel_count(
ods,
'langmuir_probes',
check_loc='langmuir_probes.reciprocating.0.plunge.0.position.r',
test_checker='~numpy.isnan(checker)',
channels_name='reciprocating',
)
else:
ncr = 0
if (nce == 0) and (ncr == 0):
return {'ax': ax}
# Set up masks
mask = kw.pop('mask', numpy.ones(nce + ncr, bool))
mask_e = mask[:nce] # For wall-embedded probes
# mask_r = mask[nce:] # For reciprocating probes
if ncr > 0:
raise NotImplementedError('Reciprocating Langmuir probe overlay plots are not ready yet. Try embedded LPs.')
# Get embedded data
r_e = numpy.array([ods['langmuir_probes.embedded'][i]['position.r'] for i in embedded_indices])[mask_e]
z_e = numpy.array([ods['langmuir_probes.embedded'][i]['position.z'] for i in embedded_indices])[mask_e]
lp_id_e = numpy.array([ods['langmuir_probes.embedded'][i]['name'] for i in embedded_indices])[mask_e]
ncem = len(r_e) # Number of Channels, Embedded, Masked
# Get reciprocating data
ncrm = 0 # Coming soon
    nc = ncem + ncrm
# Handle plot keywords
if colors is None:
colors = [kw.pop('color', None)]
ci = 0
color = (colors * nc)[ci] # Multiplying list by nc makes sure it's always long enough.
kw.setdefault('alpha', 0.8)
kw.setdefault('marker', '*')
kw.setdefault('linestyle', ' ')
default_label = kw.pop('label', None)
labelevery = kw.pop('labelevery', 2)
notesize = kw.pop('notesize', 'xx-small')
label_dr, label_dz = label_shifter(ncem, kw)
# Decide which side each probe is on, for aligning annotation labels
ha = ['center'] * ncem
va = ['center'] * ncem
try:
wall_r = ods['wall.description_2d[0].limiter.unit[0].outline.r']
wall_z = ods['wall.description_2d[0].limiter.unit[0].outline.z']
except (KeyError, ValueError):
va = ['bottom' if z_e[i] > 0 else 'top' for i in range(ncem)]
else:
wr0 = numpy.min(wall_r)
wr1 = numpy.max(wall_r)
dr = wr1 - wr0
wz0 = numpy.min(wall_z)
wz1 = numpy.max(wall_z)
dz = wz1 - wz0
lr_margin = 0.2
tb_margin = 0.1
right = wr0 + dr * (1 - lr_margin)
left = wr0 + dr * lr_margin
top = wz0 + dz * (1 - tb_margin)
bottom = wz0 + dz * tb_margin
for i in range(ncem):
if z_e[i] > top:
va[i] = 'bottom'
elif z_e[i] < bottom:
va[i] = 'top'
if r_e[i] > right:
ha[i] = 'left'
elif r_e[i] < left:
ha[i] = 'right'
ha, va, kw = text_alignment_setup(ncem, default_ha=ha, default_va=va, **kw)
# Plot
for i in range(ncem):
label = 'Embedded Langmuir probes' if default_label is None else default_label
lp_mark = ax.plot(r_e[i], z_e[i], color=color, label=label if i == 0 else '', **kw)
if color is None:
color = lp_mark[0].get_color() # Make subsequent marks the same color
if (labelevery > 0) and ((i % labelevery) == 0):
ax.text(
r_e[i] + label_dr[i],
z_e[i] + label_dz[i],
'\n {} \n'.format(lp_id_e[i]),
color=color,
ha=ha[i],
va=va[i],
fontsize=notesize,
)
return {'ax': ax}
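# --- Hedged usage sketch (added; not part of the original module) ------------
# The hardware overlay functions above share one calling convention: pass an
# ODS, an optional axes instance, and the standard overlay keywords (mask,
# labelevery, notesize, ...); remaining keywords go to the underlying plot
# call.  `my_ods` is a placeholder for an ODS already populated with the
# relevant hardware geometry.
def _example_hardware_overlays(my_ods):
    from matplotlib import pyplot
    fig, ax = pyplot.subplots()
    interferometer_overlay(my_ods, ax=ax, labelevery=1, notesize='x-small')
    thomson_scattering_overlay(my_ods, ax=ax, labelevery=5)
    bolometer_overlay(my_ods, ax=ax, reset_fan_color=True)
    ax.set_aspect('equal')
    return ax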
@add_to__ODS__
def position_control_overlay(
ods, ax=None, t=None, xpoint_marker='x', strike_marker='s', labels=None, measured_xpoint_marker='+', show_measured_xpoint=False, **kw
):
r"""
Overlays position_control data
:param ods: ODS instance
        Must contain pulse_schedule.position_control data
:param ax: Axes instance
:param t: float
Time to display in seconds. If not specified, defaults to the average time of all boundary R coordinate samples.
:param xpoint_marker: string
Matplotlib marker spec for X-point target(s)
:param strike_marker: string
Matplotlib marker spec for strike point target(s)
:param labels: list of strings [optional]
Override default point labels. Length must be long enough to cover all points.
:param show_measured_xpoint: bool
In addition to the target X-point, mark the measured X-point coordinates.
:param measured_xpoint_marker: string
Matplotlib marker spec for X-point measurement(s)
:param \**kw: Additional keywords.
* Accepts standard omas_plot overlay keywords listed in overlay() documentation: mask, labelevery, ...
* Others will be passed to the plot() call for drawing shape control targets
"""
import numpy as np
from matplotlib import pyplot
from matplotlib import rcParams
from scipy.interpolate import interp1d
import time
timing_ref = kw.pop('timing_ref', None)
if timing_ref is not None:
print(time.time() - timing_ref, 'position_control_overlay start')
# Unpack basics
device = ods['dataset_description.data_entry'].get('machine', '')
shot = ods['dataset_description.data_entry'].get('pulse', 0)
if t is None:
try:
t = np.nanmean(ods['pulse_schedule.position_control.boundary_outline[:].r.reference.data'])
except (ValueError, IndexError):
t = 0
if ax is None:
ax = pyplot.gca()
# Handle multi-slice request
if timing_ref is not None:
print(time.time() - timing_ref, 'position_control_overlay setup 1')
if len(np.atleast_1d(t)) > 1:
for tt in t:
position_control_overlay(
ods,
ax=ax,
t=tt,
xpoint_marker=xpoint_marker,
strike_marker=strike_marker,
show_measured_xpoint=show_measured_xpoint,
**copy.deepcopy(kw),
)
return {'ax': ax}
else:
t = np.atleast_1d(t)[0]
labelevery = kw.pop('labelevery', 1)
label_ha = kw.pop('label_ha', None)
label_va = kw.pop('label_va', None)
notesize = kw.pop('notesize', 'xx-small')
if timing_ref is not None:
print(time.time() - timing_ref, 'position_control_overlay setup 2')
# Select data
b = ods['pulse_schedule.position_control.boundary_outline']
x = ods['pulse_schedule.position_control.x_point']
s = ods['pulse_schedule.position_control.strike_point']
ikw = dict(bounds_error=False, fill_value=np.NaN)
try:
        nbp = np.shape(b['[:].r.reference.data'])
import argparse
from pathlib import Path
import numpy as np
import tensorflow as tf
import gym
from tqdm import tqdm
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
from imgcat import imgcat
import rospy
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
# Import my own libraries
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/learner/baselines/')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Only show ERROR log
from tf_commons.ops import *
class PPO2Agent(object):
def __init__(self, env, env_type, path, stochastic=False, gpu=True):
from baselines.common.policies import build_policy
from baselines.ppo2.model import Model
self.graph = tf.Graph()
if gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(device_count = {'GPU': 0})
self.sess = tf.Session(graph=self.graph,config=config)
with self.graph.as_default():
with self.sess.as_default():
ob_space = env.observation_space
ac_space = env.action_space
if env_type == 'atari':
policy = build_policy(env,'cnn')
elif env_type == 'mujoco':
policy = build_policy(env,'mlp')
elif env_type == 'gazebo':
policy = build_policy(env, 'mlp')
else:
                    assert False, 'unsupported env_type'
make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=1, nbatch_train=1,
nsteps=1, ent_coef=0., vf_coef=0.,
max_grad_norm=0.)
self.model = make_model()
self.model_path = path
self.model.load(path)
if env_type == 'mujoco':
with open(path+'.env_stat.pkl', 'rb') as f :
import pickle
s = pickle.load(f)
self.ob_rms = s['ob_rms']
self.ret_rms = s['ret_rms']
self.clipob = 10.
self.epsilon = 1e-8
elif env_type == 'gazebo':
with open(path + '.env_stat.pkl', 'rb') as f:
import pickle
s = pickle.load(f)
self.ob_rms = s['ob_rms']
self.ret_rms = s['ret_rms']
self.clipob = 10.
self.epsilon = 1e-8
else:
self.ob_rms = None
self.stochastic = stochastic
def act(self, obs, reward, done):
if self.ob_rms:
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
with self.graph.as_default():
with self.sess.as_default():
if self.stochastic:
a,v,state,neglogp = self.model.step(obs)
else:
a = self.model.act_model.act(obs)
return a
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, action_space):
self.action_space = action_space
self.model_path = 'random_agent'
def act(self, observation, reward, done):
return self.action_space.sample()[None]
class Model(object):
def __init__(self,include_action,ob_dim,ac_dim,batch_size=64,num_layers=2,embedding_dims=256,steps=None):
self.include_action = include_action
in_dims = ob_dim+ac_dim if include_action else ob_dim
self.inp = tf.placeholder(tf.float32,[None,in_dims])
self.x = tf.placeholder(tf.float32,[None,in_dims]) #[B*steps,in_dim]
self.y = tf.placeholder(tf.float32,[None,in_dims])
self.x_split = tf.placeholder(tf.int32,[batch_size]) # B-lengthed vector indicating the size of each steps
self.y_split = tf.placeholder(tf.int32,[batch_size]) # B-lengthed vector indicating the size of each steps
self.l = tf.placeholder(tf.int32,[batch_size]) # [0 when x is better 1 when y is better]
self.l2_reg = tf.placeholder(tf.float32,[]) # [0 when x is better 1 when y is better]
with tf.variable_scope('weights') as param_scope:
self.fcs = []
last_dims = in_dims
for l in range(num_layers):
self.fcs.append(Linear('fc%d'%(l+1),last_dims,embedding_dims)) #(l+1) is gross, but for backward compatibility
last_dims = embedding_dims
self.fcs.append(Linear('fc%d'%(num_layers+1),last_dims,1))
self.param_scope = param_scope
# build graph
def _reward(x):
for fc in self.fcs[:-1]:
x = tf.nn.relu(fc(x))
r = tf.squeeze(self.fcs[-1](x),axis=1)
return x, r
self.fv, self.r = _reward(self.inp)
_, rs_xs = _reward(self.x)
self.v_x = tf.stack([tf.reduce_sum(rs_x) for rs_x in tf.split(rs_xs,self.x_split,axis=0)],axis=0)
_, rs_ys = _reward(self.y)
self.v_y = tf.stack([tf.reduce_sum(rs_y) for rs_y in tf.split(rs_ys,self.y_split,axis=0)],axis=0)
logits = tf.stack([self.v_x,self.v_y],axis=1) #[None,2]
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=self.l)
self.loss = tf.reduce_mean(loss,axis=0)
weight_decay = 0.
for fc in self.fcs:
weight_decay += tf.reduce_sum(fc.w**2)
self.l2_loss = self.l2_reg * weight_decay
pred = tf.cast(tf.greater(self.v_y,self.v_x),tf.int32)
self.acc = tf.reduce_mean(tf.cast(tf.equal(pred,self.l),tf.float32))
self.optim = tf.train.AdamOptimizer(1e-4)
self.update_op = self.optim.minimize(self.loss+self.l2_loss,var_list=self.parameters(train=True))
self.saver = tf.train.Saver(var_list=self.parameters(train=False),max_to_keep=0)
def parameters(self,train=False):
if train:
return tf.trainable_variables(self.param_scope.name)
else:
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,self.param_scope.name)
def train(self,D,batch_size=64,iter=10000,l2_reg=0.01,noise_level=0.1,debug=False):
"""
        Training can be terminated early once validation accuracy becomes large enough
        (the early-stopping block below is currently commented out).
        args:
            D: list of triplets (\sigma^1,\sigma^2,\mu)
            where
                sigma^{1,2}: shape of [steps,in_dims]
                mu : 0 or 1
"""
sess = tf.get_default_session()
idxes = np.random.permutation(len(D))
train_idxes = idxes[:int(len(D)*0.8)]
valid_idxes = idxes[int(len(D)*0.8):]
def _batch(idx_list,add_noise):
batch = []
if len(idx_list) > batch_size:
idxes = np.random.choice(idx_list,batch_size,replace=False)
else:
idxes = idx_list
for i in idxes:
batch.append(D[i])
b_x,b_y,b_l = zip(*batch)
x_split = np.array([len(x) for x in b_x])
y_split = np.array([len(y) for y in b_y])
b_x,b_y,b_l = np.concatenate(b_x,axis=0),np.concatenate(b_y,axis=0),np.array(b_l)
if add_noise:
b_l = (b_l + np.random.binomial(1,noise_level,batch_size)) % 2 #Flip it with probability 0.1
return b_x,b_y,x_split,y_split,b_l
for it in tqdm(range(iter),dynamic_ncols=True):
b_x,b_y,x_split,y_split,b_l = _batch(train_idxes,add_noise=True)
loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l,
self.l2_reg:l2_reg,
})
if debug:
if it % 100 == 0 or it < 10:
b_x,b_y,x_split,y_split,b_l = _batch(valid_idxes,add_noise=False)
valid_acc = sess.run(self.acc,feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l
})
tqdm.write(('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc)))
#if valid_acc >= 0.95:
# print('loss: %f (l2_loss: %f), acc: %f, valid_acc: %f'%(loss,l2_loss,acc,valid_acc))
# print('early termination@%08d'%it)
# break
def train_with_dataset(self,dataset,batch_size,include_action=False,iter=10000,l2_reg=0.01,debug=False):
sess = tf.get_default_session()
for it in tqdm(range(iter),dynamic_ncols=True):
b_x,b_y,x_split,y_split,b_l = dataset.batch(batch_size=batch_size,include_action=include_action)
loss,l2_loss,acc,_ = sess.run([self.loss,self.l2_loss,self.acc,self.update_op],feed_dict={
self.x:b_x,
self.y:b_y,
self.x_split:x_split,
self.y_split:y_split,
self.l:b_l,
self.l2_reg:l2_reg,
})
if debug:
if it % 100 == 0 or it < 10:
tqdm.write(('loss: %f (l2_loss: %f), acc: %f'%(loss,l2_loss,acc)))
def eval(self,D,batch_size=64):
sess = tf.get_default_session()
b_x,b_y,b_l = zip(*D)
b_x,b_y,b_l = np.array(b_x),np.array(b_y),np.array(b_l)
b_r_x, b_acc = [], 0.
for i in range(0,len(b_x),batch_size):
sum_r_x, acc = sess.run([self.sum_r_x,self.acc],feed_dict={
self.x:b_x[i:i+batch_size],
self.y:b_y[i:i+batch_size],
self.l:b_l[i:i+batch_size]
})
b_r_x.append(sum_r_x)
b_acc += len(sum_r_x)*acc
return np.concatenate(b_r_x,axis=0), b_acc/len(b_x)
def get_reward(self,obs,acs,batch_size=1024):
sess = tf.get_default_session()
if self.include_action:
inp = np.concatenate((obs,acs),axis=1)
else:
inp = obs
b_r = []
for i in range(0,len(obs),batch_size):
r = sess.run(self.r,feed_dict={
self.inp:inp[i:i+batch_size]
})
b_r.append(r)
return np.concatenate(b_r,axis=0)
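# --- Hedged usage sketch (added; not part of the original script) ------------
# Model.train expects D as a list of (sigma_1, sigma_2, mu) triplets, where
# each sigma is a [steps, in_dims] array and mu is 0/1 marking which snippet is
# preferred.  The random data below only illustrates the expected shapes and
# the TF1 session handling; it is not a meaningful preference dataset.
def _example_train_reward_model(ob_dim=11, n_pairs=256, steps=50):
    D = []
    for _ in range(n_pairs):
        sigma_1 = np.random.randn(steps, ob_dim)
        sigma_2 = np.random.randn(steps, ob_dim)
        mu = int(np.random.rand() < 0.5)
        D.append((sigma_1, sigma_2, mu))
    with tf.Graph().as_default(), tf.Session() as sess:
        model = Model(include_action=False, ob_dim=ob_dim, ac_dim=0, batch_size=64)
        sess.run(tf.global_variables_initializer())
        model.train(D, batch_size=64, iter=100, debug=True)
    return model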
class GTDataset(object):
def __init__(self,env):
self.env = env
self.unwrapped = env
while hasattr(self.unwrapped,'env'):
self.unwrapped = self.unwrapped.env
def gen_traj(self,agent,min_length):
max_x_pos = -99999
obs, actions, rewards = [self.env.reset()], [], []
while True:
action = agent.act(obs[-1], None, None)
ob, reward, done, _ = self.env.step(action)
if self.unwrapped.sim.data.qpos[0] > max_x_pos:
max_x_pos = self.unwrapped.sim.data.qpos[0]
obs.append(ob)
actions.append(action)
rewards.append(reward)
if done:
if len(obs) < min_length:
obs.pop()
obs.append(self.env.reset())
else:
obs.pop()
break
return (np.stack(obs,axis=0), np.concatenate(actions,axis=0), np.array(rewards)), max_x_pos
def prebuilt(self,agents,min_length):
assert len(agents)>0, 'no agent given'
trajs = []
for agent in tqdm(agents):
traj, max_x_pos = self.gen_traj(agent,min_length)
trajs.append(traj)
tqdm.write('model: %s avg reward: %f max_x_pos: %f'%(agent.model_path,np.sum(traj[2]),max_x_pos))
obs,actions,rewards = zip(*trajs)
        self.trajs = (np.concatenate(obs,axis=0),np.concatenate(actions,axis=0),np.concatenate(rewards,axis=0))
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import os
import sys
import pdb
import time
import math
import argparse
import time
from sklearn.cluster import KMeans
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import rgb2hex
from model import Split_GNN, Split_BaselineGNN
from data_generator import Generator
from data_generator_cifar import Generator as GeneratorCIFAR
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true')
parser.add_argument('--save_file', nargs='?', const=1, type=str, default='')
parser.add_argument('--load_file', nargs='?', const=1, type=str, default='')
parser.add_argument('--output_file', nargs='?', const=1, type=str, default='')
parser.add_argument('--dataset', nargs='?', const=1, type=str, default='GM')
parser.add_argument('--dim', nargs='?', const=1, type=int, default=27)
parser.add_argument('--num_examples_train', nargs='?', const=1, type=int, default=10000)
parser.add_argument('--num_examples_test', nargs='?', const=1, type=int, default=1000)
parser.add_argument('--N', nargs='?', const=1, type=int, default=200)
parser.add_argument('--K', nargs='?', const=1, type=int, default=2)
parser.add_argument('--clusters', nargs='?', const=1, type=int, default=4)
parser.add_argument('--clip_grad_norm', nargs='?', const=1, type=float, default=40.0)
parser.add_argument('--batch_size', nargs='?', const=1, type=int, default=32)
parser.add_argument('--sigma2', nargs='?', const=1, type=float, default=1.)
parser.add_argument('--reg_factor', nargs='?', const=1, type=int, default=0.0)
parser.add_argument('--k_step', nargs='?', const=1, type=int, default=0)
parser.add_argument('--n_samples', nargs='?', const=1, type=int, default=10)
parser.add_argument('--last', action='store_false')
parser.add_argument('--baseline', action='store_true')
###############################################################################
# GNN Arguments #
###############################################################################
parser.add_argument('--num_features', nargs='?', const=1, type=int, default=32)
parser.add_argument('--num_layers', nargs='?', const=1, type=int, default=20)
parser.add_argument('--normalize', action='store_true')
args = parser.parse_args()
# args.save_file = '/home/anowak/DCN-for-KMEANS/model/exp1'
# args.load_file = '/home/anowak/DCN-for-KMEANS/model/exp1'
if torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
dtype_l = torch.cuda.LongTensor
torch.cuda.manual_seed(0)
else:
dtype = torch.FloatTensor
dtype_l = torch.LongTensor
torch.manual_seed(0)
template_train1 = '{:<10} {:<10} {:<10} {:<10} {:<10} {:<10} {:<10} '
template_train2 = '{:<10} {:<10} {:<10.3f} {:<10.5f} {:<10.5f} {:<10.5f} {:<10.3f} '
template_train3 = '{:<10} {:<10} {:<10} {:<10.5f} {:<10.5f} {:<10.5f} {:<10} \n'
info_train = ['TRAIN', 'iteration', 'loss', 'samples', 'best_smpl', 'trivial', 'elapsed']
if args.output_file != '':
class Logger2(object):
def __init__(self, path):
self.terminal = sys.stdout
self.log = open(path, 'a')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
sys.stdout = Logger2(args.output_file)
class Logger():
dicc = {}
def add(self, name, val):
if name in self.dicc:
lis = self.dicc[name]
lis.append(val)
self.dicc[name] = lis
else:
self.dicc[name] = [val]
def empty(self, name):
self.dicc[name] = []
def empty_all(self):
self.dicc = {}
def get(self, name):
return self.dicc[name]
def plot_train_logs(cost_train):
plt.figure(1, figsize=(8,6))
plt.clf()
iters = range(len(cost_train))
plt.semilogy(iters, cost_train, 'b')
plt.xlabel('iterations')
plt.ylabel('Average Mean cost')
plt.title('Average Mean cost Training')
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=2.0)
path = os.path.join('./plots/logs', 'training.png')
plt.savefig(path)
def plot_clusters(num, e, centers, points, fig, model):
"""
    :param num: iteration number (used in the output filename)
    :param e: cluster assignment of each point, (bz, N), 0-indexed
    :param centers: cluster centers, None or (bz, #centers, dim)
    :param points: points tensor, (bz, N, dim)
    :param fig: index of the batch element to draw
    :param model: model name (used in the output filename)
:return:
"""
plt.figure(0)
plt.clf()
plt.gca().set_xlim([-0.05,1.05])
plt.gca().set_ylim([-0.05,1.05])
clusters = e[fig].max()+1
colors = cm.rainbow(np.linspace(0,1,clusters))
for i in range(clusters):
c = colors[i][:-1]
mask = e[fig] == i
        x = torch.masked_select(points[fig,:,0], mask)  # only dimensions 0 and 1 are drawn
y = torch.masked_select(points[fig,:,1], mask)
plt.plot(x.cpu().numpy(), y.cpu().numpy(), 'o', c=rgb2hex(c))
if centers is not None:
center = centers[i]
plt.plot([center.data[0]], [center.data[1]], '*', c=rgb2hex(c))
plt.title('clustering')
plt.savefig('./plots/clustering_it_{}_{}.png'.format(num, model))
def create_input(points, sigma2):
bs, N, _ = points.size() #points has size bs, N, dim
OP = torch.zeros(bs,N,N,4).type(dtype)
E = torch.eye(N).type(dtype).unsqueeze(0).expand(bs,N,N)
OP[:,:,:,0] = E
W = points.unsqueeze(1).expand(bs,N,N,dim) - points.unsqueeze(2).expand(bs,N,N,dim)
dists2 = (W * W).sum(3)
dists = torch.sqrt(dists2)
    W = torch.exp(-dists2 / sigma2)  # note the squared distance: w_ij = exp(-dist^2 / sigma2) as in the paper; pairwise affinity feature
    OP[:,:,:,1] = W
    D = E * W.sum(2,True).expand(bs,N,N)  # degree matrix, nonzero only on the diagonal
    OP[:,:,:,2] = D
    U = (torch.ones(N,N).type(dtype)/N).unsqueeze(0).expand(bs,N,N)  # every entry equals 1/N
OP[:,:,:,3] = U
OP = Variable(OP)
x = Variable(points)
Y = Variable(W.clone())
# Normalize inputs
if normalize:
mu = x.sum(1)/N
mu_ext = mu.unsqueeze(1).expand_as(x)
var = ((x - mu_ext)*(x - mu_ext)).sum(1)/N
var_ext = var.unsqueeze(1).expand_as(x)
x = x - mu_ext
x = x/(10 * var_ext)
    return (OP, x, Y), dists  # ([E; W; DiagW; {1/N}], points, W), pairwise distances between points
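# --- Illustrative sketch (added; not in the original) ------------------------
# create_input stacks four N x N operators per batch element: the identity E,
# the Gaussian affinity W with w_ij = exp(-||p_i - p_j||^2 / sigma2), the
# diagonal degree matrix D, and the uniform averaging operator with entries
# 1/N.  The helper below only reports the resulting shapes; it relies on the
# module-level globals (dim, dtype, normalize) being configured as in the
# training script.
def _example_create_input_shapes(bs=2, n_points=5):
    pts = torch.rand(bs, n_points, dim).type(dtype)
    (OP, x, Y), dists = create_input(pts, sigma2=1.0)
    # OP: (bs, N, N, 4), x: (bs, N, dim), Y and dists: (bs, N, N)
    return OP.size(), x.size(), Y.size(), dists.size()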
def sample_K(probs, K, mode='test'):
probs = 1e-6 + probs*(1 - 2e-6) # to avoid log(0)
probs = probs.view(-1, 2**K) # (bz, N, 2^K)
if mode == 'train':
bin_sample = torch.multinomial(probs, 1).detach()
else:
bin_sample = probs.max(1)[1].detach().unsqueeze(1)
sample = bin_sample.clone().type(dtype)
log_probs_samples = torch.log(probs).gather(1, bin_sample).squeeze()
log_probs_samples = log_probs_samples.view(batch_size, N).sum(1)
return bin_sample.data.view(batch_size, N), log_probs_samples
def sample_one(probs, mode='test'):
probs = 1e-6 + probs*(1 - 2e-6) # to avoid log(0)
if mode == 'train':
rand = torch.zeros(*probs.size()).type(dtype)
nn.init.uniform(rand)
else:
        rand = torch.ones(*probs.size()).type(dtype) / 2  # at test time this is effectively a greedy >0.5 threshold
bin_sample = probs > Variable(rand)
sample = bin_sample.clone().type(dtype)
log_probs_samples = (sample*torch.log(probs) + (1-sample)*torch.log(1-probs)).sum(1)
return bin_sample.data, log_probs_samples
def update_input(input, dists, sample, sigma2, e, k):
OP, x, Y = input
bs = x.size(0)
N = x.size(1)
sample = sample.float()
mask = sample.unsqueeze(1).expand(bs,N,N)*sample.unsqueeze(2).expand(bs,N,N)
mask += (1-sample).unsqueeze(1).expand(bs,N,N)*(1-sample).unsqueeze(2).expand(bs,N,N)
U = (OP.data[:,:,:,3]>0).float()*mask
W = dists*U
Wm = W.max(2,True)[0].expand_as(W).max(1,True)[0].expand_as(W)
W = W / Wm.clamp(min=1e-6) * np.sqrt(2)
W = torch.exp(- W*W / sigma2)
OP[:,:,:,1] = Variable(W)
D = OP.data[:,:,:,0] * OP.data[:,:,:,1].sum(2,True).expand(bs,N,N)
OP[:,:,:,2] = Variable(D)
U = U / U.sum(2,True).expand_as(U)
OP[:,:,:,3] = Variable(U)
Y = Variable(OP[:,:,:,1].data.clone())
# Normalize inputs
if normalize:
z = Variable(torch.zeros((bs, N, 2**k))).type(dtype)
e = e.unsqueeze(2)
o = Variable(torch.ones((bs, N, 1))).type(dtype)
z = z.scatter_(2, e, o)
z = z.unsqueeze(2).expand(bs, N, 2, 2**k)
z_bar = z * x.unsqueeze(3).expand_as(z)
Nk = z.sum(1)
mu = z_bar.sum(1)/Nk
mu_ext = mu.unsqueeze(1).expand_as(z)*z
var = ((z_bar - mu_ext)*(z_bar - mu_ext)).sum(1)/Nk
var_ext = var.unsqueeze(1).expand_as(z)*z
x = x - mu_ext.sum(3)
x = x/(10 * var_ext.sum(3))
# plt.figure(1)
# plt.clf()
# plt.plot(x[0,:,0].data.cpu().numpy(), x[0,:,1].data.cpu().numpy(), 'o')
# plt.savefig('./plots/norm.png')
# pdb.set_trace()
return OP, x, Y
def compute_variance(e, probs):
bs, N = probs.size()
variance = Variable(torch.zeros(bs).type(dtype))
for i in range(e.max()+1):
mask = Variable((e == i).float())
Ns = mask.sum(1).clamp(min=1)
masked_probs = probs*mask
probs_mean = (masked_probs).sum(1) / Ns
v = (masked_probs*masked_probs).sum(1) / Ns - probs_mean*probs_mean
variance += v
return variance
def compute_reward(e, K, points):
bs, N, _ = points.size()
reward2 = Variable(torch.zeros(bs).type(dtype))
reward3 = Variable(torch.zeros(bs).type(dtype))
c = []
    for k in range(2**K):  # assumes a full binary split tree by default
mask = Variable((e == k).float()).unsqueeze(2).expand_as(points) # (bz, N, dim)
N1 = mask.sum(1)
center = points*mask
center = center.sum(1) / N1.clamp(min=1) # (bz, dim)
c.append(center[0])
subs = ((points-center.unsqueeze(1).expand_as(points)) * mask) # (bz, N, dim)
subs2 = (subs * subs).sum(2).sum(1) / N # (bz,)
subs3 = torch.abs(subs * subs * subs).sum(2).sum(1) / N
reward2 += subs2
reward3 += subs3
return reward2, reward3, c
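# --- Illustrative sketch (added; not in the original) ------------------------
# compute_reward returns, per batch element, the mean squared (reward2) and
# cubed (reward3) distances of each point to its assigned cluster mean, plus
# the cluster centers of the first batch element.  For two coincident blobs
# split perfectly, reward2 is ~0; mixing the blobs in one cluster gives a
# strictly larger cost.
def _example_compute_reward_toy():
    pts = Variable(torch.cat([torch.zeros(1, 10, 2), torch.ones(1, 10, 2)], 1).type(dtype))
    e_good = torch.cat([torch.zeros(1, 10), torch.ones(1, 10)], 1).type(dtype_l)
    reward2, reward3, centers = compute_reward(e_good, 1, pts)
    return reward2  # ~0 for the perfect split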
def execute(points, K, n_samples, sigma2, reg_factor, mode='test'):
"""
:param points: (bz, N, dim)
    :param K: depth of the split tree (default 2)
    :param n_samples: number of Monte Carlo samples (default 10)
    :param sigma2: kernel bandwidth (default 1.)
    :param reg_factor: split-regularization weight (default 0.)
:param mode: "train" or "test"
:return:
"""
bs, N, _ = points.size()
    e = torch.zeros(bs, N).type(dtype_l)  # cluster assignment labels
    input, dists = create_input(points.data, sigma2)  # ([E; W; DiagW; {1/N}], points, W) and pairwise distances
    loss_total = Variable(torch.zeros(1).type(dtype))
    for k in range(K):  # full binary tree: every point is split again at each level
        scores,_ = gnn(input)  # model defined at module level, output (bz, N)
        probs = F.sigmoid(scores)
        if mode == 'train':  # every split can (optionally) be guided by a reward
            variance = compute_variance(e, probs)  # the SPLIT REGULARIZATION term from the paper
variance = variance.sum() / bs
Lgp = Variable(torch.zeros(n_samples, bs).type(dtype)) #FIXME Lgp would only record the last split when last == True
Reward2 = Variable(torch.zeros(n_samples, bs).type(dtype))
Reward3 = Variable(torch.zeros(n_samples, bs).type(dtype))
for i in range(n_samples):
                Samplei, Lgp[i] = sample_one(probs, 'train')  # draw n_samples samples while probs stay fixed
Ei = e*2 + Samplei.long()
Reward2[i], _,_ = compute_reward(Ei, k+1, points)
baseline = Reward2.mean(0,True).expand_as(Reward3)
loss = 0.0
if (last and k == K-1) or not last:
                loss = ((Reward2-baseline) * Lgp).sum(1).sum(0) / n_samples / bs  # baseline is the mean over the n_samples draws
loss_total = loss_total + loss - reg_factor*variance
show_loss = Reward2.data.mean()
sample, lgp = sample_one(probs, 'test') # (bz, N) (bz, )
        e = e*2 + sample.long()  # record this round's split result
reward,_,c = compute_reward(e, k+1, points)
if mode == 'test':
show_loss = reward.data.mean()
if k < K-1:
input = update_input(input, dists, sample, sigma2, e, k+1)
if mode == 'test':
return e, None, show_loss, c
else:
return e, loss_total, show_loss, c
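# --- Hedged usage sketch (added; not in the original) ------------------------
# execute() performs K successive GNN-guided binary splits with REINFORCE-style
# gradients; it relies on the module-level `gnn`, `last`, dtype/dtype_l globals
# set up in the training script.  A single training step would look roughly
# like this, with `points` a Variable of shape (bz, N, dim) and `optimizer` an
# optim.Adam over gnn.parameters().
def _example_execute_step(points, optimizer):
    e, loss, show_loss, centers = execute(points, K=2, n_samples=10,
                                           sigma2=1.0, reg_factor=0.0, mode='train')
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return e, show_loss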
def execute_baseline(points, K, n_samples, sigma2, reg_factor, mode='test'):
bs, N, _ = points.size()
e = torch.zeros(bs, N).type(dtype_l)
input, dists = create_input(points.data, sigma2)
loss_total = Variable(torch.zeros(1).type(dtype))
scores,_ = gnn(input)
probs = F.softmax(scores.permute(2, 1, 0)).permute(2, 1, 0)
    if mode == 'train':  # single shot: sample all K splits at once
Lgp = Variable(torch.zeros(n_samples, bs).type(dtype))
Reward2 = Variable(torch.zeros(n_samples, bs).type(dtype))
Reward3 = Variable(torch.zeros(n_samples, bs).type(dtype))
for i in range(n_samples):
Samplei, Lgp[i] = sample_K(probs, K, 'train')
Reward2[i], _,_ = compute_reward(Samplei, K, points)
baseline = Reward2.mean(0,True).expand_as(Reward3)
loss = ((Reward2-baseline) * Lgp).sum(1).sum(0) / n_samples / bs
loss_total = loss_total + loss
show_loss = Reward2.data.mean()
sample, lgp = sample_K(probs, K, 'test')
reward, _, c = compute_reward(sample, K, points)
if mode == 'test':
show_loss = reward.data.mean()
if mode == 'test':
return sample, None, show_loss, c
else:
return sample, loss_total, show_loss, c
def save_model(path, model):
torch.save(model.state_dict(), path)
print('Model Saved.')
def load_model(path, model):
if os.path.exists(path):
model.load_state_dict(torch.load(path))
print('GNN successfully loaded from {}'.format(path))
return model
else:
raise ValueError('Parameter path {} does not exist.'.format(path))
def Lloyds(input, n_clusters=8):
"""常规的Kmeans 计算了与Split GNN相同的Cost
:param input: 点 (bz, N, dim) tensor
:param n_clusters: 聚类中心个数
:return:
"""
kmeans = KMeans(n_clusters=n_clusters)
nb_pbs, nb_samples, d = input.shape # (bz, N, dim)
Costs = []
for i in range(nb_pbs):
inp = input[i]
labels = kmeans.fit_predict(inp)
cost = 0
for cl in range(n_clusters):
ind = np.where(labels==cl)[0]
            if ind.shape[0] > 0:  # cluster cl is non-empty
                x = inp[ind]  # (n, dim)
                mean = x.mean(axis=0)
                cost += np.mean(np.sum((x - mean)**2, axis=1), axis=0)*ind.shape[0]  # n * delta from the paper
# cost += np.var(inp[ind], axis=0)*ind.shape[0]
Costs.append(cost/nb_samples) #FIXME bug fixed
Cost = sum(Costs)/len(Costs)
return Cost
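# --- Illustrative sketch (added; not in the original) ------------------------
# Lloyds gives the classical K-means baseline for the same cost the GNN is
# trained to minimise; on uniform random 2-D points the cost drops as the
# number of clusters grows.
def _example_lloyds_baseline(bz=4, n_points=200, n_clusters=4):
    pts = np.random.rand(bz, n_points, 2)  # (bz, N, dim)
    return Lloyds(pts, n_clusters=n_clusters)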
def Lloyds2(input, ind, E, k, K=2):
"""
:param input: (N, dim)
    :param ind: indices selecting the subset of points to split
    :param E: (N,) labels
    :param k: current recursion depth, i.e. how many levels of the tree have been expanded
    :param K: total depth of the split tree (binary splits), default 2
:return:
"""
# split at first place
inp = input[ind]
if inp.shape[0] >= 2:
kmeans = KMeans(n_clusters=2, max_iter=20)
        labels = kmeans.fit_predict(inp)  # labels consist of 0s and 1s
    else:
        labels = np.zeros(ind.shape[0])  # array([0])
    E[ind] = 2*E[ind] + labels  # binary-style encoding (00, 01, 10, 11): bit k records the k-th split; suited to a full binary tree
# recursion
if k == K-1:
return E
else:
ind1 = ind[np.where(labels == 0)[0]]
        ind2 = ind[np.where(labels == 1)[0]]
import math
import os
from preprocessing.utils import landmark_alignment
import random
import sys
import traceback
import cv2
import numpy as np
import pandas as pd
import skimage.draw
from albumentations import ImageCompression, OneOf, GaussianBlur, Blur
from albumentations.augmentations.functional import image_compression, rot90
from albumentations.pytorch.functional import img_to_tensor
from scipy.ndimage import binary_erosion, binary_dilation
from skimage import measure
from torch.utils.data import Dataset
import dlib
from training.datasets.validation_set import PUBLIC_SET
from preprocessing.retinaface.detect import FaceDetector
def prepare_bit_masks(mask):
h, w = mask.shape
mid_w = w // 2
    mid_h = h // 2
masks = []
ones = np.ones_like(mask)
ones[:mid_h] = 0
masks.append(ones)
ones = np.ones_like(mask)
ones[mid_h:] = 0
masks.append(ones)
ones = np.ones_like(mask)
ones[:, :mid_w] = 0
masks.append(ones)
ones = np.ones_like(mask)
ones[:, mid_w:] = 0
masks.append(ones)
ones = np.ones_like(mask)
ones[:mid_h, :mid_w] = 0
ones[mid_h:, mid_w:] = 0
masks.append(ones)
ones = np.ones_like(mask)
ones[:mid_h, mid_w:] = 0
ones[mid_h:, :mid_w] = 0
masks.append(ones)
return masks
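# --- Illustrative sketch (added; not in the original) ------------------------
# prepare_bit_masks builds six binary masks that blank the top, bottom, left or
# right half of the image, or one of the two diagonal quadrant pairs; they are
# used to drop parts of a face during augmentation.
def _example_prepare_bit_masks():
    mask = np.ones((4, 4), dtype=np.uint8)
    masks = prepare_bit_masks(mask)
    assert len(masks) == 6 and masks[0].shape == mask.shape
    return masks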
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('libs/shape_predictor_68_face_landmarks.dat')
def blackout_convex_hull(img):
try:
rect = detector(img)[0]
sp = predictor(img, rect)
landmarks = np.array([[p.x, p.y] for p in sp.parts()])
outline = landmarks[[*range(17), *range(26, 16, -1)]]
Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
cropped_img = np.zeros(img.shape[:2], dtype=np.uint8)
cropped_img[Y, X] = 1
# if random.random() > 0.5:
# img[cropped_img == 0] = 0
# #leave only face
# return img
y, x = measure.centroid(cropped_img)
y = int(y)
x = int(x)
first = random.random() > 0.5
if random.random() > 0.5:
if first:
cropped_img[:y, :] = 0
else:
cropped_img[y:, :] = 0
else:
if first:
cropped_img[:, :x] = 0
else:
cropped_img[:, x:] = 0
img[cropped_img > 0] = 0
except Exception as e:
pass
def dist(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def remove_eyes(image, landmarks):
image = image.copy()
x1, y1, x2, y2 = landmarks[:4]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
mask = np.zeros_like(image[..., 0])
line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2)
w = dist((x1, y1), (x2, y2))
dilation = int(w // 4)
line = binary_dilation(line, iterations=dilation)
image[line, :] = 0
return image
def remove_nose(image, landmarks):
image = image.copy()
x1, y1, x2, y2 = landmarks[:4]
x_center, y_center = (x1 + x2) / 2, (y1 + y2) / 2
x3, y3 = landmarks[4:6]
x_center, y_center, x3, y3 = int(x_center), int(y_center), int(x3), int(y3)
mask = np.zeros_like(image[..., 0])
line = cv2.line(mask, (x3, y3), (x_center, y_center),
color=(1), thickness=2)
w = dist((x1, y1), (x2, y2))
dilation = int(w // 4)
line = binary_dilation(line, iterations=dilation)
image[line, :] = 0
return image
def remove_mouth(image, landmarks):
image = image.copy()
x1, y1, x2, y2 = landmarks[6:]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
mask = np.zeros_like(image[..., 0])
line = cv2.line(mask, (x1, y1), (x2, y2), color=(1), thickness=2)
w = dist((x1, y1), (x2, y2))
dilation = int(w // 3)
line = binary_dilation(line, iterations=dilation)
image[line, :] = 0
return image
def remove_landmark(image, landmarks):
if random.random() > 0.5:
image = remove_eyes(image, landmarks[0])
cv2.imwrite("rm_eyes.jpg", image)
elif random.random() > 0.5:
image = remove_mouth(image, landmarks[0])
cv2.imwrite("rm_month.jpg", image)
elif random.random() > 0.5:
image = remove_nose(image, landmarks[0])
cv2.imwrite("rm_nose.jpg", image)
return image
def change_padding(image, part=5):
h, w = image.shape[:2]
# original padding was done with 1/3 from each side, too much
pad_h = int(((3 / 5) * h) / part)
pad_w = int(((3 / 5) * w) / part)
image = image[h // 5 - pad_h:-h // 5 +
pad_h, w // 5 - pad_w:-w // 5 + pad_w]
return image
def blackout_random(image, mask, label):
binary_mask = mask > 0.4 * 255
h, w = binary_mask.shape[:2]
tries = 50
current_try = 1
while current_try < tries:
first = random.random() < 0.5
if random.random() < 0.5:
pivot = random.randint(h // 2 - h // 5, h // 2 + h // 5)
bitmap_msk = np.ones_like(binary_mask)
if first:
bitmap_msk[:pivot, :] = 0
else:
bitmap_msk[pivot:, :] = 0
else:
pivot = random.randint(w // 2 - w // 5, w // 2 + w // 5)
bitmap_msk = np.ones_like(binary_mask)
if first:
bitmap_msk[:, :pivot] = 0
else:
bitmap_msk[:, pivot:] = 0
if label < 0.5 and np.count_nonzero(image * np.expand_dims(bitmap_msk, axis=-1)) / 3 > (h * w) / 5 \
or np.count_nonzero(binary_mask * bitmap_msk) > 40:
mask *= bitmap_msk
            image *= np.expand_dims(bitmap_msk, axis=-1)
from __future__ import division
import os.path as op
from itertools import product
import numpy as np
import pandas as pd
import nibabel as nib
from scipy.signal import periodogram
import pytest
from pytest import approx
from .. import glm
def assert_highly_correlated(a, b, thresh=.999):
corr = np.corrcoef(a.flat, b.flat)[0, 1]
assert corr > thresh
class TestHRFs(object):
@pytest.fixture
def random(self):
seed = sum(map(ord, "hrfs"))
return np.random.RandomState(seed)
@pytest.fixture
def randn_input(self, random):
return random.randn(100)
@pytest.fixture
def delta_input(self, random):
return random.randn(100) > .05
def test_base(self):
with pytest.raises(NotImplementedError):
glm.HRFModel().transform(None)
def test_identity(self, randn_input):
output = glm.IdentityHRF().transform(randn_input)
assert np.array_equal(output, randn_input)
@pytest.mark.parametrize(
"res,duration",
product((10, 20), (24, 42)))
def test_gamma_hrf_kernel_size(self, res, duration):
hrf = glm.GammaHRF(res=res, duration=duration)
assert len(hrf.kernel) == res * duration
def test_kernel_normalization(self):
hrf = glm.GammaHRF()
assert hrf.kernel.sum() == pytest.approx(1)
def test_undershoot(self, random):
double = glm.GammaHRF()
assert double.kernel.min() < 0
single = glm.GammaHRF(ratio=0)
assert single.kernel.min() >= 0
def test_gamma_hrf_output_type(self, random, randn_input):
a = np.asarray(randn_input)
s = pd.Series(randn_input, name="event")
hrf = glm.GammaHRF()
a_out = hrf.transform(a)
s_out = hrf.transform(s)
assert isinstance(a_out, np.ndarray)
assert isinstance(s_out, pd.Series)
def test_gamma_hrf_convolution(self, random, randn_input):
hrf = glm.GammaHRF()
convolution = np.convolve(randn_input, hrf.kernel)[:len(randn_input)]
assert hrf.transform(randn_input) == pytest.approx(convolution)
@pytest.mark.parametrize("name", ["event", 180])
def test_gamma_hrf_output_name(self, random, randn_input, name):
s = pd.Series(randn_input, name=name)
hrf = glm.GammaHRF()
y = hrf.transform(s)
assert y.name == str(name)
def test_gamma_hrf_output_index(self, random, randn_input):
n = len(randn_input)
name = "event"
idx = pd.Index(random.permutation(np.arange(n)))
s = pd.Series(randn_input, idx, name=name)
hrf = glm.GammaHRF()
y = hrf.transform(s)
assert y.index.equals(idx)
@pytest.mark.parametrize("name", ["event", 180])
def test_gamma_basis_output_names(self, random, randn_input, name):
s = pd.Series(randn_input, name=name)
hrf = glm.GammaBasis()
y = hrf.transform(s)
assert list(y.columns) == [f"{name}", f"{name}-dydt", f"{name}-dydw"]
hrf = glm.GammaBasis(time_derivative=False)
y = hrf.transform(s)
assert list(y.columns) == [f"{name}", f"{name}-dydw"]
hrf = glm.GammaBasis(disp_derivative=False)
y = hrf.transform(s)
assert list(y.columns) == [f"{name}", f"{name}-dydt"]
def test_gamma_basis_output_index(self, random, randn_input):
n = len(randn_input)
name = "event"
idx = pd.Index(random.permutation(np.arange(n)))
s = pd.Series(randn_input, idx, name=name)
hrf = glm.GammaBasis()
y = hrf.transform(s)
assert y.index.equals(idx)
def test_gamma_basis_sum_squares(self):
hrf = glm.GammaBasis()
ss = np.sum(hrf.kernel ** 2, axis=0)
assert np.allclose(np.diff(ss), 0)
def test_gamma_basis_convolution_orthogonality(self, randn_input):
hrf = glm.GammaBasis()
y = hrf.transform(randn_input)
yty = y.T @ y
np.allclose(np.triu(yty, 1), 0)
np.allclose(np.tril(yty, 1), 0)
@pytest.mark.parametrize("n", [10, 14])
def test_fir_matrix_size(self, delta_input, n):
n_tp = len(delta_input)
hrf = glm.FIRBasis(n)
assert hrf.transform(delta_input).shape == (n_tp, n)
@pytest.mark.parametrize("offset", [0, 2])
def test_fir_matrix_values(self, delta_input, random, offset):
n = 12
hrf = glm.FIRBasis(n, offset)
onsets, = np.nonzero(delta_input)
delta_basis = hrf.transform(delta_input)
for i in onsets:
for j, row in zip(np.arange(n), delta_basis[(i - offset):]):
assert row[j] == 1
parametric_input = delta_input * random.randn(delta_input.size)
parametric_basis = hrf.transform(parametric_input)
for i in onsets:
for j, row in zip(np.arange(n), parametric_basis[(i - offset):]):
assert row[j] == parametric_input[i]
def test_fir_matrix_output_type(self, delta_input):
hrf = glm.FIRBasis(12)
assert isinstance(hrf.transform(delta_input), np.ndarray)
assert isinstance(hrf.transform(pd.Series(delta_input)), pd.DataFrame)
def test_fir_basis_output_indices(self, random, delta_input):
n = 24
name = "event"
idx = pd.Index(random.permutation(np.arange(delta_input.size)))
cols = pd.Index([f"{name}_{i:02d}" for i in range(n)])
s = pd.Series(delta_input, idx, name=name)
hrf = glm.FIRBasis(n)
basis = hrf.transform(s)
assert basis.index.equals(idx)
assert basis.columns.equals(cols)
def test_fir_basis_suffix(self, delta_input):
n = 12
name = "event"
idx = np.arange(delta_input.size)
suffix = ":"
hrf = glm.FIRBasis(n, suffix=suffix)
s = pd.Series(delta_input, idx, name=name)
basis = hrf.transform(s)
cols = pd.Index([f"{name}{suffix}{i:02d}" for i in range(n)])
assert basis.columns.equals(cols)
class TestDesignMatrix(object):
@pytest.fixture
def random(self):
seed = sum(map(ord, "design_matrix"))
return np.random.RandomState(seed)
@pytest.fixture
def conditions(self):
conditions = pd.DataFrame(dict(
condition=["a", "b", "a", "b"],
onset=[0, 12, 24, 36],
duration=[2, 2, 2, 2],
value=[1, 1, 1, 1],
))
return conditions
@pytest.fixture
def regressors(self, random):
data = random.normal(2, 1, (48, 3))
columns = ["x", "y", "z"]
regressors = pd.DataFrame(data, columns=columns)
return regressors
@pytest.fixture
def artifacts(self, random):
return pd.Series(random.rand(48) < .1)
@pytest.mark.parametrize(
"n_tp,tr,time_deriv,disp_deriv",
product((24, 28), (1, 2), (False, True), (False, True)))
def test_design_shape_and_index(self, conditions, tr, n_tp,
time_deriv, disp_deriv):
X = glm.build_design_matrix(conditions,
glm.GammaBasis(time_deriv, disp_deriv),
n_tp=n_tp, tr=tr)
n_cond = len(np.unique(conditions["condition"]))
assert isinstance(X, pd.DataFrame)
assert X.shape == (n_tp, n_cond * sum([1, time_deriv, disp_deriv]))
tps = np.arange(0, n_tp * tr, tr)
assert np.array_equal(X.index.values, tps)
def test_design_contents(self, conditions):
n_tp, tr = 48, 1
X = glm.build_design_matrix(conditions, glm.IdentityHRF(),
n_tp=n_tp, tr=tr, demean=False)
expected_a = np.zeros(n_tp)
expected_a[[0, 1, 24, 25]] = 1
assert np.array_equal(X["a"].values, expected_a)
n_tp, tr = 24, 2
X = glm.build_design_matrix(conditions, glm.IdentityHRF(),
n_tp=n_tp, tr=tr, demean=False)
expected_a = np.zeros(n_tp)
expected_a[[0 // tr, 24 // tr]] = 1
assert isinstance(X, pd.DataFrame)
assert np.array_equal(X["a"].values, expected_a)
def test_design_regressors(self, conditions, regressors):
cols = regressors.columns.tolist()
X = glm.build_design_matrix(regressors=regressors)
assert X.columns.tolist() == cols
assert np.array_equal(X[cols], regressors - regressors.mean())
X = glm.build_design_matrix(conditions, regressors=regressors)
assert X.columns.tolist() == ["a", "b"] + cols
assert np.array_equal(X[cols], regressors - regressors.mean())
X = glm.build_design_matrix(regressors=regressors, demean=False)
assert np.array_equal(X[cols], regressors)
def test_design_artifacts(self, conditions, artifacts):
cols = ["art{:02d}".format(i) for i in range(artifacts.sum())]
X = glm.build_design_matrix(artifacts=artifacts)
assert X.columns.tolist() == cols
assert X.shape == (48, artifacts.sum())
X = glm.build_design_matrix(conditions, artifacts=artifacts)
assert X.columns.tolist() == ["a", "b"] + cols
def test_design_fir(self, conditions):
n = 12
n_tp = 48
X = glm.build_design_matrix(conditions, glm.FIRBasis(n), n_tp=n_tp,
demean=False, res=1, shift=0)
n_cond = conditions["condition"].unique().size
assert X.shape == (n_tp, n * n_cond)
def test_n_tp_errors(self, conditions, regressors, artifacts):
with pytest.raises(ValueError):
glm.build_design_matrix(regressors=regressors, n_tp=20)
with pytest.raises(ValueError):
glm.build_design_matrix(artifacts=artifacts, n_tp=20)
with pytest.raises(ValueError):
glm.build_design_matrix(regressors=regressors,
artifacts=artifacts.iloc[:20])
def test_hpf(self, conditions):
n_tp = 48
F = glm.highpass_filter_matrix(n_tp, 20, 1)
X = glm.build_design_matrix(conditions, hpf_matrix=F, n_tp=n_tp)
assert len(X) == len(F)
def test_condition_defaults(self, conditions):
conditions.loc[:, "duration"] = 0
conditions.loc[:, "value"] = 1
min_cols = ["condition", "onset"]
X1 = glm.build_design_matrix(conditions, n_tp=48)
X2 = glm.build_design_matrix(conditions[min_cols].copy(), n_tp=48)
assert np.array_equal(X1.values, X2.values)
@pytest.mark.parametrize(
"tr,res",
product((.1, .5, 1, 1.5, 2), (60, 30, 1)))
def test_tr_and_sampling(self, tr, res):
condition = pd.DataFrame(dict(
onset=[2],
value=[1],
duration=[0],
))
hrf = glm.GammaHRF(res=res)
out, *_ = glm.condition_to_regressors(
"test", condition, hrf, 30 / tr, tr, res, 0
)
assert 7 <= out.idxmax() <= 8
class TestContrastMatrix(object):
@pytest.fixture
def random(self):
seed = sum(map(ord, "contrast_matrix"))
return np.random.RandomState(seed)
@pytest.fixture
def design(self, random):
cols = list("abcd")
return pd.DataFrame(random.normal(0, 1, (48, 4)), columns=cols)
def test_contrast_matrix(self, design):
contrast = ("a", ["a"], [1])
C = glm.contrast_matrix(contrast, design)
assert np.array_equal(C, [1, 0, 0, 0])
contrast = ("c", ["c"], [1])
C = glm.contrast_matrix(contrast, design)
assert np.array_equal(C, [0, 0, 1, 0])
contrast = ("a-c", ["a", "c"], [1, -1])
C = glm.contrast_matrix(contrast, design)
assert np.array_equal(C, [1, 0, -1, 0])
contrast = ("a-c", ["c", "a"], [-1, 1])
C = glm.contrast_matrix(contrast, design)
assert np.array_equal(C, [1, 0, -1, 0])
contrast = ("a-bd", ["a", "b", "d"], [1, -.5, -.5])
C = glm.contrast_matrix(contrast, design)
assert np.array_equal(C, [1, -.5, 0, -.5])
class TestLinearModel(object):
@pytest.fixture()
def test_data(self):
data_path = op.join(op.dirname(__file__), "data/film_data.npz")
test_data_obj = np.load(data_path)
test_data = dict(test_data_obj)
ts_data = test_data["ts_data"]
test_data["ts_img"] = nib.Nifti1Image(ts_data, np.eye(4))
nx, ny, nz, n_tp = ts_data.shape
n_vox = nx * ny * nz
test_data["data_matrix_shape"] = n_tp, n_vox
mask = np.ones(ts_data.shape[:-1], np.int)
test_data["mask_img"] = nib.Nifti1Image(mask, np.eye(4))
yield test_data
test_data_obj.close()
def test_image_prewhitening_outputs(self, test_data):
ts_img = test_data["ts_img"]
mask_img = test_data["mask_img"]
X = test_data["X"]
smooth_fwhm = None
n_tp, n_vox = test_data["data_matrix_shape"]
_, n_ev = X.shape
# Test output shapes with the full data
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
assert WY.shape == (n_tp, n_vox)
assert WX.shape == (n_tp, n_ev, n_vox)
# Test output shapes using a more restrictive mask
n_mask = 10
mask = np.zeros(mask_img.shape, np.int)
mask.flat[:n_mask] = 1
mask_img = nib.Nifti1Image(mask, np.eye(4))
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
assert WY.shape == (n_tp, n_mask)
assert WX.shape == (n_tp, n_ev, n_mask)
# Smoke test smoothing
smooth_fwhm = 2
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
assert WY.shape == (n_tp, n_mask)
assert WX.shape == (n_tp, n_ev, n_mask)
def test_residual_autocorrelation_outputs(self, test_data):
ts_data = test_data["ts_data"]
n_tp, n_vox = test_data["data_matrix_shape"]
Y = ts_data.reshape(n_vox, n_tp).T
X = test_data["X"]
# Test outputs with default Tukey window taper
auto_tukey_m = glm.default_tukey_window(n_tp)
acf = glm.estimate_residual_autocorrelation(Y, X)
assert acf.shape == (auto_tukey_m, n_vox)
# Test normalization of autocorrelation estimates
assert np.array_equal(acf[0], np.ones(n_vox))
assert acf[1:].max() < 1
assert acf.min() > -1
# Test outputs with specifed tukey taper size
tukey_m = 10
acf = glm.estimate_residual_autocorrelation(Y, X, tukey_m)
assert acf.shape == (tukey_m, n_vox)
def test_iterative_ols_fit(self, test_data):
ts_img = test_data["ts_img"]
mask_img = test_data["mask_img"]
X = test_data["X"]
smooth_fwhm = None
n_tp, n_vox = test_data["data_matrix_shape"]
_, n_ev = X.shape
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
B, SS, XtXinv, E = glm.iterative_ols_fit(WY, WX)
# Test output shapes
assert B.shape == (n_vox, n_ev)
assert SS.shape == (n_vox,)
assert XtXinv.shape == (n_vox, n_ev, n_ev)
assert E.shape == (n_tp, n_vox)
# Test against numpy's basic least squares estimation
for i in range(n_vox):
B_i, _, _, _ = np.linalg.lstsq(WX[:, :, i], WY[:, i], rcond=None)
assert B_i == approx(B[i])
# Test XtXinv symmetry
for XtXinv_i in XtXinv:
assert np.array_equal(XtXinv_i, XtXinv_i.T)
def test_iterative_contrast_estimation(self, test_data):
ts_img = test_data["ts_img"]
mask_img = test_data["mask_img"]
n_tp, n_vox = test_data["data_matrix_shape"]
X = test_data["X"]
C = test_data["C"]
smooth_fwhm = None
n_tp, n_vox = test_data["data_matrix_shape"]
n_con, n_ev = C.shape
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
B, SS, XtXinv, _ = glm.iterative_ols_fit(WY, WX)
G, V, T = glm.iterative_contrast_estimation(B, SS, XtXinv, C)
# Test output shapes
assert G.shape == (n_vox, n_con)
assert V.shape == (n_vox, n_con)
assert T.shape == (n_vox, n_con)
# Test computation of contrast parameter estimates
assert np.array_equal(G, np.dot(B, C.T))
# Test that variances are all positive
assert np.all(V > 0)
# Test that t stats have the same sign as the effect sizes
assert np.all(np.sign(T) == np.sign(G))
def test_prewhitened_glm_against_fsl(self, test_data):
ts_img = test_data["ts_img"]
mask_img = test_data["mask_img"]
n_tp, n_vox = test_data["data_matrix_shape"]
Y = test_data["ts_data"].reshape(n_vox, n_tp).T
X = test_data["X"]
C = test_data["C"]
smooth_fwhm = None
acf = glm.estimate_residual_autocorrelation(Y - Y.mean(axis=0), X)
WY, WX = glm.prewhiten_image_data(ts_img, mask_img, X, smooth_fwhm)
WX_mean = WX.mean(axis=-1)
B, SS, XtXinv, _ = glm.iterative_ols_fit(WY, WX)
G, V, T = glm.iterative_contrast_estimation(B, SS, XtXinv, C)
# Note that while our code produces highly similar values to what we
# get from FSL, there are enough small differences that we can't simply
# test array equality (or even almost equality to n decimals). This is
# somewhat disconcerting, but given the number of differences in the
# two implementations it is not wholly unexpected. Further, there is
# enough small weirdness in the FSL code (i.e. the autocorrelation
# estimates don't appear properly normalized) that it's not certain
# that small deviations are problems in our code and not FSL. In any
# case, it will suffice to test that the values are highly similar.
# Test residual autocorrelation estimate
assert_highly_correlated(acf, test_data["acf"])
# Test prewhitened fMRI data
assert_highly_correlated(WY, test_data["WY"])
# Test (average) prewhitened design
assert_highly_correlated(WX_mean, test_data["WX"])
# Test model parameter estimates
assert_highly_correlated(B, test_data["B"])
# Test model error summary
assert_highly_correlated(SS, test_data["SS"])
# Test contrast of parameter estimates
assert_highly_correlated(G, test_data["G"])
# Test variance of contrast of parameter estimates
assert_highly_correlated(V, test_data["V"])
# Test contrast t statistics
assert_highly_correlated(T, test_data["T"])
class TestHighpassFilter(object):
@pytest.fixture
def test_data(self):
data_path = op.join(op.dirname(__file__), "data/hpf_data.npz")
test_data_obj = np.load(data_path)
import numpy as np
import cv2
import tensorflow as tf
class PolyLaneNetPostProcessHailo(object):
def __init__(self):
return
def recombine_split_endnodes(self, confs, upper_lower, coeffs_1_2, coeffs_3_4):
bs = confs.shape[0]
output = np.zeros((bs, 5, 7))
for lane in range(5):
output[:, lane, 0:1] = confs[:, 1 * lane:1 * lane + 1]
output[:, lane, 1:3] = upper_lower[:, 2 * lane:2 * lane + 2]
output[:, lane, 3:5] = coeffs_1_2[:, 2 * lane:2 * lane + 2]
output[:, lane, 5:7] = coeffs_3_4[:, 2 * lane:2 * lane + 2]
return output.astype(np.float32)
def sigmoid(self, x):
return (np.exp(x)) / (np.exp(x) + 1.0)
def enforce_shared_y(self, pred):
pred = pred.reshape(-1, 5, 7)
pred_lowers = pred[:, :, 1]
first_lowers = pred_lowers[:, 0]
first_lowers = np.expand_dims(first_lowers, 1)
first_lowers = np.repeat(first_lowers, 5, axis=1)
pred[:, :, 1] = first_lowers
return pred
def decode(self, outputs, conf_threshold=0.5):
outputs = self.enforce_shared_y(outputs)
outputs[:, :, 0] = self.sigmoid(outputs[:, :, 0])
outputs[outputs[:, :, 0] < conf_threshold] = 0
return outputs
def polynomize_pred(self, pred):
pred = pred[0]  # index [0] because the prediction had to be passed in as a list
batch_lanes = []
for image in range(pred.shape[0]):
# running over images in batch:
lanes = []
for lane_index in range(pred.shape[1]):
confidence = pred[image, lane_index, 0]
lower = pred[image, lane_index, 1]
upper = pred[image, lane_index, 2]
xvals = (np.polyval(pred[image, lane_index, 3:], self.h_range) * self.img_w)
xvals[self.h_range < lower] = -2.
xvals[self.h_range > upper] = -2.
xvals = np.append(xvals, confidence)
from reader import Reader
from PIL import Image
import numpy as np
import matplotlib as mpl
mpl.use('Agg')  # select the non-interactive backend before pyplot is imported
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_corners(dimensions, location, rotation_y):
dimensions = np.clip(dimensions, a_min=1.5, a_max=5)
R = np.array([[+np.cos(rotation_y), 0, +np.sin(rotation_y)],
[0, 1, 0],
[-np.sin(rotation_y), 0, +np.cos(rotation_y)]],
dtype=np.float32)
h, w, l = dimensions
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
corners_3D = np.dot(R, [x_corners, y_corners, z_corners])
corners_3D += location.reshape((3, 1))
return corners_3D
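# Illustrative check (a hypothetical helper, not used elsewhere): with
# rotation_y = 0 the corners form an axis-aligned box around `location`,
# spanning l along x, h upward along -y and w along z, matching the corner
# lists used above.
def _demo_get_corners_identity_yaw():
    corners = get_corners(np.array([2.0, 2.0, 4.0]),  # h, w, l
                          np.array([0.0, 0.0, 0.0]),  # location
                          0.0)                        # rotation_y
    assert corners.shape == (3, 8)
    assert np.allclose(corners[0].max() - corners[0].min(), 4.0)  # length l
    assert np.allclose(corners[1].min(), -2.0)                    # height h
    assert np.allclose(corners[2].max() - corners[2].min(), 2.0)  # width w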
def draw_projection(corners, P2, ax, color):
projection = np.dot(P2, np.vstack([corners, np.ones(8, dtype=np.int32)]))
projection = (projection / projection[2])[:2]
orders = [[0, 1, 2, 3, 0],
[4, 5, 6, 7, 4],
[2, 6], [3, 7],
[1, 5], [0, 4]]
for order in orders:
ax.plot(projection[0, order], projection[1, order],
color=color, linewidth=2)
return
def draw_space(corners, ax, color):
assert corners.shape == (3, 8)
orders = [0, 1, 2, 3, 0, 4, 5, 6, 7, 4, 5, 1, 2, 6, 7, 3]
lines = np.zeros((3, 16), dtype=np.float32)
#
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sympy.ntheory import factorint
import numpy as np
from sympy.combinatorics import Permutation
import io
import math
from config.strtools import *
import itertools
import struct
import config.formats
# Conversion of double to fixed point values
#
# - A full-scale negative value maps to 0x8000 in C (int16).
#   When it is multiplied, the result has the wrong sign unless DSPE
#   instructions with saturation are used to compute the negate (in which
#   case we get 0x7FFF).
#
# So for Cortex-M without the DSP extension we should try to use 0x8001
# instead. This is done but not yet tested.
def to_q63(v,dspe):
r = int(round(v * 2**63))
if (r > 0x07FFFFFFFFFFFFFFF):
r = 0x07FFFFFFFFFFFFFFF
if (r < -0x08000000000000000):
if dspe:
r = -0x08000000000000000
else:
r = -0x07FFFFFFFFFFFFFFF
return ("0x%s" % format(struct.unpack('<Q', struct.pack('<q', r))[0],'016X'))
def to_q31(v,dspe):
r = int(round(v * 2**31))
if (r > 0x07FFFFFFF):
r = 0x07FFFFFFF
if (r < -0x080000000):
if dspe:
r = -0x080000000
else:
r = -0x07FFFFFFF
return ("0x%s" % format(struct.unpack('<I', struct.pack('<i', r))[0],'08X'))
def to_q15(v,dspe):
r = int(round(v * 2**15))
if (r > 0x07FFF):
r = 0x07FFF
if (r < -0x08000):
if dspe:
r = -0x08000
else:
r = -0x07FFF
return ("0x%s" % format(struct.unpack('<H', struct.pack('<h', r))[0],'04X'))
def to_q7(v,dspe):
r = int(round(v * 2**7))
if (r > 0x07F):
r = 0x07F
if (r < -0x080):#
if dspe:
r = -0x080
else:
r = -0x07F
return ("0x%s" % format(struct.unpack('<B', struct.pack('<b', r))[0],'02X'))
Q7=1
Q15=2
Q31=3
F16=4
F32=5
F64=6
# In the final C++ code, we have a loop for a given radix.
# The input list here has not grouped the factors.
# The list need to be transformed into a list of pair.
# The pair being (radix,exponent)
def groupFactors(factors):
n = 0
current=-1
result=[]
for f in factors:
if f != current:
if current != -1:
result = result + [current,n]
current=f
n=1
else:
n=n+1
result = result + [current,n]
return(result)
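# Small illustrative example of the (radix, exponent) pairing produced by
# groupFactors (the demo function is only a sketch): one radix-2 stage
# followed by three radix-4 stages is flattened into [radix, exponent, ...].
def _demo_group_factors():
    assert groupFactors([2, 4, 4, 4]) == [2, 1, 4, 3]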
# Compute the grouped factors for the FFT length originaln,
# where the only possible radices are those in the primitiveFactors list.
def getFactors(primitiveFactors,originaln):
factors=[]
length=[]
primitiveFactors.sort(reverse=True)
n = originaln
while (n > 1) and primitiveFactors:
if (n % primitiveFactors[0] == 0):
factors.append(primitiveFactors[0])
n = n // primitiveFactors[0]
else:
primitiveFactors=primitiveFactors[1:]
# When the lowest factors are at the beginning (like 2),
# we use a special implementation of the loopcore template,
# which removes some cycles.
# So we will get (for instance) 2x8x8x8 instead of 8x8x8x2.
factors.reverse()
for f in factors:
originaln = originaln // f
length.append(originaln)
groupedfactors=groupFactors(factors)
return(groupedfactors,factors,length)
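# Illustrative example (a sketch) of the full decomposition for a length-120
# FFT restricted to radices 2, 3 and 5; the returned lengths are the
# remaining sub-FFT sizes after each stage.
def _demo_get_factors():
    grouped, factors, lengths = getFactors([2, 3, 5], 120)
    assert factors == [2, 2, 2, 3, 5]
    assert grouped == [2, 3, 3, 1, 5, 1]
    assert lengths == [60, 30, 15, 5, 1]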
# Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
def radixReverse(f,n):
a=np.array(range(0,n)).reshape(f)
r = list(range(0,len(f)))
r.reverse()
r = tuple(r)
a = np.transpose(a,r)
return(a.reshape(n))
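# Quick illustration (sketch only): for a length-4 FFT decomposed as 2x2 the
# digit-reversal permutation is the familiar bit reversal [0, 2, 1, 3].
def _demo_radix_reverse():
    assert np.array_equal(radixReverse((2, 2), 4), [0, 2, 1, 3])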
def radixPermutation(factors,n):
a = radixReverse(factors,n)
tps = []
vectorizable=True
for c in Permutation.from_sequence(a).cyclic_form:
if (len(c)>2):
vectorizable = False
for i in range(len(c)-1,0,-1):
# 2 because those are indexes in an array of complex numbers but
# with a real type.
tps.append([2*c[i], 2*c[i-1]])
return(np.array(tps,dtype=int).flatten(),vectorizable)
# CFFT Twiddle table
def cfft_twiddle(n):
a=2.0*math.pi*np.linspace(0,n,num=n,endpoint=False)
#!/usr/bin/python
"""
Design the Nyquist(M) filter prototypes.
Reference:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Filter bank design based on minimization of individual aliasing terms for minimum mutual information subband adaptive beamforming", ICASSP 2018.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from numba import jit
import os
import pickle
@jit(nopython=True)
def mynull(A, num=0, datatype='d'):
"""
Find a null space projection matrix
:param A: matrix
:type A: np matrix
:param num: number of bases for the null space
:type num: integer
:param datatype: 'd' or 's' for tolerance
:type datatype: char
:returns: null space projection matrix of A and singular weights
"""
[U,W,VH] = np.linalg.svd(A)
V = VH.transpose()
(rowN, colN) = A.shape
if num > 0:
sX = colN - num
val = np.zeros(num, np.float_)
else:
if rowN > 1:
s = np.diag(W)
elif rowN == 1:
s = np.array([[W[0]]])
if datatype == 'd': # double precision accuracy
tol = max(rowN, colN) * s.max() * 2.2204e-16
else: # single precision accuracy
tol = max(rowN, colN) * s.max() * 1.1921e-07
print('Threshold for nullspace: %e' %tol)
sX = np.sum(s > tol)
val = np.zeros(colN-sX, np.float_)
y = np.array(V[:, sX:colN:1])
for i in range(len(val)):
val[i] = W[sX+i]
return (y, val)
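# NumPy-only sketch of the property mynull computes (kept separate from the
# numba-jitted helper above; the demo function and its test matrix are
# illustrative assumptions): the returned basis Y spans the null space of A,
# so A @ Y is numerically zero for a rank-deficient A.
def _demo_null_space_property():
    A = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])  # rank 1, hence a 2-dimensional null space
    _, s, VH = np.linalg.svd(A)
    rank = int(np.sum(s > 1e-12))
    Y = VH.transpose()[:, rank:]
    assert np.allclose(A @ Y, 0.0)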
@jit(nopython=True)
def create_delA_delC_delb(L_h, M, m, md, A, C, b):
delC = np.zeros((L_h - m + 1, L_h - m + 1), np.float_)
delA = np.zeros((L_h - m + 1, L_h - m + 1), np.float_)
delb = np.zeros((L_h - m + 1, 1), np.float_)
i = 0
for k in range(L_h):
if k == md or (k % M) != 0:
j = 0
for l in range(L_h):
if l == md or (l % M) != 0:
delA[i][j] = A[k][l]
delC[i][j] = C[k][l]
j += 1
delb[i] = b[k]
i += 1
return delA,delC,delb
@jit(nopython=True)
def create_h(L_h, M, md, rh):
h = np.zeros((L_h, 1), np.float_)
k = 0
for m in range(L_h):
if m != md and (m % M) == 0:
h[m] = 0
else:
h[m] = rh[k]
k += 1
return h
def design_Nyquist_analyasis_filter_prototype(M, m, D, wpW=1):
"""
Design an analysis filter prototype
:param M: Number of subbands
:type M: integer
:param m: Filter length factor
:type m: integer
:param D: Decimation factor
:type D: integer
:returns: Coefficients of analysis filter prototype and inband aliasing distortion
"""
L_h = M * m # length of the prototype filter
md = L_h / 2 if m != 1 else 0 # group delay offset
tau_h = L_h / 2 # group delay of analysis fb
w_p = np.pi / (wpW * M) # passband cut-off frequency
i = np.arange(0, L_h)
j = np.arange(0, L_h)
i = np.expand_dims(i, -1)
j = np.expand_dims(j, 0)
j_i = j - i
factor = np.where(j_i % D == 0, D - 1, -1.0)
C = np.where(j_i == 0,
factor / D,
factor * np.sin(np.pi * j_i / D) / (np.pi * j_i)
)
A = np.where(j_i == 0,
1.0,
np.sin(w_p * j_i) / (w_p * j_i)
)
b = np.where((tau_h - i) == 0,
1.0,
np.sin(w_p * (tau_h - i)) / (w_p * (tau_h - i))
)
# delete the rows and columns of C corresponding to the components of h = 0
delA, delC, delb = create_delA_delC_delb(L_h, M, m, md, A, C, b)
rank_delC = np.linalg.matrix_rank(delC)
if rank_delC == len(delC):
# take an eigen vector corresponding to the smallest eigen value.
eVal, eVec = np.linalg.eig(delC)
# take eigen vectors as basis
minX = np.argmin(eVal)
print('\nmin eigen val: {}'.format(eVal[minX]))
rh = eVec[:,minX] # eigenvector for the smallest eigenvalue (np.linalg.eig does not sort)
# flip the sign if all the coefficients are negative
all_negative = not np.any(rh > 0)
if all_negative:
rh = - rh
else:
nulldelC, _w = mynull( delC )
if len(nulldelC[0]) == 0:
raise ArithmeticError('No. of null space bases is 0')
print( 'No. null space bases of C is %d' %len(nulldelC[0]))
# In general, null(delC) is not a square matrix.
# We want to avoid using a pseudo-inverse matrix as much as possible.
T1 = np.dot(delA, nulldelC)
T1_2 = np.dot(nulldelC.transpose(), T1)
rank_T = np.linalg.matrix_rank(T1_2)
if rank_T == len(T1_2):
x = np.linalg.solve(T1_2, np.dot(nulldelC.transpose(), delb))
else:
print('Use pseudo-inverse matrix because %d < %d' %(rank_T, len(T1_2)))
x = np.dot(np.linalg.pinv(T1), delb)
from unittest import TestCase
import numpy as np
from giant import rotations as at
class TestAttitude(TestCase):
def check_attitude(self, attitude, quaternion, mupdate, vupdate):
np.testing.assert_array_almost_equal(quaternion, attitude.q)
np.testing.assert_array_almost_equal(quaternion[:3], attitude.q_vector)
self.assertAlmostEqual(quaternion[-1], attitude.q_scalar)
self.assertIs(attitude._mupdate, mupdate)
self.assertIs(attitude._vupdate, vupdate)
def test_init(self):
att = at.Rotation()
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation([0, 0, 0, 1])
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation(data=[0, 0, 0, 1])
self.check_attitude(att, [0, 0, 0, 1], True, True)
att = at.Rotation(np.eye(3))
self.check_attitude(att, [0, 0, 0, 1], False, True)
att = at.Rotation([0, 0, 0])
self.check_attitude(att, [0, 0, 0, 1], True, False)
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att = at.Rotation([np.sqrt(2) / 2, 0, 0, -np.sqrt(2) / 2])
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = att
att = at.Rotation(att2)
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.assertIs(att, att2)
# with self.assertWarns(UserWarning):
#
# at.Rotation([1, 2, 3, 4])
def test_quaternion_setter(self):
att = at.Rotation()
att._mupdate = False
att._vupdate = False
att.quaternion = [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2]
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att.quaternion = [np.sqrt(2)/2, 0, 0, -np.sqrt(2)/2]
self.check_attitude(att, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = at.Rotation([1, 2, 3, 4])
att.quaternion = att2
self.check_attitude(att, np.array([1, 2, 3, 4])/np.sqrt(30), True, True)
self.assertIsNot(att, att2)
# with self.assertWarns(UserWarning):
# att.quaternion = [1, 2, 3, 4]
#
# self.check_attitude(att, np.array([1, 2, 3, 4])/np.sqrt(30), True, True)
with self.assertRaises(ValueError):
att.quaternion = np.eye(4)
def test_matrix_getter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
np.testing.assert_array_almost_equal([[1, 0, 0], [0, 0, -1], [0, 1, 0]], att.matrix)
np.testing.assert_array_almost_equal([[1, 0, 0], [0, 0, -1], [0, 1, 0]], att._matrix)
self.assertFalse(att._mupdate)
# this is bad and you should never do this but it checks that the caching is working
att._matrix = np.eye(3)
np.testing.assert_array_equal(att.matrix, np.eye(3))
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], False, True)
def test_matrix_setter(self):
att = at.Rotation([1, 2, 3])
att.matrix = np.eye(3)
self.check_attitude(att, [0, 0, 0, 1], False, True)
np.testing.assert_array_equal(att._matrix, np.eye(3))
with self.assertRaises(ValueError):
att.matrix = [1, 2, 3]
def test_vector_getter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
np.testing.assert_array_almost_equal(att.vector, [np.pi/2, 0, 0])
np.testing.assert_array_almost_equal(att._vector, [np.pi/2, 0, 0])
self.assertFalse(att._vupdate)
# this is bad and you should never do this but it checks that the caching is working
att._vector = [1, 2, 3]
np.testing.assert_array_equal(att.vector, [1, 2, 3])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, False)
def test_vector_setter(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
att.vector = [1, 2, 3]
self.check_attitude(att, [-0.25532186, -0.51064372, -0.76596558, 0.29555113], True, False)
np.testing.assert_array_equal(att.vector, [1, 2, 3])
with self.assertRaises(ValueError):
att.vector = np.eye(3)
def test_inv(self):
att = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
attinv = att.inv()
self.check_attitude(attinv, [-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
def test_interp_attitude(self):
att = at.Rotation()
att.interp_attitude([1, 2, 3])
self.check_attitude(att, [-0.25532186, -0.51064372, -0.76596558, 0.29555113], True, False)
np.testing.assert_array_equal(att._vector, [1, 2, 3])
att.interp_attitude(np.eye(3))
self.check_attitude(att, [0, 0, 0, 1], False, True)
np.testing.assert_array_equal(att._matrix, np.eye(3))
att.interp_attitude([np.sqrt(2)/2, 0, 0, np.sqrt(2)/2])
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
att2 = at.Rotation([np.sqrt(2) / 2, 0, 0, np.sqrt(2) / 2])
att.interp_attitude(att2)
self.check_attitude(att, [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2], True, True)
self.assertIsNot(att, att2)
with self.assertRaises(ValueError):
att.interp_attitude([1, 2])
def test_eq(self):
att = at.Rotation()
self.assertTrue(att == at.Rotation())
self.assertTrue(att == [0, 0, 0, 1])
self.assertTrue(att == np.eye(3))
self.assertTrue(att == [0, 0, 0])
def test_mul(self):
att = at.Rotation([1, 2, 3])
att2 = att.inv()
self.check_attitude(att*att2, [0, 0, 0, 1], True, True)
with self.assertRaises(TypeError):
_ = att*[0, 0, 0, 1]
with self.assertRaises(TypeError):
_ = [0, 0, 0, 1]*att
# def test_imul(self):
# att = at.Rotation()
# with self.assertWarns(DeprecationWarning):
#
# att *= [1, 0, 0, 0]
#
# self.check_attitude(att, [1, 0, 0, 0], True, True)
def test_rotate(self):
att = at.Rotation()
att.rotate([1, 0, 0, 0])
self.check_attitude(att, [1, 0, 0, 0], True, True)
class TestQuaternionInverse(TestCase):
def test_quaternion_inverse(self):
qinv = at.quaternion_inverse([1, 2, 3, 4])
np.testing.assert_array_equal(qinv, [-1, -2, -3, 4])
qinv = at.quaternion_inverse(at.Rotation([1, 2, 3, 4]))
np.testing.assert_array_almost_equal(qinv.q.flatten(), np.array([-1, -2, -3, 4])/np.sqrt(30))
qinv = at.quaternion_inverse([[1, 2], [2, 3], [3, 4], [4, 5]])
np.testing.assert_array_equal(qinv.T, [[-1, -2, -3, 4], [-2, -3, -4, 5]])
class TestQuaternionMultiplication(TestCase):
def test_quaternion_multiplication(self):
quat_1 = [1, 0, 0, 0]
quat_2 = [0, 1, 0, 0]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [0, 0, 1, 0])
quat_1 = [[1], [0], [0], [0]]
quat_2 = [[0], [1], [0], [0]]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [[0], [0], [1], [0]])
quat_1 = [[1, 0], [0, 1], [0, 0], [0, 0]]
quat_2 = [[0, 0], [1, 1], [0, 0], [0, 0]]
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm), [[0, 0], [0, 0], [1, 0], [0, 1]])
quat_1 = at.Rotation([1, 0, 0, 0])
quat_2 = at.Rotation([0, 0, 1, 0])
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_equal(np.abs(qm.q), [0, 1, 0, 0])
quat_1 = [np.sqrt(2)/2, 0, 0, np.sqrt(2)/2] # x=x, y=z, z=-y
quat_2 = [0, np.sqrt(2)/2, 0, np.sqrt(2)/2] # x=-z, y=y, z=x
qm = at.quaternion_multiplication(quat_1, quat_2)
np.testing.assert_array_almost_equal(np.abs(qm), [0.5, 0.5, 0.5, 0.5])
quat_1 = [0.25532186, 0.51064372, 0.76596558, -0.29555113]
quat_2 = [-0.43199286, -0.53999107, -0.64798929, -0.31922045]
qm = at.quaternion_multiplication(quat_1, quat_2)
# truth comes from matrix rotations
np.testing.assert_array_almost_equal(qm, [0.12889493, -0.16885878, 0.02972499, 0.97672373])
class TestQuaternionToRotVec(TestCase):
def test_quaternion_to_rotvec(self):
rvec = at.quaternion_to_rotvec([1, 0, 0, 0])
np.testing.assert_array_almost_equal(rvec, [np.pi, 0, 0])
rvec = at.quaternion_to_rotvec(at.Rotation([-1, 0, 0, 0]))
np.testing.assert_array_almost_equal(rvec, [-np.pi, 0, 0])
rvec = at.quaternion_to_rotvec([0, 1, 0, 0])
np.testing.assert_array_almost_equal(rvec, [0, np.pi, 0])
rvec = at.quaternion_to_rotvec([0, -1, 0, 0])
np.testing.assert_array_almost_equal(rvec, [0, -np.pi, 0])
rvec = at.quaternion_to_rotvec([0, 0, 1, 0])
np.testing.assert_array_almost_equal(rvec, [0, 0, np.pi])
rvec = at.quaternion_to_rotvec([0, 0, -1, 0])
np.testing.assert_array_almost_equal(rvec, [0, 0, -np.pi])
rvec = at.quaternion_to_rotvec([0, 0, 0, 1])
np.testing.assert_array_almost_equal(rvec, [0, 0, 0])
rvec = at.quaternion_to_rotvec([0, 0, 0, -1])
np.testing.assert_array_almost_equal(rvec, [0, 0, 0])
rvec = at.quaternion_to_rotvec([0.25532186, 0.51064372, 0.76596558, -0.29555113])
np.testing.assert_array_almost_equal(rvec, [1, 2, 3])
rvec = at.quaternion_to_rotvec([-0.25532186, -0.51064372, -0.76596558, 0.29555113])
# euler axis is not unique
np.testing.assert_array_almost_equal(rvec, np.array([1, 2, 3])*(1-2*np.pi/np.sqrt(14)))
rvec = at.quaternion_to_rotvec([[0.25532186], [0.51064372], [0.76596558], [-0.29555113]])
np.testing.assert_array_almost_equal(rvec, [[1], [2], [3]])
rvec = at.quaternion_to_rotvec([[1, 0, 0.25532186, 0],
[0, 0, 0.51064372, 0],
[0, 0, 0.76596558, 0],
[0, 1, -0.29555113, -1]])
np.testing.assert_array_almost_equal(rvec, [[np.pi, 0, 1, 0], [0, 0, 2, 0], [0, 0, 3, 0]])
class TestQuaternionToRotMat(TestCase):
def test_quaternion_to_rotmat(self):
rotmat = at.quaternion_to_rotmat([0, 0, 0, 1])
np.testing.assert_array_almost_equal(rotmat, np.eye(3))
rotmat = at.quaternion_to_rotmat([[0], [0], [0], [1]])
np.testing.assert_array_almost_equal(rotmat, np.eye(3))
rotmat = at.quaternion_to_rotmat([[0, 1], [0, 0], [0, 0], [1, 0]])
np.testing.assert_array_almost_equal(rotmat, [np.eye(3), [[1, 0, 0], [0, -1, 0], [0, 0, -1]]])
rotmat = at.quaternion_to_rotmat(at.Rotation([0, 1, 0, 0]))
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
rotmat = at.quaternion_to_rotmat([0, 0, np.sqrt(2)/2, np.sqrt(2)/2])
np.testing.assert_array_almost_equal(rotmat, [[0, -1, 0], [1, 0, 0], [0, 0, 1]])
rotmat = at.quaternion_to_rotmat([-0.25532186, -0.51064372, -0.76596558, 0.29555113])
np.testing.assert_array_almost_equal(rotmat, [[-0.69492056, 0.71352099, 0.08929286],
[-0.19200697, -0.30378504, 0.93319235],
[0.69297817, 0.6313497, 0.34810748]])
class TestQuaternionToEuler(TestCase):
def test_quaternion_to_euler(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
quat = at.rotmat_to_quaternion(rmat)
euler = at.quaternion_to_euler(quat, order=order)
rmat2 = at.euler_to_rotmat(euler, order=order)
quat2 = at.rotmat_to_quaternion(rmat2)
np.testing.assert_almost_equal(quat, quat2)
class TestRotVecToRotMat(TestCase):
def test_rotvec_to_rotmat(self):
rotmat = at.rotvec_to_rotmat([0, 0, 0])
np.testing.assert_array_almost_equal(rotmat, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
rotmat = at.rotvec_to_rotmat([[0, 0], [0, 0], [0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]])
rotmat = at.rotvec_to_rotmat([np.pi, 0, 0])
np.testing.assert_array_almost_equal(rotmat, [[1, 0, 0], [0, -1, 0], [0, 0, -1]])
rotmat = at.rotvec_to_rotmat([0, np.pi, 0])
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
rotmat = at.rotvec_to_rotmat([0, 0, np.pi])
np.testing.assert_array_almost_equal(rotmat, [[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
rotmat = at.rotvec_to_rotmat([[np.pi, 0, 0], [0, np.pi, 0], [0, 0, -np.pi]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]])
rotmat = at.rotvec_to_rotmat([[np.pi / 2, 0], [0, -np.pi / 2], [0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]]])
rotmat = at.rotvec_to_rotmat([[np.pi / 2, 0, 0], [0, 0, -np.pi / 2], [0, 0, 0]])
np.testing.assert_array_almost_equal(rotmat, [[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]]])
rotmat = at.rotvec_to_rotmat([1, 2, 3])
np.testing.assert_array_almost_equal(rotmat, [[-0.69492056, 0.71352099, 0.08929286],
[-0.19200697, -0.30378504, 0.93319235],
[0.69297817, 0.6313497, 0.34810748]])
class TestRotVecToQuaternion(TestCase):
def test_rotvec_to_quaternion(self):
q = at.rotvec_to_quaternion([0, 0, 0])
np.testing.assert_array_almost_equal(q, [0, 0, 0, 1])
q = at.rotvec_to_quaternion([[0], [0], [0]])
np.testing.assert_array_almost_equal(q, [[0], [0], [0], [1]])
q = at.rotvec_to_quaternion([np.pi, 0, 0])
np.testing.assert_array_almost_equal(q, [1, 0, 0, 0])
q = at.rotvec_to_quaternion([0, np.pi, 0])
np.testing.assert_array_almost_equal(q, [0, 1, 0, 0])
q = at.rotvec_to_quaternion([0, 0, np.pi])
np.testing.assert_array_almost_equal(q, [0, 0, 1, 0])
q = at.rotvec_to_quaternion([1, 2, 3])
np.testing.assert_array_almost_equal(q, [0.25532186, 0.51064372, 0.76596558, -0.29555113])
q = at.rotvec_to_quaternion([[0], [0], [np.pi]])
np.testing.assert_array_almost_equal(q, [[0], [0], [1], [0]])
q = at.rotvec_to_quaternion([[np.pi, 0, 1, 0],
[0, 0, 2, 0],
[0, 0, 3, 0]])
np.testing.assert_array_almost_equal(q, [[1, 0, 0.25532186, 0],
[0, 0, 0.51064372, 0],
[0, 0, 0.76596558, 0],
[0, 1, -0.29555113, 1]])
class TestRotMatToQuaternion(TestCase):
def test_rotmat_to_quaternion(self):
q = at.rotmat_to_quaternion(np.eye(3))
np.testing.assert_allclose(q, [0, 0, 0, 1], atol=1e-16)
# figure out how to account for the fact that these can be positive or negative
q = at.rotmat_to_quaternion(np.array([[-1., 0, 0], [0, 1, 0], [0, 0, -1]]))
np.testing.assert_allclose(np.abs(q), [0, 1, 0, 0], atol=1e-16)
q = at.rotmat_to_quaternion(np.array([[1., 0, 0], [0, -1, 0], [0, 0, -1]]))
np.testing.assert_allclose(np.abs(q), [1, 0, 0, 0], atol=1e-16)
q = at.rotmat_to_quaternion(np.array([[-1., 0, 0], [0, -1, 0], [0, 0, 1]]))
np.testing.assert_allclose(np.abs(q), [0, 0, 1, 0], atol=1e-16)
q = at.rotmat_to_quaternion([np.eye(3)]*2)
np.testing.assert_allclose(q.T, [[0, 0, 0, 1]]*2, atol=1e-16)
q = at.rotmat_to_quaternion([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
np.testing.assert_allclose(q, [0, 0, -np.sqrt(2)/2, np.sqrt(2)/2], atol=1e-16)
q = at.rotmat_to_quaternion([[-0.69492056, -0.19200697, 0.69297817],
[0.71352099, -0.30378504, 0.6313497],
[0.08929286, 0.93319235, 0.34810748]])
np.testing.assert_allclose(q, [0.25532186, 0.51064372, 0.76596558, 0.29555113], atol=1e-16)
q = at.rotmat_to_quaternion([[[-0.69492056, -0.19200697, 0.69297817],
[0.71352099, -0.30378504, 0.6313497],
[0.08929286, 0.93319235, 0.34810748]],
np.eye(3)])
np.testing.assert_allclose(q.T, [[0.25532186, 0.51064372, 0.76596558, 0.29555113],
[0, 0, 0, 1]], atol=1e-16)
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([1, 2, 3])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1, 2, 3]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1], [2], [3]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([1, 2, 3, 4])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1, 2, 3, 4]])
with self.assertRaises(ValueError):
at.rotmat_to_quaternion([[1], [2], [3], [4]])
class TestRotMatToEuler(TestCase):
def test_rotmat_to_euler(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
euler = at.rotmat_to_euler(rmat, order=order)
rmat2 = at.euler_to_rotmat(euler, order=order)
np.testing.assert_almost_equal(rmat, rmat2)
class TestEulerToRotMat(TestCase):
def test_euler_to_rotmat(self):
orders = ['xyz', 'zxy', 'yxz', 'yzx', 'xzy', 'zyx', 'xyx', 'yxy', 'xzx', 'zxz', 'yzy', 'zyz']
angles = [[np.pi/3, 0, 0], [0, np.pi/3, 0], [0, 0, np.pi/3],
[np.pi/3, np.pi/3, 0], [0, np.pi/3, np.pi/3],
[np.pi/3, np.pi/3, np.pi/3],
[-np.pi/3, -np.pi/3, 0], [0, -np.pi/3, -np.pi/3],
[-np.pi/3, -np.pi/3, -np.pi/3],
[1, 2, 3], [1, -2, 3],
[[1, 2, 3, 1], [2, 3, 1, 2], [3, 1, 2, 3]]]
for angle in angles:
for order in orders:
with self.subTest(angle=angle, order=order):
rmat = at.euler_to_rotmat(angle, order=order)
rmat2 = np.eye(3)
for an, ax in zip(angle, order):
if ax.upper().lower() == 'x':
update = at.rot_x(an)
elif ax.upper().lower() == 'y':
update = at.rot_y(an)
elif ax.upper().lower() == 'z':
update = at.rot_z(an)
rmat2 = update @ rmat2
np.testing.assert_almost_equal(rmat, rmat2)
class TestRotX(TestCase):
def test_rot_x(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
mats = [[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 0.5, -np.sqrt(3)/2], [0, np.sqrt(3)/2, 0.5]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, -1, 0], [0, 0, -1]],
[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 0.5, np.sqrt(3)/2], [0, -np.sqrt(3)/2, 0.5]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
[[1, 0, 0], [0, 0.5, -np.sqrt(3)/2], [0, np.sqrt(3)/2, 0.5]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[1, 0, 0], [0, 0, 1], [0, -1, 0]],
[[1, 0, 0], [0, 0.5, np.sqrt(3)/2], [0, -np.sqrt(3)/2, 0.5]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_x(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestRotY(TestCase):
def test_rot_y(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
srt3d2 = np.sqrt(3)/2
mats = [[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[0.5, 0, srt3d2], [0, 1, 0], [-srt3d2, 0, 0.5]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[0.5, 0, -srt3d2], [0, 1, 0], [srt3d2, 0, 0.5]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [-1, 0, 0]],
[[0.5, 0, srt3d2], [0, 1, 0], [-srt3d2, 0, 0.5]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, -1], [0, 1, 0], [1, 0, 0]],
[[0.5, 0, -srt3d2], [0, 1, 0], [srt3d2, 0, 0.5]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_y(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestRotZ(TestCase):
def test_rot_z(self):
angles = [3*np.pi/2, np.pi, np.pi/2, np.pi/3, 0,
-3*np.pi/2, -np.pi, -np.pi/2, -np.pi/3,
[0, np.pi/2, np.pi/3],
[0, -np.pi/2, -np.pi/3]]
srt3d2 = np.sqrt(3)/2
mats = [[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[0.5, -srt3d2, 0], [srt3d2, 0.5, 0], [0, 0, 1]],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[-1, 0, 0], [0, -1, 0], [0, 0, 1]],
[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[0.5, srt3d2, 0], [-srt3d2, 0.5, 0], [0, 0, 1]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, -1, 0], [1, 0, 0], [0, 0, 1]],
[[0.5, -srt3d2, 0], [srt3d2, 0.5, 0], [0, 0, 1]]],
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 1, 0], [-1, 0, 0], [0, 0, 1]],
[[0.5, srt3d2, 0], [-srt3d2, 0.5, 0], [0, 0, 1]]]
]
for angle, solu in zip(angles, mats):
with self.subTest(angle=angle):
rmat = at.rot_z(angle)
np.testing.assert_almost_equal(rmat, solu)
class TestSkew(TestCase):
def test_skew(self):
skew_mat = at.skew([1, 2, 3])
np.testing.assert_array_equal(skew_mat, [[0, -3, 2], [3, 0, -1], [-2, 1, 0]])
skew_mat = at.skew([[1, 2], [2, 3], [3, 4]])
np.testing.assert_array_equal(skew_mat, [[[0, -3, 2], [3, 0, -1], [-2, 1, 0]],
[[0, -4, 3], [4, 0, -2], [-3, 2, 0]]])
class TestNLERP(TestCase):
def test_nlerp(self):
with self.subTest(input_type=list):
q0 = [0, 0, 0, 1]
q1 = [0.5, 0.5, 0.5, 0.5]
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt, q0)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt, q1)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (np.array(q0)+np.array(q1))/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = np.array(q0)*(1-0.25)+np.array(q1)*0.25
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.79)
qtrue = np.array(q0)*(1-0.79) + np.array(q1)*0.79
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
q0 = np.array([0.23, 0.45, 0.67, 0.2])
q0 /= np.linalg.norm(q0)
q1 = np.array([-0.3, 0.2, 0.6, 0.33])
q1 /= np.linalg.norm(q1)
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt, q0)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt, q1)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (q0+q1)/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = q0*(1-0.25)+q1*0.25
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.nlerp(q0, q1, 0.79)
# comes from ODTBX matlab function
qtrue = (1-0.79)*q0+0.79*q1
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
with self.subTest(input_type=at.Rotation):
q0 = at.Rotation([0, 0, 0, 1])
q1 = at.Rotation([0.5, 0.5, 0.5, 0.5])
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt.q, q0.q)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt.q, q1.q)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (q0.q.flatten()+q1.q.flatten())/2 # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = (q0.q.flatten()*(1-0.25)+q1.q.flatten()*0.25) # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
qt = at.nlerp(q0, q1, 0.79)
qtrue = q0.q.flatten()*(1-0.79)+q1.q.flatten()*0.79 # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
q0 = np.array([0.23, 0.45, 0.67, 0.2])
q0 /= np.linalg.norm(q0)
q0 = at.Rotation(q0)
q1 = np.array([-0.3, 0.2, 0.6, 0.33])
q1 /= np.linalg.norm(q1)
q1 = at.Rotation(q1)
qt = at.nlerp(q0, q1, 0)
np.testing.assert_allclose(qt.q, q0.q)
qt = at.nlerp(q0, q1, 1)
np.testing.assert_allclose(qt.q, q1.q)
qt = at.nlerp(q0, q1, 0.5)
qtrue = (q0.q.flatten()+q1.q.flatten())/2 # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
qt = at.nlerp(q0, q1, 0.25)
qtrue = q0.q.flatten()*(1-0.25)+q1.q.flatten()*0.25 # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
qt = at.nlerp(q0, q1, 0.79)
qtrue = (1-0.79)*q0.q.flatten() + 0.79*q1.q.flatten() # type: np.ndarray
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt.q.flatten(), qtrue)
class TestSLERP(TestCase):
def test_slerp(self):
with self.subTest(input_type=list):
q0 = [0, 0, 0, 1]
q1 = [0.5, 0.5, 0.5, 0.5]
qt = at.slerp(q0, q1, 0)
np.testing.assert_allclose(qt, q0)
qt = at.slerp(q0, q1, 1)
np.testing.assert_allclose(qt, q1)
qt = at.slerp(q0, q1, 0.5)
qtrue = (np.array(q0)+np.array(q1))/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.slerp(q0, q1, 0.25)
qtrue = (np.array(q0)+qtrue)/2
qtrue /= np.linalg.norm(qtrue)
np.testing.assert_allclose(qt, qtrue)
qt = at.slerp(q0, q1, 0.79)
# comes from ODTBX matlab function
qtrue = [0.424985851398278, 0.424985851398278, 0.424985851398278, 0.676875969682661]
np.testing.assert_allclose(qt, qtrue)
q0 = np.array([0.23, 0.45, 0.67, 0.2])
q0 /= np.linalg.norm(q0)
q1 = np.array([-0.3, 0.2, 0.6, 0.33])
import os
import glob
from contextlib import contextmanager
import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy import ndimage as ndi
from sklearn.linear_model import LogisticRegression as LR
import subprocess as sp
from gala import imio, features, agglo, evaluate as ev
@contextmanager
def tar_extract(fn):
sp.call(['tar', '-xzf', fn + '.tar.gz'])
ext_fn = os.path.basename(fn)
yield ext_fn
os.remove(ext_fn)
for sub_fn in glob.glob(ext_fn + '_*'):
os.remove(sub_fn)
rundir = os.path.dirname(__file__)
### fixtures
@pytest.fixture
def dummy_data():
frag = np.arange(1, 17, dtype=int).reshape((4, 4))
gt = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3] * 4, [3] * 4], dtype=int)
fman = features.base.Mock(frag, gt)
g = agglo.Rag(frag, feature_manager=fman, use_slow=True)
return frag, gt, g, fman
@pytest.fixture
def dummy_data_fast():
frag, gt, _, fman = dummy_data()
frag = ndi.zoom(frag, 2, order=0)
gt = ndi.zoom(gt, 2, order=0)
g = agglo.Rag(frag, feature_manager=fman)
return frag, gt, g, fman
### tests
def test_generate_flat_learning_edges(dummy_data):
"""Run a flat epoch and ensure all edges are correctly represented."""
frag, gt, g, fman = dummy_data
feat, target, weights, edges = g.learn_flat(gt, fman)
assert feat.shape == (24, 2)
assert tuple(edges[0]) == (1, 2)
assert tuple(edges[-1]) == (15, 16)
assert np.sum(target[:, 0] == 1) == 6 # number of non-merge edges
def test_generate_flat_learning_edges_fast(dummy_data_fast):
"""Run a flat epoch and ensure all edges are correctly represented."""
frag, gt, g, fman = dummy_data_fast
feat, target, weights, edges = g.learn_flat(gt, fman)
assert feat.shape == (24, 2)
assert tuple(edges[0]) == (1, 2)
assert tuple(edges[-1]) == (15, 16)
assert np.sum(target[:, 0] == 1) == 6 # number of non-merge edges
def test_generate_lash_examples(dummy_data):
"""Run a flat epoch and an active epoch of learning, compare learned sets.
The mock feature manager places all merge examples at (0, 0) in feature
space, and all non-merge examples at (1, 0), *in flat learning*. During
agglomeration, non-merge examples go to (0, 1), which confuses the flat
classifier (which has only learned the difference along the first feature
dimension).
This test checks for those differences in learning using a simple
logistic regression.
"""
frag, gt, g, fman = dummy_data
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='permissive',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) == 15 # number of merges is |nodes| - 1
# approx. same learning results at (0., 0.) and (1., 0.)
print([(fpred(i), pred(i)) for i in [[0, 0], [1, 0], [0, 1]]])
assert_allclose(fpred([0, 0]), 0.2, atol=0.1)
assert_allclose(pred([0, 0]), 0.2, atol=0.1)
assert_allclose(fpred([1, 0]), 0.65, atol=0.1)
assert_allclose(pred([1, 0]), 0.65, atol=0.1)
# difference between agglomerative and flat learning in point (0., 1.)
assert_allclose(fpred([0, 1]), 0.2, atol=0.1)
assert_allclose(pred([0, 1]), 0.6, atol=0.1)
def test_generate_lash_examples_fast(dummy_data_fast):
"""Run a flat epoch and an active epoch of learning, compare learned sets.
The mock feature manager places all merge examples at (0, 0) in feature
space, and all non-merge examples at (1, 0), *in flat learning*. During
agglomeration, non-merge examples go to (0, 1), which confuses the flat
classifier (which has only learned the difference along the first feature
dimension).
This test checks for those differences in learning using a simple
logistic regression.
"""
frag, gt, g, fman = dummy_data_fast
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='permissive',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) == 15 # number of merges is |nodes| - 1
# approx. same learning results at (0., 0.) and (1., 0.)
print([(fpred(i), pred(i)) for i in [[0, 0], [1, 0], [0, 1]]])
assert_allclose(fpred([0, 0]), 0.2, atol=0.2)
assert_allclose(pred([0, 0]), 0.2, atol=0.2)
assert_allclose(fpred([1, 0]), 0.65, atol=0.15)
assert_allclose(pred([1, 0]), 0.65, atol=0.15)
# difference between agglomerative and flat learning in point (0., 1.)
assert_allclose(fpred([0, 1]), 0.2, atol=0.2) # < 0.4
assert_allclose(pred([0, 1]), 0.65, atol=0.2) # > 0.45
def test_generate_gala_examples(dummy_data):
"""As `test_generate_lash_examples`, but using strict learning. """
frag, gt, g, fman = dummy_data
np.random.seed(99)
summary, allepochs = g.learn_agglomerate(gt, fman,
learning_mode='strict',
classifier='logistic regression',
min_num_epochs=5)
feat, target, weights, edges = summary
ffeat, ftarget, fweights, fedges = allepochs[0] # flat
lr = LR().fit(feat, target[:, 0])
flr = LR().fit(ffeat, ftarget[:, 0])
def pred(v):
return lr.predict_proba([v])[0, 1]
def fpred(v):
return flr.predict_proba([v])[0, 1]
assert len(allepochs[1][0]) > 15 # number of merges is more than LASH
# approx. same learning results at (0., 0.) and (1., 0.)
assert_allclose(fpred([0, 0]), 0.2, atol=0.1)
assert_allclose(pred([0, 0]), 0.2, atol=0.1)
assert_allclose(fpred([1, 0]), 0.64, atol=0.1)
assert_allclose(pred([1, 0]), 0.64, atol=0.1)
# difference between agglomerative and flat learning in point (0., 1.);
# greater separation than with LASH
assert_allclose(fpred([0, 1]), 0.2, atol=0.1)
assert_allclose(pred([0, 1]), 0.7, atol=0.1)
def test_generate_gala_examples_fast_updateedges(dummy_data_fast):
"""As `test_generate_lash_examples`, but using strict learning. """
frag, gt, g, fman = dummy_data_fast
g = agglo.Rag(frag, feature_manager=fman, update_unchanged_edges=True)
np.random.seed(99)
import os
# If server, need to use osmesa for pyopengl/pyrender
if os.cpu_count() > 20:
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
# https://github.com/marian42/mesh_to_sdf/issues/13
# https://pyrender.readthedocs.io/en/latest/install/index.html?highlight=ssh#getting-pyrender-working-with-osmesa
else:
os.environ['PYOPENGL_PLATFORM'] = 'egl' # default was pyglet, which sometimes hangs for unknown reasons: https://github.com/marian42/mesh_to_sdf/issues/19
import sys
import yaml
import logging
import logging.config
import time
import random
import math
import numpy as np
from numpy import array
import torch
import matplotlib.pyplot as plt
from src import INIT_TYPE, TEST_TYPE, GEN_TYPE
from src.sample_sdf import PointSampler
from src.sdf_net import SDFDecoder
from src.pointnet_encoder import PointNetEncoder
from src.cost_predictor import CostPredictor
from train_grasp import TrainGrasp
from src.dataset_grasp import TrainDataset
from eval_grasp import EvaluateGrasp
from util.misc import *
from util.mesh import *
class Runner:
def __init__(self, yaml_path, result_dir, device):
save__init__args(locals())
self.model_dir = result_dir + 'model/'
self.latent_img_dir = result_dir + 'latent_img/'
# Configure from yaml file
with open(yaml_path+'.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
self.config = config
self.voxel_resolution = config['voxel_resolution']
# always 1 because of the dataset design
self.batch_size = config['batch_size']
# NN params
self.dim_latent = config['dim_latent']
self.encoder_breadth = config['encoder_breadth']
self.decoder_breadth = config['decoder_breadth']
self.predictor_breadth = config['predictor_breadth']
# Set up networks, calculate number of params
self.encoder = PointNetEncoder(dim_latent=self.dim_latent,
breadth=self.encoder_breadth).to(device)
self.decoder = SDFDecoder(dim_latent=self.dim_latent,
breadth=self.decoder_breadth,
device=device).to(device)
self.predictor = CostPredictor(dim_latent=self.dim_latent,
dim_hidden=self.predictor_breadth).to(device)
print('Num of encoder parameters: %d' % sum(p.numel() for p in self.encoder.parameters() if p.requires_grad))
print('Num of decoder parameters: %d' % sum(p.numel() for p in self.decoder.parameters() if p.requires_grad))
print('Num of cost predictor parameters: %d' % sum(p.numel() for p in self.predictor.parameters() if p.requires_grad))
# Use one GPU
self.decoder_accessor = self.decoder
self.predictor_accessor = self.predictor
# Set up optimizer
self.optimizer = torch.optim.AdamW([
{'params': self.encoder.parameters(),
'lr': config['encoder_lr'],
'weight_decay': config['encoder_weight_decay']},
{'params': self.decoder.parameters(),
'lr': config['decoder_lr'],
'weight_decay': config['decoder_weight_decay']},
{'params': self.predictor.parameters(),
'lr': config['predictor_lr'],
'weight_decay': config['predictor_weight_decay']},
])
if config['decayLR_use']:
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.optimizer,
milestones=config['decayLR_milestones'],
gamma=config['decayLR_gamma'])
else:
self.scheduler = None
def create_dataset(self, env_dir_dict, embed_id_dir_dict,
num_sdf_available_per_obj, num_sdf_per_obj,
num_surface_per_obj, **kwargs):
'''
Create dataholder, to be updated once new distribution generated
# num_sdf_available_per_obj: number of sdf points for each object available before downsampling
# num_sdf_per_obj: number of sdf points for each object - target!
# num_surface_per_obj: number of surface points for each object (for pointnet encoder)
'''
self.train_data = TrainDataset(env_dir_dict,
embed_id_dir_dict,
num_sdf_available_per_obj,
num_sdf_per_obj,
num_surface_per_obj,
device='cpu')
self.train_dataloader = torch.utils.data.DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=True,
drop_last=True,
pin_memory=True,
num_workers=4)
def embed(self, epoch, norm_loss_ratio, latent_all, label_all, num_sdf_per_obj, clamp_lip):
"""
Runs one training epoch (encode, decode, predict) and overwrites the entries of latent_all for the objects seen.
"""
epoch_loss = 0
epoch_rec_loss = 0
epoch_reg_loss = 0
epoch_lip_loss = 0
num_batch = 0
# Switch NN mode
self.encoder.train()
self.decoder.train()
self.predictor.train()
l2 = torch.nn.MSELoss(reduction='none')
# Save all the predictions for debugging
pred_all = np.empty((0))
# Run batches
for batch_ind, data_batch in enumerate(self.train_dataloader):
# Zero gradient
self.optimizer.zero_grad(set_to_none=True)
###################### Extract data ######################
batch_sdf, batch_surface, batch_obj_id_chosen = data_batch
batch_sdf = batch_sdf.reshape(-1,4).to(self.device)
batch_sdf_values = batch_sdf[:,-1]
batch_sdf_points = batch_sdf[:,:3]
batch_surface = batch_surface.to(self.device)
batch_obj_id_chosen = batch_obj_id_chosen.squeeze(0)
###################### Encode ######################
batch_latent = self.encoder.forward(batch_surface) # batch x latent
###################### Decode ######################
batch_latent_all = batch_latent.repeat_interleave(num_sdf_per_obj, dim=0) # Assign latent to each point of the object
batch_sdf_pred = self.decoder.forward(batch_sdf_points, batch_latent_all) # Decode each latent/point to get sdf predictions
###################### Rec loss ######################
rec_loss = torch.mean((batch_sdf_pred - batch_sdf_values)**2)
###################### Reg loss ######################
batch_reward_pred = self.predictor.forward(batch_latent).flatten()
batch_label = torch.from_numpy(label_all[batch_obj_id_chosen]).float().to(self.device)
reg_loss = torch.mean(l2(batch_reward_pred, batch_label))
###################### Lip loss ######################
if clamp_lip is None:
lip_loss = torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2) # spectral norm
else:
lip_loss = (torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2)-clamp_lip*16)**2 # clamping
# Add reconstruction and regularization losses together
batch_loss = rec_loss+\
self.config['reg_loss_ratio']*reg_loss+\
self.config['lip_loss_ratio']*lip_loss+\
norm_loss_ratio*torch.mean(batch_latent**2)
# Backward pass to get gradients
batch_loss.backward()
# Clip gradient if specified
if self.config['gradientClip_use']:
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.config['gradientClip_thres'])
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.config['gradientClip_thres'])
torch.nn.utils.clip_grad_norm_(self.predictor.parameters(), self.config['gradientClip_thres'])
# Update weights using gradient
self.optimizer.step()
# Store loss
epoch_loss += batch_loss.item()
epoch_rec_loss += rec_loss.item()
epoch_reg_loss += reg_loss.item()
epoch_lip_loss += lip_loss.item()
num_batch += 1
# Update latents for all distributions
latent_all[batch_obj_id_chosen] =batch_latent.detach().cpu().numpy()
pred_all = np.concatenate((pred_all, batch_reward_pred.detach().cpu().numpy()))
# Decay learning rate if specified
if self.scheduler is not None:
self.scheduler.step()
# Get batch average loss
epoch_loss /= num_batch
epoch_rec_loss /= num_batch
epoch_reg_loss /= num_batch
epoch_lip_loss /= num_batch
return epoch_loss, epoch_rec_loss, epoch_reg_loss, epoch_lip_loss, latent_all, pred_all
def get_predictor_lip(self):
return self.predictor_accessor.get_lip()
def encode_batch(self, surface_batch):
"""
Assume the shape as N x num_surface_per_obj x 3
"""
surface_test = torch.from_numpy(surface_batch).float().to(self.device)
latent_test = self.encoder.forward(surface_test) # num_test_obj x latent_dim
return latent_test
def predict(self, latent):
"""
Using the cost predictor
"""
if isinstance(latent, np.ndarray):
latent = torch.from_numpy(latent).float().to(self.device)
with torch.no_grad():
pred = self.predictor.forward(latent).detach().cpu()
return pred.squeeze(1).numpy()
# return torch.where(pred > 0.5, 1., 0.).numpy()
def adversarial(self, latent, eta=1.0, gamma=1.0, steps=10, target_drop=0.0):
"""
Adversarially perturb latent using the cost predictor and evaluated label/cost. Following https://github.com/duchi-lab/certifiable-distributional-robustness/blob/master/attacks_tf.py
Also see https://github.com/ricvolpi/generalize-unseen-domains/blob/master/model.py
Only takes a single datapoint for now; tricky to get batch to work
"""
l2 = torch.nn.MSELoss()
latent = torch.from_numpy(latent).float().to(self.device).requires_grad_().reshape(1,-1)
latent_detach = latent.detach()
# Gradient ascent
max_num_itr = 10
for _ in range(max_num_itr):
# make a copy
eta_env = eta
gamma_env = gamma
latent_adv = latent.clone()
ini_pred_reward = self.predictor.forward(latent_adv)
latent_path_all = np.zeros((steps+1, latent.shape[1]))
latent_path_all[0] = latent_adv.detach().cpu().numpy()
for step in range(steps):
pred_reward = self.predictor.forward(latent_adv) # reward
loss = -pred_reward - gamma_env*l2(latent_adv, latent_detach)
grad = torch.autograd.grad(loss, latent_adv)[0] # returns a tuple of grads
latent_adv += eta_env*grad
# logging.info(f'step {step}, pred {pred_reward.item()}')
latent_path_all[step+1] = latent_adv.detach().cpu().numpy()
if (ini_pred_reward-pred_reward) > target_drop*1.5:
eta *= 0.8 # too much perturbation
gamma *= 2.0
elif (ini_pred_reward-pred_reward) > target_drop:
break # good
else:
eta *= 1.2 # too little perturbation
gamma *= 0.5
return latent_adv.detach().cpu().numpy(), latent_path_all
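# Minimal standalone sketch of the perturbation step used in adversarial()
# above (this helper and its argument `f` are illustrative assumptions; `f`
# stands in for any differentiable scalar reward predictor such as the cost
# predictor): ascend -f(z) - gamma * ||z - z0||^2 so the predicted reward
# drops while z stays close to the starting latent z0.
def _adversarial_step_sketch(f, z0, eta=1.0, gamma=1.0, steps=10):
    z = z0.clone().detach().requires_grad_(True)
    for _ in range(steps):
        objective = -f(z) - gamma * torch.mean((z - z0.detach()) ** 2)
        grad = torch.autograd.grad(objective, z)[0]
        with torch.no_grad():
            z += eta * grad  # gradient ascent on the penalized objective
    return z.detach()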
def generate(self, epoch, gen_dir, base_latent_all, eta, gamma, steps, target_drop=0.1, max_num_attempt=5):
"""
Generate new objects by adversarially perturbing existing latents using the cost predictor
Sometimes some latent cannot generate new object, so we need to re-sample latent adversarially for the same new distribution
"""
num_new = len(base_latent_all)
old_latent_all = base_latent_all
new_latent_all = np.zeros((num_new, self.dim_latent))
# Another attempt if not all objects processed
flags = np.ones((num_new))
height_all = np.zeros((num_new))
keep_concave_part = config['keep_concave_part']
for _ in range(max_num_attempt):
for env_ind in range(num_new):
# Skip if already generated
if flags[env_ind] < 1:
continue
# Generate new
old_latent = base_latent_all[env_ind]
new_latent, latent_path_all = self.adversarial(
latent=old_latent,
eta=eta, gamma=gamma, steps=steps,
target_drop=target_drop)
# Get mesh using decoder, possibly corrupt
old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
if new_mesh is None or old_mesh is None:
print('Cannot generate from latent!')
continue
# Try processing
try:
old_mesh = process_mesh(old_mesh,
scale_down=True,
smooth=False, #!
random_scale=False)
new_mesh = process_mesh(new_mesh,
scale_down=True,
smooth=False, #!
random_scale=False)
# Scale to original height
new_mesh = match_mesh_height(new_mesh, old_mesh)
# Export as decomposed stl and urdf - create new subdir for convex obj - for pybullet
ensure_directory_hard(gen_dir + str(env_ind) + '/')
convex_pieces = save_convex_urdf(new_mesh,
gen_dir,
env_ind,
mass=0.1,
keep_concave_part=keep_concave_part)
except:
print('Cannot process generated!')
continue
if len(convex_pieces) > 20:
print('Too concave!')
continue
#? Use decomposed parts as stl? Avoids peculiarities when sampling sdf that cause reconstruction issues
if keep_concave_part: # Export as (un-decomposed) stl - for sdf
save_mesh = new_mesh
else:
save_mesh = create_mesh_from_pieces(convex_pieces)
save_mesh.export(gen_dir+str(env_ind)+'.stl')
# Add to all sampled dist; mark generated
new_latent_all[env_ind] = new_latent
flags[env_ind] = 0
height_all[env_ind]=(save_mesh.bounds[1]-save_mesh.bounds[0])[2]
# Quit if all objects perturbed
if np.sum(flags) < 1e-3:
break
# Find closer latent
eta /= 2
gamma *= 2
# steps = min(int(steps/2), 1)
logging.info(f'Epoch {epoch} generate, double gamma locally')
return old_latent_all, new_latent_all, flags, height_all
def visualize(self, old_latent_all, new_latent_all, num_random_obj=20):
"""
Sample latent from all existing and visualize objects
"""
num_obj_generated = 0
num_obj_attempt = 0
obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
# Use subplots for all objects
fig_obj, _ = plt.subplots(5, 4) # assume 20 rn
while num_obj_generated < num_random_obj:
# Sample more if used up
if num_obj_attempt >= num_random_obj:
obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
num_obj_attempt = 0
# Extract sample
old_obj = old_latent_all[obj_ind_all[num_obj_attempt]]
new_obj = new_latent_all[obj_ind_all[num_obj_attempt]]
# Try
num_obj_attempt += 1
# Reconstruct mesh from latent
old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
if old_mesh is None or new_mesh is None:
print('Cannot generate sample!')
continue
# Center, orient, scale
try:
old_mesh = process_mesh(old_mesh,
scale_down=True,
smooth=False,
random_scale=False)
new_mesh = process_mesh(new_mesh,
scale_down=True,
smooth=False,
random_scale=False)
except:
print('Cannot process sampled!')
continue
# Save mesh for inspection - bot not decomposed
if num_obj_generated < 5:
old_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_old.stl')
new_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_new.stl')
# Predict
old_reward =self.predict(latent=old_obj.reshape(1,-1))[0]
new_reward =self.predict(latent=new_obj.reshape(1,-1))[0]
# Save image of 2D cross section
slice_2D_old, _ = old_mesh.section(plane_origin=old_mesh.centroid,
plane_normal=[0,0,1]).to_planar()
slice_2D_new, _ = new_mesh.section(plane_origin=new_mesh.centroid,
plane_normal=[0,0,1]).to_planar()
ax = fig_obj.axes[num_obj_generated]
ax.set_aspect('equal')
ax.scatter(slice_2D_old.vertices[:,0], slice_2D_old.vertices[:,1],
s=1,color='lightgray')
ax.scatter(slice_2D_new.vertices[:,0], slice_2D_new.vertices[:,1],
s=2,color='gray')
ax.text(x=0., y=0.01, s="{:.2f}".format(old_reward), fontsize=12, color='coral')
ax.text(x=0., y=-0.01, s="{:.2f}".format(new_reward), fontsize=12, color='red')
ax.axis('off')
# Count
num_obj_generated += 1
plt.savefig(self.latent_img_dir+str(epoch)+'_random_obj.png')
plt.close()
def save_model(self, dir):
torch.save(self.encoder.state_dict(), dir+'encoder.pt')
torch.save(self.decoder.state_dict(), dir+'decoder.pt')
torch.save(self.predictor.state_dict(), dir+'predictor.pt')
def load_model(self, dir):
self.encoder.load_state_dict(torch.load(dir+'encoder.pt', map_location=self.device))
self.decoder.load_state_dict(torch.load(dir+'decoder.pt', map_location=self.device))
self.predictor.load_state_dict(torch.load(dir+'predictor.pt', map_location=self.device))
def get_non_test_num_env_list(env_dict, dir_type_all=[INIT_TYPE, GEN_TYPE]):
l = []
for env_id_list, _, dir_type in env_dict.values():
if dir_type in dir_type_all:
l += [len(env_id_list)]
return l
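# Illustrative note (sketch, values assumed): with the env_dir_dict built in __main__ below,
# this returns one env count per non-test directory, e.g.
#   get_non_test_num_env_list(env_dir_dict) -> [num_env_per_initial_dir, ..., num_env_per_gen]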
if __name__ == '__main__':
# from IPython import embed; embed()
if os.cpu_count() > 20: # on the server, the default fork start method does not work with PyTorch, though it works fine on desktop
import multiprocessing
multiprocessing.set_start_method('forkserver')
# Read config
yaml_file_name = sys.argv[1]
yaml_path = 'configs/'+yaml_file_name
with open(yaml_path+'.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# Fix seeds
seed = config['seed']
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = True # may speed up
# Hardware
cuda_idx = config['cuda_idx']
device = 'cuda:'+str(cuda_idx)
# Misc
num_eval_per_env = config['num_eval_per_env']
dim_latent = config['dim_latent']
norm_loss_ratio = config['norm_loss_ratio']
clamp_lip = config['clamp_lip']
# Data
initial_env_dir_list = config['initial_env_dir_list']
num_env_per_initial_dir = config['num_env_per_initial_dir']
test_env_dir_list = config['test_env_dir_list']
num_env_per_test_dir = config['num_env_per_test_dir']
# Generation (from latent)
num_epoch_per_gen = config['num_epoch_per_gen']
num_epoch_before_first_gen = config['num_epoch_before_first_gen']
num_env_per_gen = config['num_env_per_gen']
# Improving policy
num_env_per_retrain = config['num_env_per_retrain']
num_epoch_per_retrain = config['num_epoch_per_retrain']
num_epoch_before_first_retrain = config['num_epoch_before_first_retrain']
mu_list = config['mu_list']
mu = config['mu']
sigma = config['sigma']
retrain_args = config['retrain_args']
eval_args = config['eval_args']
# Adversarial (gradient ascent)
eta = config['eta']
gamma = config['gamma']
ga_steps = config['ga_steps']
target_drop_percentage = config['target_drop_percentage']
target_drop_percentage_rate = config['target_drop_percentage_rate']
# Env params
sdf_args = config['sdf_args']
# Initialize folders
data_parent_dir = config['data_parent_dir']
result_dir = 'result/'+yaml_file_name+'/'
model_dir = result_dir + 'runner_model/'
latent_img_dir = result_dir + 'latent_img/'
data_dir = data_parent_dir+yaml_file_name+'/'
ensure_directory(result_dir)
ensure_directory(model_dir)
ensure_directory(latent_img_dir)
ensure_directory(data_dir)
# Initialize dir dict: key is dir_path, value is a tuple of (1) env id list, (2) height list, and (3) dir type (INIT_TYPE, TEST_TYPE, or GEN_TYPE)
env_dir_dict = {}
for env_dir in initial_env_dir_list:
height_all =list(np.load(env_dir+'dim.npy')[:num_env_per_initial_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_initial_dir)], height_all, INIT_TYPE)
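# Illustrative shape of each entry (values here are made up for the sketch):
#   env_dir_dict['envs/initial_0/'] = ([0, 1, ..., num_env_per_initial_dir - 1],  # env ids
#                                      [0.12, 0.09, ...],                         # heights from dim.npy
#                                      INIT_TYPE)                                 # dir type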
# Save a copy of configuration
with open(result_dir+'config.yaml', 'w') as f:
yaml.dump(config, f, sort_keys=False)
# Initialize evaluating policy (always cpu)
evaluator = EvaluateGrasp(initial_policy_path=None,
mu_list=mu_list, mu=mu, sigma=sigma, **eval_args)
# Initialize training policy
trainer = TrainGrasp(result_dir=result_dir, device=device,
mu=mu, sigma=sigma, **retrain_args)
# Initialize running env
runner = Runner(yaml_path=yaml_path, result_dir=result_dir, device=device)
# Initialize point sampler
point_sampler = PointSampler(**sdf_args)
# Training details to be recorded
train_loss_list = []
train_rec_loss_list = []
train_reg_loss_list = []
train_lip_loss_list = []
train_success_list = []
test_success_list = []
train_lip_list = []
# Save the latent and (ground-truth) label/reward of all images
latent_all = np.zeros((num_env_per_initial_dir*len(initial_env_dir_list),
dim_latent))
# Add test dir to dict
for env_dir in test_env_dir_list:
height_all = list(np.load(env_dir+'dim.npy')[:num_env_per_test_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_test_dir)], height_all, TEST_TYPE)
# Name of saved training details
train_details_path = None
# Initialize counter
num_epoch_since_last_gen = 0
num_epoch_since_last_retrain = 0
num_env_gen = 0
num_dir_gen = 0
num_retrain = 0
# Logging
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(filename=result_dir+'log.txt',
level=logging.NOTSET,
format='%(process)d-%(levelname)s-%(asctime)s-%(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.info('start')
# Run
num_epoch = (config['num_retrain']-2)*num_epoch_per_retrain+num_epoch_before_first_retrain # minus 2 to account for retrain at epoch 0
epoch = 0
while epoch <= num_epoch:
# Record time for each epoch
epoch_start_time = time.time()
######################### New #########################
# Generate a new distribution every some epochs
if epoch >= num_epoch_before_first_gen and \
num_epoch_since_last_gen >= num_epoch_per_gen:
# Declare new path
new_gen_dir = data_dir + 'gen_' + str(num_dir_gen) + '/'
ensure_directory(new_gen_dir)
# Adversarially generate and save new envs - note that not all latents are updated during the last embedding, since only a subset of envs is embedded now
#? Favor sampling old envs with higher reward, to prevent generating envs that are too difficult - not all envs are re-evaluated in later stages of training, so they are less likely to be sampled, but this should be ok
print('Generating new...')
old_env_weights_all = np.exp(label_all*0) # uniform weight
old_env_weights_all /= np.sum(old_env_weights_all)
adv_env_id_all, _ = weighted_sample_without_replacement([*range(len(label_all))], weights=old_env_weights_all, k=min(num_env_per_gen, len(label_all)))
# Estimate the range of predictions
pred_range = np.max(pred_all)-np.min(pred_all)
target_drop = pred_range*target_drop_percentage
# Save hist
fig = plt.figure()
plt.hist(pred_all, bins=np.linspace(0.0, 1.0, 20))
plt.savefig(latent_img_dir+str(epoch)+'_pred_hist.png')
plt.close(fig)
# Perturb sampled latent adversarially
old_latent, new_latent, flags, height_all = runner.generate(
epoch=epoch,
gen_dir=new_gen_dir,
base_latent_all=latent_all[adv_env_id_all],
eta=eta, gamma=gamma, steps=ga_steps,
target_drop=target_drop)
# Filter ones actually generated
new_env_id_list = np.where(flags<1)
from __future__ import print_function
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from .query import gaussian_query
from .randomization import randomization
class screening(gaussian_query):
def __init__(self,
observed_data,
covariance,
randomizer,
perturb=None):
self.observed_score_state = -observed_data # -Z if Z \sim N(\mu,\Sigma), X^Ty in regression setting
self.nfeature = p = self.observed_score_state.shape[0]
self.covariance = covariance
self.randomizer = randomizer
self._initial_omega = perturb
def fit(self, perturb=None):
gaussian_query.fit(self, perturb=perturb)
self._randomized_score = self.observed_score_state - self._initial_omega
return self._randomized_score, self._randomized_score.shape[0]
def multivariate_targets(self, features, dispersion=1.):
"""
Entries of the mean of \Sigma[E,E]^{-1}Z_E
"""
score_linear = self.covariance[:, features].copy() / dispersion
Q = score_linear[features]
cov_target = np.linalg.inv(Q)
observed_target = -np.linalg.inv(Q).dot(self.observed_score_state[features])
crosscov_target_score = -score_linear.dot(cov_target)
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target * dispersion, crosscov_target_score.T * dispersion, alternatives
def full_targets(self, features, dispersion=1.):
"""
Entries of the mean of \Sigma[E,E]^{-1}Z_E
"""
score_linear = self.covariance[:, features].copy() / dispersion
Q = self.covariance / dispersion
cov_target = (np.linalg.inv(Q)[features])[:, features]
observed_target = -np.linalg.inv(Q).dot(self.observed_score_state)[features]
crosscov_target_score = -np.identity(Q.shape[0])[:, features]
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target * dispersion, crosscov_target_score.T * dispersion, alternatives
def marginal_targets(self, features):
"""
Entries of the mean of Z_E
"""
score_linear = self.covariance[:, features]
Q = score_linear[features]
cov_target = Q
observed_target = -self.observed_score_state[features]
crosscov_target_score = -score_linear
alternatives = ['twosided'] * features.sum()
return observed_target, cov_target, crosscov_target_score.T, alternatives
class marginal_screening(screening):
def __init__(self,
observed_data,
covariance,
randomizer,
threshold,
perturb=None):
threshold = np.asarray(threshold)
if threshold.shape == ():
threshold = np.ones_like(observed_data) * threshold
self.threshold = threshold
screening.__init__(self,
observed_data,
covariance,
randomizer,
perturb=None)
def fit(self, perturb=None):
_randomized_score, p = screening.fit(self, perturb=perturb)
active = np.fabs(_randomized_score) >= self.threshold
self._selected = active
self._not_selected = ~self._selected
sign = np.sign(-_randomized_score)
active_signs = sign[self._selected]
sign[self._not_selected] = 0
self.selection_variable = {'sign': sign,
'variables': self._selected.copy()}
self.observed_opt_state = (np.fabs(_randomized_score) - self.threshold)[self._selected]
self.num_opt_var = self.observed_opt_state.shape[0]
opt_linear = np.zeros((p, self.num_opt_var))
opt_linear[self._selected,:] = np.diag(active_signs)
opt_offset = np.zeros(p)
opt_offset[self._selected] = active_signs * self.threshold[self._selected]
opt_offset[self._not_selected] = _randomized_score[self._not_selected]
self._setup = True
A_scaling = -np.identity(len(active_signs))
b_scaling = np.zeros(self.num_opt_var)
self._setup_sampler(A_scaling,
b_scaling,
opt_linear,
opt_offset)
return self._selected
@staticmethod
def type1(observed_data,
covariance,
marginal_level,
randomizer_scale,
perturb=None):
'''
Threshold
'''
randomized_stdev = np.sqrt(np.diag(covariance) + randomizer_scale**2)
p = covariance.shape[0]
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
threshold = randomized_stdev * ndist.ppf(1. - marginal_level / 2.)
return marginal_screening(observed_data,
covariance,
randomizer,
threshold,
perturb=perturb)
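# Minimal usage sketch for the marginal screening selector above (synthetic data;
# the marginal_level and randomizer_scale values are illustrative assumptions):
def _marginal_screening_example(p=20, marginal_level=0.1, randomizer_scale=1.):
    Z = np.random.standard_normal(p)           # observed data, e.g. marginal Z-scores
    covariance = np.identity(p)
    selector = marginal_screening.type1(Z, covariance, marginal_level, randomizer_scale)
    selected = selector.fit()                  # boolean mask of selected coordinates
    return selected, selector.marginal_targets(selected)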
# Stepup procedures like Benjamini-Hochberg
def stepup_selection(Z_values, stepup_Z):
absZ_argsort = np.argsort(np.fabs(Z_values))[::-1]
absZ_sorted = np.fabs(Z_values)[absZ_argsort]
survivors = absZ_sorted - stepup_Z >= 0
if np.any(survivors):
num_selected = max(np.nonzero(survivors)[0]) + 1
return (num_selected, # how many selected
absZ_argsort[:num_selected], # ordered indices of those selected
stepup_Z[num_selected - 1]) # the selected are greater than this number
else:
return 0, None, None
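# Sketch of Benjamini-Hochberg style cutoffs for stepup_selection and the stepup class
# below (q and sigma are illustrative assumptions, not values from the original code):
def _bh_cutoffs_example(Z_values, q=0.2, sigma=1.):
    p = Z_values.shape[0]
    # non-increasing, non-negative boundary for two-sided Z statistics
    stepup_Z = sigma * ndist.ppf(1 - q * np.arange(1, p + 1) / (2 * p))
    return stepup_selection(Z_values, stepup_Z)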
class stepup(screening):
def __init__(self,
observed_data,
covariance,
randomizer,
stepup_Z,
perturb=None):
screening.__init__(self,
observed_data,
covariance,
randomizer,
perturb=None)
self.stepup_Z = stepup_Z
if not (np.all(sorted(self.stepup_Z)[::-1] == self.stepup_Z) and
np.all(np.greater_equal(self.stepup_Z, 0))):
raise ValueError('stepup Z values should be non-negative and non-increasing')
def fit(self, perturb=None):
# condition on all those that survive, their sign,
# which was the last past the threshold
# and the observed (randomized) Z values of those that don't
_randomized_score, p = screening.fit(self, perturb=perturb)
K, selected_idx, last_cutoff = stepup_selection(_randomized_score, self.stepup_Z)
if K > 0:
self._selected = np.zeros(p, dtype=bool)
self._selected[selected_idx] = 1
self._not_selected = ~self._selected
sign = np.sign(-_randomized_score)
active_signs = sign[selected_idx]
sign[self._not_selected] = 0
self.selection_variable = {'sign': sign.copy(),
'variables': self._selected.copy(),
}
self.num_opt_var = self._selected.sum()
self.observed_opt_state = np.zeros(self.num_opt_var)
self.observed_opt_state[:] = np.fabs(_randomized_score[selected_idx]) - last_cutoff
opt_linear = np.zeros((p, self.num_opt_var))
for j in range(self.num_opt_var):
opt_linear[selected_idx[j], j] = active_signs[j]
opt_offset = np.zeros(p)
opt_offset[self._selected] = active_signs * last_cutoff
opt_offset[self._not_selected] = _randomized_score[self._not_selected]
self._setup = True
A_scaling = -np.identity(self.num_opt_var)
import numpy as np
import utils.gen_cutouts as gc
from sklearn import metrics
import pandas as pd
import ipdb
import matplotlib
from matplotlib import pyplot as plt
matplotlib.rcParams['mathtext.fontset'] = 'stixsans'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
MEAN_TEMP = 2.726 * (10**6)
DEFAULT_FONT = 24
import os
from global_settings import DATA_PATH, FULL_DATA_PATH, FULL_DATA_LABEL_PATH, CNN_MODEL_OUTPUT_DIR, CACHE_FULLDF, CACHE_MAPPED_HALOS, CACHE_FULLDF_DIST2EDGE_CAL
def prepare_data_class(dir_test, num_frequency=3, get_all_components=False, label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
#y_data = np.load(dir_test + "1025_hashalo_freq%03i.npy"%148) # y data (labels)
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
#samples has CMB+TSZ
try:
com = ['samples','ksz','ir_pts','rad_pts','dust']
x_data_all['base'] = _load_help("1025_samples_freq%03i{}.npy".format(suffix))
ksz_comp = _load_help("1025_ksz_freq%03i{}.npy".format(suffix))
x_data_all['ksz'] = [x_data_all['base'][i] + ksz_comp[i] for i in range(3)]
ir_comp = _load_help("1025_ir_pts_freq%03i{}.npy".format(suffix))
x_data_all['ir'] = [x_data_all['ksz'][i] + ir_comp[i] for i in range(3)]
rad_comp = _load_help("1025_rad_pts_freq%03i{}.npy".format(suffix))
x_data_all['rad'] = [x_data_all['ir'][i] + rad_comp[i] for i in range(3)]
dust_comp = _load_help("1025_dust_freq%03i{}.npy".format(suffix))
x_data_all['dust'] = [x_data_all['rad'][i] + dust_comp[i] for i in range(3)]
except Exception as err:
print("error: ", err)
print("reading only the composite")
x_data_all['dust'] = _load_help("1025_skymap_freq%03i{}.npy".format(suffix))
#return x_data_all['dust'], y_data
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
return x_data if get_all_components else x_data['dust'], y_data[idx], idx
return x_data if get_all_components else x_data['dust'], y_data
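# Usage sketch (the directory path is a placeholder assumption; it must contain the
# .npy cutout files named above):
def _example_prepare_data(dir_test="data/sz_cutouts/"):
    x, y = prepare_data_class(dir_test, num_frequency=3, get_all_components=False)
    return x.shape, y.mean()   # (nsamples, 3, 10, 10), fraction of cutouts with a halo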
def prepare_data_class2(dir_test, num_frequency=3, component="skymap", label_fname="1025_hashalo_freq%03i.npy" % 148,
balanced=False,
use_noise=True,
get_test_idx=False,
suffix=""):
"""
read data from dir_test, and prepare data with different noise level (components)
"""
freqs=[90,148,219]
def _load_help(name_format):
paths = [os.path.join(dir_test, name_format%freq) for freq in freqs]
ret = [np.load(p) for p in paths]
#print(paths)
return ret
# set file names for data
y_data = np.load(os.path.join(dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
nsamples = len(y_data)
#load data into dictionary
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
#load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in ["_90","_150", "_220"]]
noises = [noises[0]*2.8, noises[1]*2.6, noises[2]*6.6]
else:
noises = [0., 0., 0.]
#samples has CMB+TSZ
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples,num_frequency,10,10),dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:,i,:,:] = np.squeeze(x_data_all[com1][i]*k2uk*Tcmb) + noises[i]
else:
x_data[com1][:,0,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,0,:,:] += np.squeeze(x_data_all[com1][1]*k2uk*Tcmb) + noises[1]
if num_frequency > 1:
x_data[com1][:,1,:,:] = -np.squeeze(x_data_all[com1][2]*k2uk*Tcmb) - noises[2]
x_data[com1][:,1,:,:] += np.squeeze(x_data_all[com1][0]*k2uk*Tcmb) + noises[0]
splits = np.asarray([0.8, 0.2])
splits = np.round(splits / splits.sum() * nsamples).astype(int).cumsum()
split_idx = np.split(np.arange(nsamples),splits[:-1])
x_data, x_test = {k: x_data[k][split_idx[0]] for k in x_data.keys()}, {k: x_data[k][split_idx[-1]] for k in x_data.keys()}
y_data, y_test = y_data[split_idx[0]], y_data[split_idx[-1]]
nsamples = len(y_data)
if balanced:
n_pos = int(y_data.sum())
idx = np.arange(nsamples)
idx = np.concatenate([idx[y_data==0.0][:n_pos], idx[y_data==1.0]])
x_data = {k: x_data[k][idx] for k in x_data.keys()}
if get_test_idx: return x_data[component], y_data[idx], x_test[component], y_test, idx, split_idx[-1]
return x_data[component], y_data[idx], x_test[component], y_test, idx
if get_test_idx:
return x_data[component], y_data, x_test[component], y_test, split_idx[-1]
return x_data[component], y_data, x_test[component], y_test
class DataHolder:
def __init__(self, data, label, idx):
self.data = data
self.label = label
self.idx = idx
def get(self, which, ratio=None, incl_idx=False):
curr_idx = self.idx[which]
y_data = self.label[curr_idx]
if ratio is not None:
n_pos = int(y_data.sum())
idx = np.arange(len(y_data))
idx = np.concatenate([idx[y_data == 0.0][:int(ratio * n_pos)], idx[y_data == 1.0]])
curr_idx = curr_idx[idx]
if incl_idx:
return self.data[curr_idx], self.label[curr_idx], curr_idx
return self.data[curr_idx], self.label[curr_idx]
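# Usage sketch (assumes data/label arrays and a split-index dict like the one built
# by DataGetter below):
def _example_data_holder(data, label, split_idx):
    holder = DataHolder(data, label, split_idx)
    x_train, y_train = holder.get('train')          # full training split
    x_bal, y_bal = holder.get('train', ratio=1.0)   # roughly 1 negative per positive
    return x_train.shape, x_bal.shape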
class DataGetter:
WO_DUST_MAPPING = ("dust", ['samples', 'ksz', 'ir_pts', 'rad_pts'])
def __init__(self, dir_test, overlap=False):
self.dir_test = dir_test
self.overlap = overlap
self.halocounter = gc.HalosCounter(overlap=overlap)
df = self.halocounter.get_complete_df()
if overlap:
df = df.reset_index().rename(columns={"index": "cutout_id"})
test_idx = df[(df['cutout_ra'] >= 0.5 * 90) & (df['cutout_dec'] > 0.5 * 90)].index
train_idx = df[~df.index.isin(test_idx)].index
n_samples = len(train_idx)
splits = np.asarray([0.65, 0.1])
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
#print(splits)
#print(train_idx, len(train_idx))
split_idx = np.split(train_idx, splits[:-1])
split_idx = [split_idx[0], split_idx[1], test_idx]
#print(len(split_idx[0]), len(split_idx[1]), len(split_idx[2]))
#print(split_idx[0], split_idx[1], split_idx[2])
else:
n_samples = df.shape[0]
splits = np.asarray([0.7, 0.1, 0.2]) # (train ratio, valid ratio, test ratio)
splits = np.round(splits / splits.sum() * n_samples).astype(int).cumsum()
split_idx = np.split(np.arange(n_samples), splits[:-1])
#print(list(map(len, split_idx)), df.shape)
self.split_idx = {"train":split_idx[0], 'valid':split_idx[1], 'test':split_idx[2]}
pass
def get_labels(self, thres=5e13, which='full'):
if isinstance(thres, float) or isinstance(thres, int):
thres = ("%0.0e"%(thres)).replace("+", "")
label_fname = {"5e13": "m5e13_z0.25_y.npy", "2e14":"m2e14_z0.5_y.npy"}[thres]
y_data = np.load(os.path.join(self.dir_test, label_fname))
y_data[y_data > 1] = 1
y_data = y_data.astype(float)
if which == 'full': return y_data
return y_data[self.split_idx[which]]
def get_data(self, component, thres=5e13, use_noise=False, num_frequency=3):
suffix = "_overlap" if self.overlap else ""
freqs = [90, 148, 219]
def _load_help(name_format):
paths = [os.path.join(self.dir_test, name_format % freq) for freq in freqs]
return [np.load(p) for p in paths]
y_data = self.get_labels(thres, which='full')
nsamples = len(y_data)
x_data_all = {}
# load data
k2uk = 1.0e6
Tcmb = 2.726
# load noise (for SPT-3G 1500 sq deg patch, it's [2.8,2.6,6.6]uK-arcmin)
if use_noise:
noises = [np.load(os.path.join(self.dir_test, "noise_1uK-arcmin{}{}.npy".format(s, suffix))) for s in
["_90", "_150", "_220"]]
noises = [noises[0] * 2.8, noises[1] * 2.6, noises[2] * 6.6]
else:
noises = [0., 0., 0.]
# samples has CMB+TSZ
if isinstance(component, str):
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(component, suffix))
elif isinstance(component,tuple):
component, lc = component
x_data_all[component] = _load_help("1025_{}_freq%03i{}.npy".format(lc[0], suffix))
for cc in lc[1:]:
tx = _load_help("1025_{}_freq%03i{}.npy".format(cc, suffix))
assert len(tx) == len(x_data_all[component])
x_data_all[component] = [x_data_all[component][i] + tx[i] for i in range(len(tx))]
x_data = {}
for com1 in x_data_all.keys():
# add noise
x_data[com1] = np.empty((nsamples, num_frequency, 10, 10), dtype=np.float64)
if num_frequency == 3:
for i in range(3):
x_data[com1][:, i, :, :] = np.squeeze(x_data_all[com1][i] * k2uk * Tcmb) + noises[i]
else:
x_data[com1][:, 0, :, :] = -np.squeeze(x_data_all[com1][2] * k2uk * Tcmb) - noises[2]
x_data[com1][:, 0, :, :] += np.squeeze(x_data_all[com1][1] * k2uk * Tcmb) + noises[1]
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 15 10:03:23 2017
@author: thuzhang
"""
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import svm
from sklearn import metrics
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import preprocessing
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
File='USA_ALL.csv'
OriginData=pd.read_table(File,sep=",")
Data = OriginData.values.astype(np.float32)
Start=0
End=8000
OBSPWR=Data[Start:End,0]
HR_FCSR=Data[Start:End,1]
OBS_DIFF=Data[Start:End,2]
HR_DIFF=Data[Start:End,3]
HR_ERROR=Data[Start:End,4]
DIFF_ERROR=Data[Start:End,5]
def ClearData(Ori,Standard,index=0.6,lo=0.1):
Max = np.max(Standard)
import copy
import numpy as np
import torch
class Memory:
def __init__(self, memory_size, nb_total_classes, rehearsal, fixed=True):
self.memory_size = memory_size
self.nb_total_classes = nb_total_classes
self.rehearsal = rehearsal
self.fixed = fixed
self.x = self.y = self.t = None
self.nb_classes = 0
@property
def memory_per_class(self):
if self.fixed:
return self.memory_size // self.nb_total_classes
return self.memory_size // self.nb_classes if self.nb_classes > 0 else self.memory_size
def get_dataset(self, base_dataset):
dataset = copy.deepcopy(base_dataset)
dataset._x = self.x
dataset._y = self.y
dataset._t = self.t
return dataset
def get(self):
return self.x, self.y, self.t
def __len__(self):
return len(self.x) if self.x is not None else 0
def save(self, path):
np.savez(
path,
x=self.x, y=self.y, t=self.t
)
def load(self, path):
data = np.load(path)
self.x = data["x"]
self.y = data["y"]
self.t = data["t"]
assert len(self) <= self.memory_size, len(self)
self.nb_classes = len(np.unique(self.y))
def reduce(self):
x, y, t = [], [], []
for class_id in np.unique(self.y):
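            # Assumed minimal completion (the original body is truncated here): keep at
            # most `memory_per_class` exemplars of each class; a full implementation may
            # use herding or another selection strategy instead of simple truncation.
            idx = np.where(self.y == class_id)[0][:self.memory_per_class]
            x.append(self.x[idx])
            y.append(self.y[idx])
            t.append(self.t[idx])
        self.x, self.y, self.t = np.concatenate(x), np.concatenate(y), np.concatenate(t)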
import gm
from math import sqrt
from networkx.algorithms.cluster import average_clustering, triangles
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import matplotlib.pyplot as plt
import math
import networkx as nx
import matplotlib.animation as animation
import networkx.algorithms.community as nx_comm
import warnings
from matplotlib.patches import Rectangle
warnings.filterwarnings('ignore')
####################################################################
####################################################################
#-----------------------PARAMETERS BEGIN---------------------------#
####################################################################
####################################################################
# ------------------------ALGORITHM SETTINGS--------------------------#
N = 100 # Number of sensor nodes
M = 2 # Space dimensions
D = 10 # Desired distance among nodes,i.e. algebraic constraints
K = 1.2 # Scaling factor
R = K*D # Interaction range
A = 5
B = 5
C = np.abs(A-B)/np.sqrt(4*A*B)
D_prime = D * 3 # desired distance between obstacles and node
R_prime = K * D_prime # Interaction range with obstacles
EPSILON = 0.1
H_alpha = 0.2
H_beta = 0.9
C1_ALPHA = 10
C2_ALPHA = 2 * np.sqrt(C1_ALPHA)
C1_BETA = 150
C2_BETA = 2*np.sqrt(C1_BETA)
C1_GAMMA = 120
C2_GAMMA = 2 * np.sqrt(C1_GAMMA)
DELTA_T = 0.01 # time interval for calculating speed
ITERATION = 5000 # total step number
# ---------------------STATISTICAL SETTINGS---------------------------#
# whole process parameters recording
POSITION_X = np.zeros([N, ITERATION]) # X position of each agent
POSITION_Y = np.zeros([N, ITERATION]) # Y position of each agent
AVERAGE_VELOCITY = np.zeros([1,ITERATION]) # the average speed for each iter
MAX_V = np.zeros([1,ITERATION])# the max speed of points for each iter
AVERAGE_X_POSITION = np.zeros([1,ITERATION]) # average X
MAX_VELOCITY_X = np.zeros([1,ITERATION]) # max speed X
VELOCITY_MAGNITUDES = np.zeros([N, ITERATION]) # velocity of each agent
# acceleration
ACCELERACTION_X = np.zeros([N, ITERATION])
AVERAGE_X_ACC = np.zeros([1, ITERATION])
import cv2, numpy as np, requests, time, parse
def get_mask(frame, bodypix_url='http://localhost:9000'):
_, data = cv2.imencode(".jpg", frame)
r = requests.post(
url=bodypix_url,
data=data.tobytes(),
headers={'Content-Type': 'application/octet-stream'})
mask = np.frombuffer(r.content, dtype=np.uint8)
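    # Assumed completion (the original function is truncated here): the bodypix service
    # returns one byte per pixel, so reshape the flat buffer back to the frame's size.
    mask = mask.reshape((frame.shape[0], frame.shape[1]))
    return mask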
import os
from unittest import main, TestCase
import numpy as np
from serenata_toolbox.chamber_of_deputies.speeches_dataset import SpeechesDataset
class TestSpeechesDataset(TestCase):
def setUp(self):
self.subject = SpeechesDataset()
def test_fetch(self):
df = self.subject.fetch('03/02/2015', '03/02/2015')
actualColumns = df.columns
expectedColumns = [
'session_code', 'session_date', 'session_num', 'phase_code',
'phase_desc', 'speech_speaker_num', 'speech_speaker_name',
'speech_speaker_party', 'speech_speaker_state',
'speech_started_at', 'speech_room_num', 'speech_insertion_num'
]
self.assertTrue((np.array(expectedColumns) == np.array(actualColumns)).all())
import os.path
from pathlib import Path
from ctypes import *
from sys import platform
from numpy.ctypeslib import ndpointer
import numpy as np
from PIL import Image
try:
import eyeRendererHelperFunctions as eyeTools
except Exception as e:
print("Error importing eyeTools:", e)
print("This is most likely because you do not have the 'python-examples' folder set as a path in $PYTHONPATH.")
exit()
def getIdFromMap(mapImage, x, y):
r = mapImage[y,x,0] << 24 # Red
g = mapImage[y,x,1] << 16 # Green
b = mapImage[y,x,2] << 8 # Blue
a = mapImage[y,x,3] # Alpha
idOut = r | g | b | a
return(idOut)
def getProjectionImageUsingMap(vector, vectorMax, idMap, pjWidth,pjHeight):
np.copy(idMap)
output = np.zeros((pjWidth, pjHeight), dtype=np.uint8)
for x in range(pjWidth):
for y in range(pjHeight):
pixelId = getIdFromMap(idMap, x, y)
output[x,y] = int(vector[pixelId]/vectorMax * 255)
return(output)
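# Vectorized sketch of the same RGBA -> id packing as getIdFromMap, applied to the whole
# map at once (an illustrative alternative; assumes idMap is uint8 with shape (H, W, 4)):
def getIdMapVectorized(mapImage):
    m = mapImage.astype(np.uint32)
    return (m[..., 0] << 24) | (m[..., 1] << 16) | (m[..., 2] << 8) | m[..., 3]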
try:
# Load the renderer
eyeRenderer = CDLL("../../build/make/lib/libEyeRenderer3.so")
print("Successfully loaded", eyeRenderer)
# Configure the renderer's function outputs and inputs using the helper functions
eyeTools.configureFunctions(eyeRenderer)
#Load a scene
print("Loading scene (please wait)...")
eyeRenderer.loadGlTFscene(c_char_p(b"../../data/natural-standin-sky.gltf"))
print("Scene loaded!")
# Make sure there's a place to save to
Path("output/generated-data/alias-demo-quantified/").mkdir(parents=True, exist_ok=True)
Path("output/vector-data/").mkdir(parents=True, exist_ok=True)
Path("output/view-images/").mkdir(parents=True, exist_ok=True)
Path("output/generated-data/spread-analysis/").mkdir(parents=True, exist_ok=True)
###### First, generate the ommatidial id map
#Resize the renderer display in order to render the spherically-projected variable sample rate
renderWidth = 700
renderHeight = 300
eyeTools.setRenderSize(eyeRenderer, renderWidth, renderHeight)
# Go to the 'insect-eye-spherical-projector' camera
eyeRenderer.gotoCameraByName(c_char_p(b"insect-eye-spherical-projector-ids"))
eyeRenderer.renderFrame() # Just straight-up render the spherical projector ids
idMap = np.copy(np.flipud(eyeRenderer.getFramePointer())) # Copy (remember here the data is still owned by the render, so we need this copy) the id map (plus flip it the right way up)
eyeRenderer.saveFrameAs(c_char_p(("output/generated-data/alias-demo-quantified/projection-ids.ppm").encode())) # Save the image for sanity checking
# Also generate a set of weights that store how much of an influence on an
# average each compound eye should have based on it's area coverage in steradians
perSteradianWeights = [1.0/i.getSolidAngle() for i in eyeTools.readEyeFile("../../data/eyes/1000-horizontallyAcute-variableDegree.eye")]
perSteradianWeights = np.asarray(perSteradianWeights)
###### Second, generate ommatidial sample data into a big multi-dim array to perform analysis on
# Change to vector rendering
eyeRenderer.gotoCameraByName(c_char_p(b"insect-eye-fast-vector"))
# Prepare to generate vector data (1000 ommatidia)
vectorWidth = 1000
maxOmmatidialSamples = renderWidth # The upper bound of how many samples will be taken per ommatidium in the analysis
spreadSampleCount = 1000 # How many times each frame is rendered to get a sense of the spread of results from a given ommatidium at different sampling rates
eyeTools.setRenderSize(eyeRenderer, vectorWidth, 1)
# Create a numpy array to store the eye data
# This is a set of eye matricies, each one being a 1st-order stack of samples (the width of the number of ommatidia, and 3 channels deep)
eyeSampleMatrix = np.zeros((maxOmmatidialSamples,spreadSampleCount, vectorWidth, 3), dtype=np.uint8)
# Iterate over eye sample counts
for idx, samples in enumerate(range(1, maxOmmatidialSamples+1)):
eyeRenderer.setCurrentEyeSamplesPerOmmatidium(samples)
eyeRenderer.renderFrame() # First call to ensure randoms are configured
# For each sample count, generate N images to compare
for i in range(spreadSampleCount):
renderTime = eyeRenderer.renderFrame() # Second call to actually render the image
# Retrieve the data
frameData = eyeRenderer.getFramePointer()
frameDataRGB = frameData[:,:,:3] # Remove the alpha channel
eyeSampleMatrix[idx,i,:,:] = np.copy(frameDataRGB[:, :, :])
eyeRenderer.displayFrame()
###### Now calculate the per-ommatidial sample variance and standard deviation at each sample rate
print("")
maxSd = 0
maxVariance = 0
variances = np.zeros((renderWidth, vectorWidth, 1))
standardDeviations = np.zeros((renderWidth, vectorWidth, 1))
avgVariancePerSteradianPerImage = np.zeros(renderWidth)
"""
<NAME>., et al. 2011, ApJ, 730, 61
http://arxiv.org/abs/1011.6370
For ssfr, values are corrected as seen in Behroozi et al. 2013 (http://arxiv.org/abs/1207.6105), Table 5.
"""
import numpy as np
info = \
{
'reference':'<NAME>., et al. 2011, ApJ, 730, 61',
'data': 'Behroozi, Table 5',
'imf': ('chabrier, 2003', (0.1, 100.)),
}
redshifts = [0.28, 0.49, 0.69, 0.89, 1.1, 1.38, 1.81, 2.27, 2.73]
wavelength = 1600.
ULIM = -1e10
fits = {}
# Table 1
tmp_data = {}
tmp_data['ssfr'] = \
{
0.28: {'M': [1.0964782E+10, 2.2387211E+10, 4.2657952E+10, 8.9125094E+10, 1.5848932E+11],
'phi': [-9.95, -10.15, -10.35, -10.6, -10.8],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981,
0.301029995663981), (0.3, 0.3)]
},
0.49: {'M': [1.1220185E+10, 2.1877616E+10, 4.3651583E+10, 8.3176377E+10, 1.7782794E+11],
'phi': [-9.5, -9.7, -9.95, -10.15, -10.5],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981,
0.301029995663981), (0.3, 0.3)]
},
0.69: {'M': [2.2387211E+10, 4.2657952E+10, 8.3176377E+10, 1.5848932E+11],
'phi': [-9.6, -9.75, -9.9, -10.05],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.3,
0.3)]
},
0.89: {'M': [2.2387211E+10, 4.3651583E+10, 8.1283052E+10, 1.5848932E+11],
'phi': [-9.3, -9.55, -9.75, -9.9],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.3,
0.3)]
},
1.1: {'M': [2.2387211E+10, 4.3651583E+10, 8.3176377E+10, 1.5848932E+11],
'phi': [-9.1, -9.3, -9.5, -9.6],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.3,
0.3)]
},
1.38: {'M': [4.3651583E+10, 8.1283052E+10, 1.5848932E+11],
'phi': [-9.1, -9.3, -9.4],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.3, 0.3)]
},
1.81: {'M': [4.2657952E+10, 8.3176377E+10, 1.5848932E+11],
'phi': [-8.8, -8.95, -9.0],
'err': [(0.301029995663981, 0.301029995663981), (0.301029995663981, 0.301029995663981), (0.3, 0.3)]
},
2.27: {'M': [8.5113804E+10, 1.5848932E+11],
'phi': [-8.8, -8.8],
'err': [(0.301029995663981, 0.301029995663981), (0.3, 0.3)]
},
2.73: {'M': [1.5848932E+11],
'phi': [-8.7],
'err': [(0.3, 0.3)]
},
}
units = {'ssfr': '1.'}
data = {}
data['ssfr'] = {}
for group in ['ssfr']:
for key in tmp_data[group]:
if key not in tmp_data[group]:
continue
subdata = tmp_data[group]
mask = []
for element in subdata[key]['err']:
if element == ULIM:
mask.append(1)
else:
mask.append(0)
mask = np.array(mask)
data[group][key] = {}
data[group][key]['M'] = np.ma.array(subdata[key]['M'], mask=mask)
data[group][key]['phi'] = np.ma.array(subdata[key]['phi'], mask=mask)
'''
_______ _______ _______ _______ ________ ___ _______ __ __ ___________
/ __ / / ___ \ / ____/ / _____/ / ______\ / / / ____/ / \ / / /____ _____/
/ /__/ / / |__| | / /___ / /____ / / / / / /___ / \ / / / /
/ ______/ / __ ___/ / ____/ /______ / / / / / / ____/ / /\ \/ / / /
/ / / / \ \ / /____ _______/ / / /_____ / / / /___ / / \ / / /
/_/ /_/ \_\ /______/ /________/ /________/ /__/ /______/ /__/ \__/ /__/
Automated forecasting tool powered by Facebook Prophet.
Developed by <NAME>
01.02.2018
Version 0.1.0
'''
# TODO Get streaming data from Kafka
# TODO Online forecasting architecture
# TODO Model serialization
# TODO Time series database integration
import os
import subprocess
import numpy as np
import pandas as pd
import threading
import signal
from colorama import Fore
from concurrent.futures import ProcessPoolExecutor
from ProphetExecutor import ProphetExecutor
from PrescientConfig import PrescientConfig
from PrescientLogger import PrescientLogger
from collections import deque
import sys
# ------------------------ RESULT VARIABLES -----------------------
best_model = (0, 0, 0)
accuracies = deque()
accuracy_change_rates = deque()
best_accuracies = deque()
# -----------------------------------------------------------------
# ---------------------------------- CONFIGS ---------------------------------------
config = PrescientConfig(sys.argv[1]) # get configuration from file
dataset_filepath = config.get_str("forecastbase.dataset.filepath")
tdp_min = config.get_float("forecastbase.training.data.percent.min")
tdp_max = config.get_float("forecastbase.training.data.percent.max")
tdp_inc_by = config.get_float("forecastbase.training.data.percent.increment.by")
iw_min = config.get_float("forecastbase.interval.width.min")
iw_max = config.get_float("forecastbase.interval.width.max")
iw_inc_by = config.get_float("forecastbase.interval.width.increment.by")
cps_min = config.get_float("forecastbase.changepoint.prior.scale.min")
cps_max = config.get_float("forecastbase.changepoint.prior.scale.max")
cps_inc_by = config.get_float("forecastbase.changepoint.prior.scale.increment.by")
predict_next = config.get_int("forecastbase.predict.next")
predict_freq = config.get_str("forecastbase.predict.freq")
parallelism = config.get_int("forecastbase.paralellism")
measure_number = config.get_int("forecastbase.convergence.detection.measure.number")
average_acr_threshold = config.get_float("forecastbase.convergence.detection.acr.threshold")
holiday_weekends_enabled = config.get_bool("forecastbase.holiday.weekends.enabled")
holiday_special_days = config.get_list("forecastbase.holiday.special.days")
# -----------------------------------------------------------------------------------
# ----------------------------- HOLIDAY WEEKENDS SETTINGS -----------------------------
holiday_weekends = {}
if not holiday_weekends_enabled:
holiday_weekends = None
# -------------------------------------------------------------------------------------
semaphore = threading.BoundedSemaphore(value=1)
def run():
model_index = 1
prophet_executor = ProphetExecutor()
# Create training file and load weekends (if enabled) according to current percent
for training_data_percent_prep in np.arange(tdp_min, tdp_max + tdp_inc_by, tdp_inc_by):
prepare_training_file(training_data_percent_prep)
if holiday_weekends_enabled:
load_holiday_weekends(training_data_percent_prep)
# Submitting jobs
with ProcessPoolExecutor(max_workers=parallelism) as process_pool:
for training_data_percent in np.arange(tdp_min, tdp_max + tdp_inc_by, tdp_inc_by):
for interval_width in np.arange(iw_min, iw_max + iw_inc_by, iw_inc_by):
for changepoint_prior_scale in np.arange(cps_min, cps_max + cps_inc_by, cps_inc_by):
model_future = process_pool.submit(prophet_executor.execute,
model_index,
dataset_filepath,
training_data_percent,
interval_width,
changepoint_prior_scale,
predict_next,
predict_freq,
holiday_weekends,
holiday_special_days)
model_future.add_done_callback(model_training_done_callback)
model_index += 1
def prepare_training_file(training_data_percent):
# Get data count of file
data_count = int(subprocess.Popen(["wc", "-l", dataset_filepath], stdout=subprocess.PIPE).communicate()[0].split()[0])
# Calculate training data count according to percentage
training_data_count = (data_count * training_data_percent) / 100
PrescientLogger.console_log("FORECASTBASE", Fore.YELLOW, "Preparing training file for parameter training_data_percent=%" + str(training_data_percent) +
" Original data count:" + str(data_count) + " Training data count: " + str(training_data_count))
# Create training data file
os.system("head -" + str(int(training_data_count)) + " " + dataset_filepath + " > " + os.path.basename(dataset_filepath).split('.')[0] +
"_training_%" + str(training_data_percent) + ".csv")
def load_holiday_weekends(training_data_percent):
global holiday_weekends
PrescientLogger.console_log("FORECASTBASE", Fore.YELLOW, "Preparing weekends for parameter training_data_percent=%" + str(training_data_percent))
df_training_data = pd.read_csv(os.path.basename(dataset_filepath).split('.')[0] + "_training_%" + str(training_data_percent) + ".csv")
df_training_data['ds'] = pd.to_datetime(df_training_data['ds']) # Convert string to datetime
df_training_data['weekday'] = df_training_data['ds'].dt.weekday # Find number of day
df_training_data['ds'] = df_training_data['ds'].dt.date # Truncate time from datetime
# Selecting rows where day is Saturday or Sunday
df_holiday_weekends = df_training_data[(df_training_data['weekday'] == 5) | (df_training_data['weekday'] == 6)]
df_holiday_weekends = df_holiday_weekends.drop_duplicates(subset=['ds']) # Drop duplicate rows
df_holiday_weekends.drop(['y', 'weekday'], axis=1, inplace=True) # Drop unnecessary columns
holiday_weekends[str(training_data_percent)] = df_holiday_weekends
def show_intermediate_results(average_acr, acr_frame):
PrescientLogger.console_log(
None,
Fore.BLUE,
"########################################################################",
"Last " + str(measure_number) + " model's accuracies and accuracy change rates: \n",
acr_frame.to_string(),
"\nAverage accuracy change rate: " + str(average_acr),
"Best accuracy: " + str(best_model[0]),
"########################################################################\n")
def model_training_done_callback(model_fn):
global best_model
semaphore.acquire()
if model_fn.done():
error = model_fn.exception()
if error:
print(error)
else:
model = model_fn.result()
if accuracy_change_rates.__len__() < measure_number:
accuracy_change_rates.append(model[0] - best_model[0])
accuracies.append(model[0])
best_accuracies.append(best_model[0])
else:
# Remove oldest data and add last data
accuracy_change_rates.popleft()
accuracies.popleft()
best_accuracies.popleft()
accuracy_change_rates.append(model[0] - best_model[0]); accuracies.append(model[0]); best_accuracies.append(best_model[0])
# If trained model's accuracy is better than best model assign as new best model
if model[0] > best_model[0]:
best_model = model
if accuracy_change_rates.__len__() == measure_number:
# Calculate average accuracy change rate and show results
acr_frame = pd.DataFrame({'best_accuracy': best_accuracies, 'last_model_accuracy': accuracies, 'acr': accuracy_change_rates})
average_acr = acr_frame['acr'].mean()
show_intermediate_results(average_acr, acr_frame)
# If average accuracy change rate below threshold stop Forecastbase
if average_acr < average_acr_threshold:
PrescientLogger.console_log("FORECASTBASE", Fore.RED, "Convergence Detected!! Best model is accuracy=" + str(best_model[0]) +
" training_data_percent=" + str(best_model[1]) + " interval_width=" + str(best_model[2]) + " changepoint_prior_scale=" + str(best_model[3]))
semaphore.release() # Release acquired semaphore
# Remove training files
for training_data_percent in np.arange(tdp_min, tdp_max + tdp_inc_by, tdp_inc_by):
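    # Assumed completion (the original body is truncated here): delete the temporary
    # training file written by prepare_training_file() for this percentage.
    training_file = os.path.basename(dataset_filepath).split('.')[0] + "_training_%" + str(training_data_percent) + ".csv"
    if os.path.exists(training_file):
        os.remove(training_file)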
from imutils.video import VideoStream
import time
import imutils
import cv2
import tensorflow.keras as k
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array
from numpy import expand_dims
import math
import numpy as np
TIME_PER_CAPTURE = 6
start = (20, 30) # start point for text used in putText
font = cv2.FONT_HERSHEY_DUPLEX
fontScale = 0.6
color = (0, 0, 0)
thickness = 1
image_size = (224, 224)
classes = 2
shift = 100
kappa, kappa_s = 7, 0
tic = time.time()
vs = VideoStream(src=0).start()
while True:
toc = time.time()
frame = vs.read()
frame = imutils.resize(frame, width=650, height=650)
frame = cv2.flip(frame, 1)
time_elapsed = round(toc - tic)
if time_elapsed == TIME_PER_CAPTURE:
break
else:
cv2.putText(frame, 'Background picture taken in: ' + str(TIME_PER_CAPTURE - time_elapsed), start, font,
fontScale, color, thickness)
cv2.imshow('Take Background Picture', frame)
cv2.waitKey(1)
cv2.destroyAllWindows()
vs.stop()
background = cv2.resize(frame, image_size)
while True:
cv2.putText(frame, 'Press q to quit', start, font, fontScale, color, thickness)
cv2.imshow('Background', frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cv2.destroyAllWindows()
model = k.models.Sequential([
k.layers.SeparableConv2D(64, (1, 1), activation='relu', input_shape=(224, 224, 3), depth_multiplier=3),
])
output_layer = 0
outputs = [model.layers[output_layer].output]
box_model = Model(inputs=model.inputs, outputs=outputs)
background_img = img_to_array(background)
background_img = expand_dims(background_img, axis=0)
feature_maps = box_model.predict(background_img)
fmap_back_avg = np.zeros(shape=(feature_maps.shape[1], feature_maps.shape[2]))
span = int(math.sqrt(feature_maps.shape[-1]))
for fmap in feature_maps:
i = 1
for _ in range(span):
for _ in range(span):
fmap_back_avg += fmap[:, :, i - 1].squeeze()
i += 1
fmap_back_avg /= (span ** 2)
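# Note (equivalent, vectorized form): the nested loops above simply average every channel
# of the single feature map, so fmap_back_avg could also be computed as
#   fmap_back_avg = feature_maps[0].mean(axis=-1)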
vs = VideoStream(src=0).start()
sal_flag = False
while True:
frame = vs.read()
frame = imutils.resize(frame, width=650, height=650)
frame = cv2.flip(frame, 1)
input_image = cv2.resize(frame, image_size)
input_image = img_to_array(input_image)
input_image = expand_dims(input_image, axis=0)
feature_maps = box_model.predict(input_image)
fmap_avg = np.zeros(shape=(feature_maps.shape[1], feature_maps.shape[2]))
span = int(math.sqrt(feature_maps.shape[-1]))
for fmap in feature_maps:
i = 1
for _ in range(span):
for _ in range(span):
fmap_avg += fmap[:, :, i - 1].squeeze()
i += 1
fmap_avg /= (span ** 2)
diff = np.round(fmap_back_avg - fmap_avg, 2)