repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
leokoppel/gazecontour | gazecontour/recorder.py | 1 | 3455 | import inspect
from PySide import QtCore, QtGui
import pandas
class Recorder(QtCore.QObject):
"""
    Takes incoming frame dicts, stores them, and writes them to an Excel file on command
"""
recordingChanged = QtCore.Signal(bool)
def __init__(self, statusLabel):
super().__init__()
self._statusLabel = statusLabel
self.data = [] # Save data as list of frame dicts, for now
self.extraData = {}
self.pathData = {}
self.recording = False
self._desktopWidget = QtGui.QDesktopWidget()
self.clear()
def _flatten(self, d, parent_key=''):
"""
Flatten a dict like {'avg':{'x':42, 'y':0}} into {'avg_x':42, 'avg_y':0}
"""
        import collections.abc
items = []
for k, v in d.items():
new_key = parent_key + '_' + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
items.extend(self._flatten(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
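    # Illustrative example (not part of the original module) of what _flatten
    # produces for a nested frame dict:
    #
    #     self._flatten({'avg': {'x': 42, 'y': 0}, 'timestamp': 1.5})
    #     # -> {'avg_x': 42, 'avg_y': 0, 'timestamp': 1.5}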
def handleFrame(self, frame):
"""
Save the gaze frame to memory, along with mouse position
"""
if self.recording:
# Flatten frame dict to one level
datapoint = self._flatten(frame)
# Add to memory
self.data.append(datapoint)
self._statusLabel.setText('Recorded {} frames'.format(len(self.data)))
def setRecording(self, rec):
if self.recording != rec:
self.recording = rec
self.recordingChanged.emit(rec)
def saveStim(self, stimItem, stimPos):
"""
        Keep track of what function was used to draw a stimulus, and its position (QPoint)
"""
if stimItem is None:
self.extraData.clear()
else:
self.extraData.update({'stim_item': stimItem.__name__,
'stim_module': stimItem.__module__,
'stim_x': stimPos.x(),
'stim_y': stimPos.y()})
def savePath(self, points):
"""
Similar to saveStim, save the drawn path (currently only one)
points = array of QPointF
"""
if points is None:
self.pathData.clear()
else:
self.pathData.update({'x': [p.x() for p in points],
'y': [p.y() for p in points]})
def clear(self):
self.data.clear()
self._statusLabel.setText('Recorder ready')
def toDataFrame(self):
""" Return a pandas.DataFrame with all data in memory up to this point """
df = pandas.DataFrame(self.data)
if len(df) > 0:
df.set_index('timestamp', inplace=True)
df.index.name = None
return df
def saveToFile(self):
""" Save data to csv file, after opening a file dialog """
filename, _ = QtGui.QFileDialog.getSaveFileName(self.parent(), 'Save File', '',
'Excel Workbook (*.xlsx);; Excel 97-2003 Workbook (*.xls)')
if filename:
writer = pandas.ExcelWriter(filename)
self.toDataFrame().to_excel(writer, 'GazeData')
pandas.DataFrame.from_dict(self.extraData, orient='index').to_excel(writer, 'Extra')
pandas.DataFrame.from_dict(self.pathData).to_excel(writer, 'Path')
writer.save()
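# Minimal usage sketch (illustrative only; assumes a running Qt application and
# a QtGui.QLabel instance named `status_label`, which is a hypothetical name):
#
#     recorder = Recorder(status_label)
#     recorder.setRecording(True)
#     recorder.handleFrame({'timestamp': 0.0, 'avg': {'x': 42, 'y': 0}})
#     recorder.toDataFrame()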
| mit |
mitmul/chainer-faster-rcnn | utils/draw_plot.py | 2 | 1696 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import glob
import json
import os
from collections import defaultdict
import numpy as np
if True:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--result_dir', type=str)
parser.add_argument('--x_axis', type=str, default='iteration')
parser.add_argument('--start_x', type=int, default=0)
args = parser.parse_args()
for log_fn in sorted(glob.glob('{}/log*'.format(args.result_dir))):
logs = json.load(open(log_fn))
loss = defaultdict(list)
for log in logs:
if args.x_axis == 'iteration' and log['iteration'] < args.start_x:
continue
elif args.x_axis == 'epoch' and log['epoch'] < args.start_x:
continue
for k, v in log.items():
if 'epoch' in k or 'iteration' in k or 'elapsed_time' in k:
continue
loss[k].append([log[args.x_axis], v])
for k, v in loss.items():
y = np.array(sorted(v))
f = plt.figure()
a = f.add_subplot(111)
a.set_xlabel(args.x_axis)
a.set_ylabel(k)
x = np.where(y[:, 0] >= args.start_x)[0][0]
a.plot(y[x:, 0], y[x:, 1], label=k)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(
os.path.join(args.result_dir,
'{}.png'.format(k.replace('/', '_'))),
bbox_extra_artists=(l,), bbox_inches='tight')
| mit |
Eric-Dilcher/Simulate-Microcavity | main_microcavity.py | 1 | 8211 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 16:15:06 2013
@author: Eric Dilcher
This code relies on the transfer matrix theory (see "Introduction to Optics" by Pedrotti)
to characterize the reflective and transmissive properties of multilayer films.
Any type, number, and thickness of film can be analyzed by entering the characteristics of the film stack in the config file.
There is a config file 'config.ini' where the user is to enter the parameters of the simulation.
If the user would like to expand the allowable materials, update the Material_Dictionary below. In addition,
update the comments in config.ini and configspec.ini.
Note: the package "configobj" needs to be installed before this code can be run.
"""
from scipy import matrix
import numpy as np
from matplotlib import pyplot as plt
import read_config as init
from Layer import Layer
#--------------------------------------------------------------#
# Functions
def create_layer_objs(param_dict):
layers_objs = []
for layer, thick in zip(param_dict["layer_list"], param_dict["thickness_list"]):
layers_objs.append(Layer(layer, thick, param_dict))
return layers_objs
def calculate(param_dictionary, medium, substrate, layer_objs):
start = param_dictionary["lambda_start"]
end = param_dictionary["lambda_end"]
num_points = param_dictionary["num_points"]
polarization = param_dictionary["polarization"]
lambda_values = np.linspace(start, end, num_points)
transm_coeff = []
refl_coeff = []
for wavelen in lambda_values:
update_all_layers(wavelen, param_dictionary, layer_objs)#recalculate the values taking into account the changed wavelength
if polarization == -1:
T_perp, R_perp = calc_trans_refl_at_wavelen_perp(wavelen, medium, substrate, layer_objs)
T_para, R_para = calc_trans_refl_at_wavelen_para(wavelen, medium, substrate, layer_objs)
transm_coeff.append(np.average([T_perp, T_para]))
refl_coeff.append(np.average([R_perp, R_para]))
elif polarization == 0:
T, R = calc_trans_refl_at_wavelen_perp(wavelen, medium, substrate, layer_objs)
transm_coeff.append(T)
refl_coeff.append(R)
else:
T, R = calc_trans_refl_at_wavelen_para(wavelen, medium, substrate, layer_objs)
transm_coeff.append(T)
refl_coeff.append(R)
#transform to numpy arrays
transm_coeff = np.array(transm_coeff)
refl_coeff = np.array(refl_coeff)
return (lambda_values, transm_coeff, refl_coeff)
#update all the layer values taking into account the changed wavelength
def update_all_layers(wavelen, param_dict, layer_objs):
for layer in layer_objs:
layer.refresh_values(wavelen, param_dict)
#--------------------------------------------------------------#
# Perpendicular polarization specific functions
def calc_trans_refl_at_wavelen_perp(wavelen, medium, substrate, layer_objs):
sysmat = calc_sysmat_perp(layer_objs)#recalculate the system matrix
t = calc_transmission_perp(medium, substrate, sysmat)#recalculate the transmission coefficient
T=(np.abs(t)**2)*100#recalculate the transmission amplitude in percent
r = calc_reflection_perp(medium, substrate, sysmat)#recalculate the reflection coefficient
R = (np.abs(r)**2)*100#recalculate the reflection amplitude in percent
return (T,R)
#calculate the system matrix
def calc_sysmat_perp(layer_objs):
sysmat = matrix([[1.0, 0], [0, 1.0]])
for layer in layer_objs:
sysmat = sysmat*layer.tmatrix_perp
return sysmat
#function to find the transmission coefficient of a stack of layers
def calc_transmission_perp(medium, substrate, sysmat_perp):
g_0 = medium.gamma_perp
g_s = substrate.gamma_perp
return 2*g_0/(g_0*sysmat_perp[0,0]+g_0*g_s*sysmat_perp[0,1]+sysmat_perp[1,0]+g_s*sysmat_perp[1,1])
#function to find the reflection coefficient of a stack of layers
def calc_reflection_perp(medium, substrate, sysmat_perp):
g_0 = medium.gamma_perp
g_s = substrate.gamma_perp
return (g_0*sysmat_perp[0,0]+g_0*g_s*sysmat_perp[0,1]-sysmat_perp[1,0]-g_s*sysmat_perp[1,1])/(g_0*sysmat_perp[0,0]+g_0*g_s*sysmat_perp[0,1]+sysmat_perp[1,0]+g_s*sysmat_perp[1,1])
#--------------------------------------------------------------#
# Parallel polarization specific functions
def calc_trans_refl_at_wavelen_para(wavelen, medium, substrate, layer_objs):
sysmat = calc_sysmat_para(layer_objs)#recalculate the system matrix
t = calc_transmission_para(medium, substrate, sysmat)#recalculate the transmission coefficient
T=(np.abs(t)**2)*100#recalculate the transmission amplitude in percent
r = calc_reflection_para(medium, substrate, sysmat)#recalculate the reflection coefficient
R = (np.abs(r)**2)*100#recalculate the reflection amplitude in percent
return (T,R)
def calc_sysmat_para(layer_objs):
sysmat = matrix([[1.0, 0], [0, 1.0]])
for layer in layer_objs:
sysmat = sysmat*layer.tmatrix_para
return sysmat
def calc_transmission_para(medium, substrate, sysmat_para):
g_0 = medium.gamma_para
g_s = substrate.gamma_para
return 2*g_0/(g_0*sysmat_para[0,0]+g_0*g_s*sysmat_para[0,1]+sysmat_para[1,0]+g_s*sysmat_para[1,1])
def calc_reflection_para(medium, substrate, sysmat_para):
g_0 = medium.gamma_para
g_s = substrate.gamma_para
return (g_0*sysmat_para[0,0]+g_0*g_s*sysmat_para[0,1]-sysmat_para[1,0]-g_s*sysmat_para[1,1])/(g_0*sysmat_para[0,0]+g_0*g_s*sysmat_para[0,1]+sysmat_para[1,0]+g_s*sysmat_para[1,1])
def write_file(param_dictionary, wavelens, transmissions, reflections, filename = ""):
polarization_dict = {-1: "Unpolarized", 0: "Perpendicular", 1: "Parallel"}
wavelens, transmissions, reflections = np.array(wavelens), np.array(transmissions), np.array(reflections)
    data = np.c_[wavelens, transmissions, reflections]  # stack arrays horizontally (3 columns)
start = param_dictionary["lambda_start"]
end = param_dictionary["lambda_end"]
increment = (1.0*end - start)/param_dictionary["num_points"]
center_wavelen = param_dictionary["lambda_0"]
polarization = param_dictionary["polarization"]
if filename == "":
filename = "microcavity_output_"+str(center_wavelen)+"nm.txt"
header = ("Start wavelength: "+str(start)+"\tEnd wavelength: "+str(end)+"\tIncrement by: "+str(increment)+ "\n"
+ "Design wavelength: "+str(center_wavelen) + "\tPolarization: "+ polarization_dict[polarization] + "\n"
+ "Wavelength (nm) \tTransmission (%)\tReflection (%)")
np.savetxt(filename, data, delimiter = "\t", header = header )
def plot_data(wavelens, transm_coeff, refl_coeff, param_dictionary):
polarization_dict = {-1: "Unpolarized", 0: "Perpendicular", 1: "Parallel"}
center_wavelen = param_dictionary["lambda_0"]
incident_angle = param_dictionary["inc_angle"]
polarization = param_dictionary["polarization"]
title = ("Design Wavelength: %d Incident Angle: %d degrees\n" %(center_wavelen, incident_angle)
+"Polarization: " + polarization_dict[polarization])
plt.plot(wavelens, transm_coeff, label = "Transmission")
plt.plot(wavelens, refl_coeff, label = "Reflection")
plt.ylim(-5,105)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Transmission/Reflection (%)")
plt.title(title)
plt.legend()
plt.show()
#-------------------------------------------------------------#
# Simulation Setup
param_dictionary = init.param_dictionary
medium = Layer(param_dictionary["medium"], -1, param_dictionary)#create a layer object for the medium
substrate = Layer(param_dictionary["substrate"], -1, param_dictionary)#create a layer object for the substrate
layer_objs = create_layer_objs(param_dictionary)#create a list of layer objects using the create_layer_objects function
#--------------------------------------------------------------#
# Run Simulation
lambda_values, transm_coeff, refl_coeff = calculate(param_dictionary, medium, substrate, layer_objs)
write_file(param_dictionary, lambda_values, transm_coeff, refl_coeff)
plot_data(lambda_values, transm_coeff, refl_coeff, param_dictionary)
| gpl-2.0 |
josh-willis/pycbc | bin/hdfcoinc/pycbc_plot_Nth_loudest_coinc_omicron.py | 10 | 6303 | """
Generates a plot that shows the time-frequency trace of
Nth loudest coincident trigger overlaid on a background of
Omicron triggers.
"""
import logging
import h5py
import numpy as np
import argparse
import glob
from glue.ligolw import ligolw, lsctables, table, utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pycbc.events
from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
class DefaultContentHandler(ligolw.LIGOLWContentHandler):
pass
lsctables.use_in(DefaultContentHandler)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--coinc-file', type=str, required=True,
help='HDF file containing coincident CBC triggers')
parser.add_argument('--single-ifo-trigs', type=str, required=True,
help='HDF file containing single IFO CBC triggers')
parser.add_argument('--ifo', type=str, required=True,
help='IFO, L1 or H1')
parser.add_argument('--tmpltbank-file', type=str, required=True,
help='HDF file containing template information for CBC search')
parser.add_argument('--output-file', type=str, required=True,
help='Full path to output file')
parser.add_argument('--loudest-event-number', type=int, required=True, default=1,
help='Script will plot the Nth loudest coincident trigger')
parser.add_argument('--omicron-dir', type=str, required=True,
help='Directory containing Omicron triggers. Ex: /home/detchar/triggers/ER7/')
parser.add_argument('--omicron-snr-thresh', type=int, required=False, default=5,
help='SNR threshold for choosing which Omicron triggers to plot.')
parser.add_argument('--plot-window', type=float, required=False, default=32,
help='Time window to plot around CBC trigger')
parser.add_argument('--omicron-channel',type=str, required=False, default='GDS-CALIB_STRAIN',
help='Channel to plot Omicron triggers for, do not include IFO')
parser.add_argument('--analysis-level', type=str, required=False, default='foreground',
choices = ['foreground','background','background_exc'],
help='Designates which level of the analysis output to search')
args = parser.parse_args()
logging.info('Reading HDF files')
coinc_trig_file = h5py.File(args.coinc_file,'r')
single_trig_file = h5py.File(args.single_ifo_trigs,'r')
template_file = h5py.File(args.tmpltbank_file,'r')
logging.info('Parsing HDF files')
coinc_newsnr = coinc_trig_file[args.analysis_level]['stat'][:]
Nth_loudest_idx = np.argsort(coinc_newsnr)[-args.loudest_event_number]
if coinc_trig_file.attrs['detector_1'] == args.ifo:
idx = coinc_trig_file[args.analysis_level]['trigger_id1'][Nth_loudest_idx]
else:
idx = coinc_trig_file[args.analysis_level]['trigger_id2'][Nth_loudest_idx]
# get info about single detector triggers that comprise loudest background event
# and calculate newSNR
snr = single_trig_file[args.ifo]['snr'][idx]
chisq = single_trig_file[args.ifo]['chisq'][idx]
chisq_dof = single_trig_file[args.ifo]['chisq_dof'][idx]
reduced_chisq = chisq/(2*chisq_dof - 2)
newsnr = pycbc.events.ranking.newsnr(snr,reduced_chisq)
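# For reference, the conventional "new SNR" re-weighting assumed here (see
# pycbc.events.ranking for the authoritative definition) is
#     newsnr = snr                                          if reduced_chisq <= 1
#     newsnr = snr / ((1 + reduced_chisq**3) / 2)**(1./6)   otherwise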
cbc_end_time = single_trig_file[args.ifo]['end_time'][idx]
template_id = single_trig_file[args.ifo]['template_id'][idx]
m1 = template_file['mass1'][template_id]
m2 = template_file['mass2'][template_id]
s1z = template_file['spin1z'][template_id]
s2z = template_file['spin2z'][template_id]
omicron_start_time = cbc_end_time - args.plot_window
omicron_end_time = cbc_end_time + args.plot_window
logging.info('Fetching omicron triggers')
# Generate list of directories to search over
gps_era_start = str(omicron_start_time)[:5]
gps_era_end = str(omicron_end_time)[:5]
eras = map(str, range(int(gps_era_start), int(gps_era_end) + 1))  # include the era containing the end time
if not eras:
eras = [gps_era_start]
# Grab all relevant Omicron trigger files
omicron_times = []
omicron_snr = []
omicron_freq = []
for era in eras:
# Generate list of all Omicron SnglBurst xml trigger files
file_list = glob.glob(args.omicron_dir +
'/%s/%s_Omicron/%s/%s-%s_Omicron-*.xml.gz'
%(args.ifo,args.omicron_channel,era,args.ifo,args.omicron_channel.replace('-','_')))
# Parse trigger files into SNR, time, and frequency for Omicron triggers
for file_name in file_list:
omicron_xml = utils.load_filename(file_name, contenthandler=DefaultContentHandler)
snglburst_table = table.get_table(omicron_xml, lsctables.SnglBurstTable.tableName)
for row in snglburst_table:
if (row.snr > args.omicron_snr_thresh and
omicron_start_time < row.peak_time < omicron_end_time):
omicron_times.append(row.peak_time + row.peak_time_ns * 10**(-9))
omicron_snr.append(row.snr)
omicron_freq.append(row.peak_frequency)
# Generate inspiral waveform and calculate f(t) to plot on top of Omicron triggers
hp, hc = get_td_waveform(approximant='SEOBNRv2', mass1=m1, mass2=m2,
spin1x=0, spin1y=0, spin1z=s1z,
spin2x=0, spin2y=0, spin2z=s2z,
delta_t=(1./32768.), f_lower=30)
f = frequency_from_polarizations(hp, hc)
amp = amplitude_from_polarizations(hp, hc)
stop_idx = amp.abs_max_loc()[1]
f = f[:stop_idx]
freq = np.array(f.data)
times = np.array(f.sample_times) + cbc_end_time
logging.info('Plotting')
plt.figure(0)
cm = plt.cm.get_cmap('Reds')
plt.scatter(omicron_times,omicron_freq,c=omicron_snr,s=30,cmap=cm,linewidth=0)
plt.grid(b=True, which='both')
cbar = plt.colorbar()
cbar.set_label('%s Omicron trigger SNR' % (args.ifo))
plt.yscale('log')
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
plt.xlim(omicron_start_time,omicron_end_time)
plt.suptitle('%s CBC trigger SNR = ' % (args.ifo) + format(snr,'.2f') +
", newSNR = " + format(newsnr,'.2f'),fontsize=12)
plt.title(format(m1,'.2f') + " - " + format(m2,'.2f') +
" solar masses at GPS time " + format(cbc_end_time,'.2f'),fontsize=12)
plt.hold(True)
plt.plot(times,freq)
plt.savefig(args.output_file)
logging.info('Done! Exiting script.')
| gpl-3.0 |
stas-vilchik/bdd-ml | main.py | 1 | 3199 | import math
import csv
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import datetime
tf.set_random_seed(0)
data = []
with open('image_and_complexity.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
data.append(row)
data = np.array(data)
data = data.astype(np.int)
labels = data[:, 0]
labels = [[1 if label < 6 else 0, 1 if 6 <= label < 13 else 0, 1 if 13 <= label else 0] for label in labels]
tokens_length = 107
inputs = []
for row in data[:, 1:]:
inputs.append(row - 1)
X_train, X_test, y_train, y_test = train_test_split(inputs, labels, test_size=0.33, random_state=42)
def next_batch(num, data, labels):
idx = np.arange(0, len(data))
np.random.shuffle(idx)
idx = idx[:num]
data_shuffle = [data[i] for i in idx]
labels_shuffle = [labels[i] for i in idx]
return np.asarray(data_shuffle), np.asarray(labels_shuffle)
x = tf.placeholder(tf.int32, [None, 1024])
xx = tf.one_hot(indices=x, depth=tokens_length, on_value=1.0, off_value=0.0, axis=-1)
print(xx.shape)
xxx = tf.reshape(xx, [-1, 1024, tokens_length, 1])
print(xxx.shape)
y_ = tf.placeholder(tf.float32, [None, 3]) # answers
lr = tf.placeholder(tf.float32)
W1 = tf.Variable(tf.truncated_normal([4, tokens_length, 1, 4], stddev=0.1))
B1 = tf.Variable(tf.ones([4]) / 10)
Y1 = tf.nn.relu(tf.nn.conv2d(xxx, W1, strides=[1, 2, 107, 1], padding='VALID') + B1)
print("Y1 " + str(Y1.shape))
fully_connected_size = 2044
YY1 = tf.reshape(Y1, shape=[-1, fully_connected_size])
print("YY1 " + str(YY1.shape))
W2 = tf.Variable(tf.truncated_normal([fully_connected_size, 3], stddev=0.1))
print("W2 " + str(W2.shape))
B2 = tf.Variable(tf.zeros([3]))
Ylogits = tf.matmul(YY1, W2) + B2
Y = tf.nn.softmax(Ylogits)
cost_fn = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=y_)
cost_fn = tf.reduce_mean(cost_fn) * 100
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(y_, 1))
accuracy_fn = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cost_fn)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
epoch_count = 10
step_count = 100
print(datetime.datetime.now().time())
for epoch in range(epoch_count):
for step in range(step_count):
total_step = epoch * step_count + step
# training on batches of 100 items
batch_x, batch_y = next_batch(100, X_train, y_train)
# learning rate decay
max_learning_rate = 0.001
min_learning_rate = 0.00003
decay_speed = 2000.0
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(
-total_step / decay_speed)
# learning_rate = 0.005
# the backpropagation training step
sess.run(train_step, {x: batch_x, y_: batch_y, lr: learning_rate})
accuracy, = sess.run([accuracy_fn], {x: X_test, y_: y_test})
print(datetime.datetime.now().time())
print("********* epoch " + str(epoch) + " ********* accuracy: " + str(accuracy))
| mit |
ElvisLouis/code | work/ML/tensorflow/separa/extract_contour.py | 1 | 2875 | import cv2 as cv
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
from test import *
def mat_show(img):
window_name = "CV_IMAGE"
cv.namedWindow(winname=window_name)
while True:
cv.imshow(winname=window_name, mat=img)
character = cv.waitKey(100)
        if character == 97:  # 97 == ord('a'); press 'a' to close the window
cv.destroyWindow(window_name)
return
def min_max_normalization(data, start=0, end=1):
data = data.astype(np.float)
min_max = preprocessing.MinMaxScaler(feature_range=(start, end))
if len(data.shape) == 4:
NUM_CHANNEL = data.shape[3]
if NUM_CHANNEL == 1:
data = np.reshape(data, newshape=[data.shape[0], data.shape[1], data.shape[2]])
for x in range(data.shape[0]):
data[x] = min_max.fit_transform(data[x])
data = np.reshape(data, newshape=[data.shape[0], data.shape[1], data.shape[2], NUM_CHANNEL])
else:
for index in range(data.shape[0]):
img = data[index]
R = img[::, ::, 2]
G = img[::, ::, 1]
B = img[::, ::, 0]
for channel in range(NUM_CHANNEL):
R = min_max.fit_transform(R)
G = min_max.fit_transform(G)
B = min_max.fit_transform(B)
img[::, ::, 2] = R
img[::, ::, 1] = G
img[::, ::, 0] = B
data[index] = img
elif len(data.shape) == 2:
for x in range(data.shape[0]):
value = np.reshape(data[x], [-1, 1])
media = min_max.fit_transform(value)
data[x] = np.reshape(media, data[x].shape)
elif len(data.shape) == 3:
for x in range(data.shape[0]):
data[x] = min_max.fit_transform(data[x])
elif len(data.shape) == 1:
data = min_max.fit_transform(data)
return data
img = cv.imread("./png/gauss-n100-5d-3largeCl_PCA_data_cls3.png", cv.IMREAD_GRAYSCALE)
img = img[65:645, 65:645]
img = cv.threshold(img, 100, 255, cv.THRESH_BINARY)[1]
mat_show(img_rotate(img, 90))
contour_extracter = PCA(n_components=100, whiten=True)
out = contour_extracter.fit_transform(img)
main_value = contour_extracter.explained_variance_ratio_
# print (np.sum(main_value))
out = contour_extracter.inverse_transform(out)
out = min_max_normalization(out, 0, 255)
out = np.where(out > 0, 255, 0)
out = out.astype(np.uint8)
temp = np.where(out == 0)
y_set = temp[1]
x_set = temp[0]
x = np.int(np.round(np.mean(x_set)))
y = np.int(np.round(np.mean(y_set)))
# x = np.sum(x_set) / len(x_set)
# y = np.sum(y_set) / len(y_set)
width = 400
height = 400
print (x)
print (y)
x1 = x - width / 2
x2 = x + width / 2
y1 = y - height / 2
y2 = y + height / 2
plt.scatter(x_set, y_set)
X = [x1, x2]
Y = [y1, y1]
plt.plot(X, Y)
X = [x2, x2]
Y = [y1, y2]
plt.plot(X, Y)
X = [x2, x1]
Y = [y2, y2]
plt.plot(X, Y)
X = [x1, x1]
Y = [y2, y1]
plt.plot(X, Y)
plt.plot()
plt.show()
# plt.show()
"""
cv_show = np.zeros(out.shape, dtype=np.uint8)
cv_show += 255
cv_show[x_set, y_set] = 0
mat_show(cv_show)
""" | gpl-2.0 |
EzAccount/spring | data.py | 1 | 1528 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
def evolve(state, t_step, gamma, w_squared, Ampl, Omega):
    '''Evolution function for the problem. Takes the state and returns its time derivative at this point.'''
position_prime = state[2];
    speed_prime = - 2 * gamma * state[2] - w_squared * state[1] - Ampl * np.cos(Omega * state[0]);
return np.array([1, position_prime, speed_prime]);
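# The state vector is [t, x, v]; evolve() returns d/dt of [t, x, v] for the
# damped, driven oscillator (assuming the w_squared argument already holds omega**2):
#
#     dx/dt = v
#     dv/dt = -2*gamma*v - omega**2 * x - Ampl*cos(Omega*t)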
def rk_coefs(state, t_step, gamma, w_squared, Ampl, Omega):
    '''RK-4 coefficients. Takes the state, returns the increment of the state.'''
coef = evolve(state, t_step, gamma, w_squared, Ampl, Omega) * t_step;
temp_state = state + coef / 2;
temp_evolution = evolve(temp_state, t_step, gamma, w_squared, Ampl, Omega) * t_step;
coef = coef + 2 * temp_evolution;
temp_state = state + temp_evolution / 2;
temp_evolution = evolve(temp_state, t_step, gamma, w_squared, Ampl, Omega) * t_step;
coef = coef + 2 * temp_evolution;
temp_state = state + temp_evolution;
temp_evolution = evolve(temp_state, t_step, gamma, w_squared, Ampl, Omega) * t_step;
coef = coef + temp_evolution;
coef = coef/ 6;
return coef;
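# For reference, rk_coefs accumulates the classical RK4 increment in `coef`:
#     k1 = h*f(y), k2 = h*f(y + k1/2), k3 = h*f(y + k2/2), k4 = h*f(y + k3)
#     y_{n+1} = y_n + (k1 + 2*k2 + 2*k3 + k4) / 6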
def main():
Time = np.arange(100);
X = np.zeros(100);
state = np.array([0,0,1]);
for time in Time:
X[time] = state[1];
temp = rk_coefs(state, 0.1, 0, 1, 0, 0)
state = state + temp;
plt.plot(Time, X)
plt.plot(Time, np.sin(Time*0.1))
plt.show()
if __name__ == '__main__':
main();
| mit |
kevin-intel/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 5 | 8046 | """ test the label propagation module """
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn.semi_supervised import _label_propagation as label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelPropagation, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2] == 1
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
def test_label_spreading_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200,
random_state=0)
y[::3] = -1
clf = label_propagation.LabelSpreading().fit(X, y)
# adopting notation from Zhou et al (2004):
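    # closed form checked below: F* = (I - alpha*S)^{-1} Y, rows renormalized to sum to one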
S = clf._build_graph()
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
Y = Y[:, :-1]
for alpha in [0.1, 0.3, 0.5, 0.7, 0.9]:
expected = np.dot(np.linalg.inv(np.eye(len(S)) - alpha * S), Y)
expected /= expected.sum(axis=1)[:, np.newaxis]
clf = label_propagation.LabelSpreading(max_iter=10000, alpha=alpha)
clf.fit(X, y)
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_label_propagation_closed_form():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200,
random_state=0)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000,
gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
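    # closed-form harmonic solution checked below: f_u = (I - T_uu)^{-1} T_ul Y_l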
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx,
indexing='ij'))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx,
indexing='ij'))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_array_almost_equal(expected, clf.label_distributions_, 4)
def test_valid_alpha():
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200,
random_state=0)
for alpha in [-0.1, 0, 1, 1.1, None]:
with pytest.raises(ValueError):
label_propagation.LabelSpreading(alpha=alpha).fit(X, y)
def test_convergence_speed():
# This is a non-regression test for #5774
X = np.array([[1., 0.], [0., 1.], [1., 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=5000)
mdl.fit(X, y)
# this should converge quickly:
assert mdl.n_iter_ < 10
assert_array_equal(mdl.predict(X), [0, 1, 1])
def test_convergence_warning():
# This is a non-regression test for #5774
X = np.array([[1., 0.], [0., 1.], [1., 2.5]])
y = np.array([0, 1, -1])
mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=1)
warn_msg = ('max_iter=1 was reached without convergence.')
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=1)
with pytest.warns(ConvergenceWarning, match=warn_msg):
mdl.fit(X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = label_propagation.LabelSpreading(kernel='rbf', max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
mdl = label_propagation.LabelPropagation(kernel='rbf', max_iter=500)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
@pytest.mark.parametrize("LabelPropagationCls",
[label_propagation.LabelSpreading,
label_propagation.LabelPropagation])
def test_label_propagation_non_zero_normalizer(LabelPropagationCls):
# check that we don't divide by zero in case of null normalizer
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/15946
# https://github.com/scikit-learn/scikit-learn/issues/9292
X = np.array([[100., 100.], [100., 100.], [0., 0.], [0., 0.]])
y = np.array([0, 1, -1, -1])
mdl = LabelPropagationCls(kernel='knn',
max_iter=100,
n_neighbors=1)
with pytest.warns(None) as record:
mdl.fit(X, y)
assert len(record) == 0
def test_predict_sparse_callable_kernel():
# This is a non-regression test for #15866
# Custom sparse kernel (top-K RBF)
def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
nn = NearestNeighbors(n_neighbors=10, metric='euclidean', n_jobs=-1)
nn.fit(X)
W = -1 * nn.kneighbors_graph(Y, mode='distance').power(2) * gamma
np.exp(W.data, out=W.data)
assert issparse(W)
return W.T
n_classes = 4
n_samples = 500
n_test = 10
X, y = make_classification(n_classes=n_classes,
n_samples=n_samples,
n_features=20,
n_informative=20,
n_redundant=0,
n_repeated=0,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=n_test,
random_state=0)
model = label_propagation.LabelSpreading(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
model = label_propagation.LabelPropagation(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test) >= 0.9
| bsd-3-clause |
chrisjdavie/Cookit | 5-a-day-thing/old_age_video/make_movie.py | 1 | 2851 | '''
This makes a movie of the bar charts between each year. It doesn't
actually make the movie, it outputs a few hundred images. Use command-line
tools to stitch them together.
It doesn't really show what I intended, but it does make a movie.
Created on 13 Nov 2014
@author: chris
'''
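# One possible (untested) way to stitch the frames written to pic_dir into a
# movie, assuming ffmpeg is installed:
#
#     ffmpeg -framerate 25 -i /tmp/movies/%03d.png movie.mp4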
def main():
from quintiles import plottity
import matplotlib.pyplot as pl
pic_dir = '/tmp/movies/'
fig_i = 0
'''By old age'''
fname = '/home/chris/Projects/Cookit/family-food-datasets/movie_dat/dat.p'
import pickle
with open(fname,'rb') as f:
dat = pickle.load(f)
Ages = [ 'Under 30', '30-39', '40-49', '50-64', '65-74', '75 and over' ]
years = ['2003-04', '2011.0', '2007.0', '2008.0', '2012.0', '2006.0', '2001-02', '2009.0', '2004-05', '2005-06', '2010.0', '2002-03']
nums = [ 3, 11, 7, 8, 12, 6, 1, 9, 4, 5, 10, 2 ]
import numpy as np
i_sort = np.argsort(nums)
years = np.array(years)[i_sort]
years = sorted(years)
i_m = 0
for yr_0, yr_1 in zip(years[:-1],years[1:]):
# yr_0 = years[-2]
dat_0 = dat[yr_0]
# yr_1 = years[-1]
dat_1 = dat[yr_1]
fs = 25.0
sp0 = 1.0/fs
sps = np.arange(0.0,1.0,sp0)
for sp in sps:
dat_sp = {}
for Age in Ages:
a_s0 = dat_0[Age]
a_s1 = dat_1[Age]
a_ssp = {}
for a_name in a_s0.keys():
v0 = a_s0[a_name]
v1 = a_s1[a_name]
vsp = (1.0-sp)*v0 + v1*sp
a_ssp[a_name] = vsp
dat_sp[Age] = a_ssp
Ages_labels = [ 'Under 30', '30-39', '40-49', '50-64', '65-74', '75\n and over' ]
fig_i += 1
# for yr in years:
plottity(dat_sp, Ages, Ages_labels, 'Age ranges', 'age', fig_i, yr=yr_0)
fig = pl.gcf()
fig.subplots_adjust(bottom=0.15,top=0.95)
f_str = 1000 + i_m
f_str = str(int(f_str))[1:]
print f_str
pl.savefig(pic_dir + f_str + '.png', format='png')
i_m += 1
pl.close()
yr = years[-1]
plottity(dat_sp, Ages, Ages_labels, 'Age ranges', 'age', fig_i, yr=yr)
fig = pl.gcf()
fig.subplots_adjust(bottom=0.15,top=0.95)
f_str = 1000 + i_m
f_str = str(int(f_str))[1:]
print f_str
pl.savefig(pic_dir + f_str + '.png', format='png')
pl.close()
if __name__ == '__main__':
    main()
| mit |
brainiak/brainiak | tests/eventseg/test_event.py | 2 | 6744 | from brainiak.eventseg.event import EventSegment
from scipy.special import comb
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
def test_create_event_segmentation():
es = EventSegment(5)
assert es, "Invalid EventSegment instance"
def test_fit_shapes():
K = 5
V = 3
T = 10
es = EventSegment(K, n_iter=2)
sample_data = np.random.rand(V, T)
es.fit(sample_data.T)
assert es.segments_[0].shape == (T, K), "Segmentation from fit " \
"has incorrect shape"
assert np.isclose(np.sum(es.segments_[0], axis=1), np.ones(T)).all(), \
"Segmentation from learn_events not correctly normalized"
T2 = 15
sample_data2 = np.random.rand(V, T2)
test_segments, test_ll = es.find_events(sample_data2.T)
assert test_segments.shape == (T2, K), "Segmentation from find_events " \
"has incorrect shape"
assert np.isclose(np.sum(test_segments, axis=1), np.ones(T2)).all(), \
"Segmentation from find_events not correctly normalized"
es_invalid = EventSegment(K)
with pytest.raises(ValueError):
es_invalid.model_prior(K-1)
# ``with`` block is about to end with no error.
pytest.fail("T < K should cause error")
with pytest.raises(ValueError):
es_invalid.set_event_patterns(np.zeros((V, K-1)))
pytest.fail("#Events < K should cause error")
def test_simple_boundary():
es = EventSegment(2)
random_state = np.random.RandomState(0)
sample_data = np.array([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]]) + \
random_state.rand(2, 7) * 10
es.fit(sample_data.T)
events = np.argmax(es.segments_[0], axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly segment two events"
events_predict = es.predict(sample_data.T)
assert np.array_equal(events_predict, [0, 0, 0, 1, 1, 1, 1]), \
"Error in predict interface"
def test_event_transfer():
es = EventSegment(2)
sample_data = np.asarray([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T)[0]
pytest.fail("Should need to set variance")
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
pytest.fail("Should need to set patterns")
es.set_event_patterns(np.asarray([[1, 0], [0, 1]]))
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
events = np.argmax(seg, axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly transfer two events to new data"
def test_weighted_var():
es = EventSegment(2)
D = np.zeros((8, 4))
for t in range(4):
D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
for t in range(4, 8):
D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
mean_pat = D[[0, 4], :].T
weights = np.zeros((8, 2))
weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
assert np.array_equal(
es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
"Failed to compute variance with 0/1 weights"
weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
assert np.allclose(
es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
"Failed to compute variance with fractional weights"
def test_sym():
es = EventSegment(4)
evpat = np.repeat(np.arange(10).reshape(-1, 1), 4, axis=1)
es.set_event_patterns(evpat)
D = np.repeat(np.arange(10).reshape(1, -1), 20, axis=0)
ev = es.find_events(D, var=1)[0]
# Check that events 1-4 and 2-3 are symmetric
assert np.all(np.isclose(ev[:, :2], np.fliplr(np.flipud(ev[:, 2:])))),\
"Fit with constant data is not symmetric"
def test_chains():
es = EventSegment(5, event_chains=np.array(['A', 'A', 'B', 'B', 'B']))
sample_data = np.array([[0, 0, 0], [1, 1, 1]])
with pytest.raises(RuntimeError):
seg = es.fit(sample_data.T)[0]
pytest.fail("Can't use fit() with event chains")
es.set_event_patterns(np.array([[1, 1, 0, 0, 0],
[0, 0, 1, 1, 1]]))
seg = es.find_events(sample_data.T, 0.1)[0]
ev = np.nonzero(seg > 0.99)[1]
assert np.array_equal(ev, [2, 3, 4]),\
"Failed to fit with multiple chains"
def test_prior():
K = 10
T = 100
es = EventSegment(K)
mp = es.model_prior(T)[0]
p_bound = np.zeros((T, K-1))
norm = comb(T-1, K-1)
for t in range(T-1):
for k in range(K-1):
# See supplementary material of Neuron paper
# https://doi.org/10.1016/j.neuron.2017.06.041
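            # comb(t, k) * comb(T-t-2, K-k-2) counts segmentations whose
            # (k+1)-th boundary falls between timepoints t and t+1 (k earlier
            # boundaries among the first t gaps, K-k-2 later ones among the
            # remaining T-t-2 gaps), normalized by the comb(T-1, K-1) total.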
p_bound[t+1, k] = comb(t, k) * comb(T-t-2, K-k-2) / norm
p_bound = np.cumsum(p_bound, axis=0)
mp_gt = np.zeros((T, K))
for k in range(K):
if k == 0:
mp_gt[:, k] = 1 - p_bound[:, 0]
elif k == K - 1:
mp_gt[:, k] = p_bound[:, k-1]
else:
mp_gt[:, k] = p_bound[:, k-1] - p_bound[:, k]
assert np.all(np.isclose(mp, mp_gt)),\
"Prior does not match analytic solution"
def test_split_merge():
ev = np.array(
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4])
random_state = np.random.RandomState(0)
ev_pat = random_state.rand(5, 10)
D = np.zeros((len(ev), 10))
for t in range(len(ev)):
D[t, :] = ev_pat[ev[t], :] + 0.1*random_state.rand(10)
hmm_sm = EventSegment(5, split_merge=True, split_merge_proposals=2)
hmm_sm.fit(D)
hmm_events = np.argmax(hmm_sm.segments_[0], axis=1)
assert np.all(ev == hmm_events),\
"Merge/split fails to find highly uneven events"
def test_sym_ll():
ev = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2])
random_state = np.random.RandomState(0)
ev_pat = random_state.rand(3, 10)
D_forward = np.zeros((len(ev), 10))
for t in range(len(ev)):
D_forward[t, :] = ev_pat[ev[t], :] + 0.1 * random_state.rand(10)
D_backward = np.flip(D_forward, axis=0)
hmm_forward = EventSegment(3)
hmm_forward.set_event_patterns(ev_pat.T)
_, ll_forward = hmm_forward.find_events(D_forward, var=1)
hmm_backward = EventSegment(3)
hmm_backward.set_event_patterns(np.flip(ev_pat.T, axis=1))
_, ll_backward = hmm_backward.find_events(D_backward, var=1)
assert (ll_forward == ll_backward),\
"Log-likelihood not symmetric forward/backward"
| apache-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/tests/test_tightlayout.py | 9 | 4316 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib.pyplot as plt
from nose.tools import assert_raises
from numpy.testing import assert_array_equal
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
@image_comparison(baseline_images=['tight_layout1'])
def test_tight_layout1():
'Test tight_layout for a single subplot'
fig = plt.figure()
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout2'])
def test_tight_layout2():
    'Test tight_layout for multiple subplots'
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout3'])
def test_tight_layout3():
    'Test tight_layout for multiple subplots'
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout4'],
freetype_version=('2.4.5', '2.4.9'))
def test_tight_layout4():
'Test tight_layout for subplot2grid'
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout5'])
def test_tight_layout5():
'Test tight_layout for image'
fig = plt.figure()
ax = plt.subplot(111)
arr = np.arange(100).reshape((10, 10))
ax.imshow(arr, interpolation="none")
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout6'])
def test_tight_layout6():
'Test tight_layout for gridspec'
# This raises warnings since tight layout cannot
# do this fully automatically. But the test is
# correct since the layout is manually edited
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
fig = plt.figure()
import matplotlib.gridspec as gridspec
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.45)
@image_comparison(baseline_images=['tight_layout7'])
def test_tight_layout7():
# tight layout with left and right titles
fig = plt.figure()
fontsize = 24
ax = fig.add_subplot(111)
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Left Title', loc='left', fontsize=fontsize)
ax.set_title('Right Title', loc='right', fontsize=fontsize)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout8'])
def test_tight_layout8():
'Test automatic use of tight_layout'
fig = plt.figure()
fig.set_tight_layout({'pad': .1})
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
| mit |
Haunter17/MIR_SU17 | exp2/exp2_0e.py | 1 | 5094 | import numpy as np
import tensorflow as tf
import h5py
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
# Functions for initializing neural nets parameters
def init_weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def init_bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
print('==> Experiment 2 0E')
filepath = '../taylorswift_out/exp2_d15_1s.mat'
print('==> Loading data from {}'.format(filepath))
# benchmark
t_start = time.time()
# reading data
# f = h5py.File(filepath)
# X_train = np.array(f.get('trainingFeatures'))
# y_train = np.array(f.get('trainingLabels'))
# X_val = np.array(f.get('validationFeatures'))
# y_val = np.array(f.get('validationLabels'))
# t_end = time.time()
# print('--Time elapsed for loading data: {t:.2f} \
# seconds'.format(t = t_end - t_start))
# del f
# print('-- Number of training samples: {}'.format(X_train.shape[0]))
# print('-- Number of validation samples: {}'.format(X_val.shape[0]))
# Neural-network model set-up
num_training_vec, num_val_vec, total_features = 172961, 73857, 1936
num_freq = 121
num_frames = int(total_features / num_freq)
num_classes = 71
k = 16
filter_row, filter_col = 121, 1
X_train = np.random.rand(num_training_vec, total_features)
y_train = np.random.randint(num_classes, size=(num_training_vec, 1))
X_val = np.random.rand(num_val_vec, total_features)
y_val = np.random.randint(num_classes, size=(num_val_vec, 1))
try:
filter_row = int(sys.argv[1])
filter_col = int(sys.argv[2])
except Exception, e:
print('-- {}'.format(e))
print('-- Filter size is {} x {}'.format(filter_row, filter_col))
batch_size = 1000
num_epochs = 50
print_freq = 1
# Transform labels into on-hot encoding form
y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)
y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)
# Set-up input and output label
x = tf.placeholder(tf.float32, [None, total_features])
y_ = tf.placeholder(tf.float32, [None, num_classes])
# first convolutional layer
W_conv1 = init_weight_variable([filter_row, filter_col, 1, k])
b_conv1 = init_bias_variable([k])
x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_conv1_flat = tf.reshape(h_conv1, [-1, (num_freq - filter_row + 1) * (num_frames - filter_col + 1) * k])
# softmax layer
W_sm = init_weight_variable([(num_freq - filter_row + 1) * (num_frames - filter_col + 1) * k, num_classes])
b_sm = init_bias_variable([num_classes])
y_conv = tf.matmul(h_conv1_flat, W_sm) + b_sm
# evaluations
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# session
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
y_train = sess.run(y_train_OHEnc)[:, 0, :]
y_val = sess.run(y_val_OHEnc)[:, 0, :]
train_acc_list = []
val_acc_list = []
train_err_list = []
val_err_list = []
# benchmark
t_start = time.time()
for epoch in range(num_epochs):
for i in range(0, num_training_vec, batch_size):
batch_end_point = min(i + batch_size, num_training_vec)
train_batch_data = X_train[i : batch_end_point]
train_batch_label = y_train[i : batch_end_point]
train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})
if (epoch + 1) % print_freq == 0:
train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})
train_acc_list.append(train_acc)
val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})
val_acc_list.append(val_acc)
train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})
train_err_list.append(train_err)
val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})
val_err_list.append(val_err)
print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
t_end = time.time()
print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
x_list = range(0, print_freq * len(train_acc_list), print_freq)
train_err_plot, = plt.plot(x_list, train_err_list, 'b.')
val_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs with Filter Size of {} x {}'.format(filter_row, filter_col))
plt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')
plt.savefig('exp2_0E_error_{}x{}.png'.format(filter_row, filter_col), format='png')
plt.close()
print('==> Done.')
| mit |
airanmehr/bio | Scripts/FlyOxidativeStress/Hyperoxia/Util.py | 1 | 3552 | import numpy as np
import pandas as pd
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = True
import Utils.Util as utl
import Utils.Plots as pplt
import pylab as plt
path='/pedigree2/projects/arya/Data/Dmelanogaster/OxidativeStress/'
def rename(c):
def one(x):
if 'final' in x:
gen = {1: 1, 2: 7, 3: 12, 4: 15, 5: 31, 6: 61, 7: 114}
x = x[1:].split('_')
return 'H', gen[int(x[0])], int(x[1].split('.')[0])
if 'Clean' in x:
x = x.split('_')[1:]
return x[0][0], 180, int(x[0][-1])
ash=utl.execute('cat /home/arya/fly/F4-17/SraRunTable.tsv | cut -f7,9').iloc[1:].set_index(0)[1]
return ash.apply(lambda x: x[1:]).apply(lambda x: (x[-2].replace('H','L'),int(x[:-2]),int(x[-1]) )).loc[x]
if len(c.columns)==1:
c.columns = pd.MultiIndex.from_tuples(map(one, c.columns), names=['POP', 'GEN', 'REP'])
else:
cols= [x+(y,) for x,y in zip(map(one, c.columns.get_level_values(0)), c.columns.get_level_values(1))]
c.columns=pd.MultiIndex.from_tuples(cols, names=['POP', 'GEN', 'REP','READ'])
return c.sort_index(1)
def saveGVCFRC():
a = utl.getEuChromatin(pd.read_csv('all', sep='\t').set_index(['CHROM', 'POS'])).applymap(lambda x: map(int, x.split(',')))
a=pd.concat([a.applymap(lambda x: x[1]),a.applymap(sum)], 1, keys=['C','D']).reorder_levels([1, 0], 1).sort_index(1)
a.to_pickle('all.df')
def SaveDF():
def one(fn):
a = pd.read_csv(fn, sep='\t').set_index(['CHROM', 'POS']).rename(columns={'ALT': 'C'})
a=a[~a.C.apply(lambda x: ',' in x)].astype(int)
return utl.quickMergeGenome([a.sum(1).rename('D'), a.C], CHROMS=a.index.levels[0])
import glob
f=[x for x in glob.glob(path+'vcf/RC/*') if '.' not in x]
a = map(one, f)
gen = {1: 1, 2: 7, 3: 12, 4: 15, 5: 31, 6: 61, 7:114}
keys=map(lambda x: tuple(map(int,x.split('/')[-1][1:].split('_'))[::-1]), f)
keys=map(lambda x: (x[0],gen[x[1]]),keys)
b = utl.quickMergeGenome(a, CHROMS=a[0].index.levels[0],keys=keys).sort_index(1)
b = pd.concat([b], 1, keys=['H'])
b.columns.names=['POP', 'REP','GEN','READ']
b=b.reorder_levels([0,2,1,3],1)
b.columns.levels
a.columns.levels
a=pd.read_pickle(path+'df/data.df')
a=utl.quickMergeGenome([a,b],CHROMS=b.index.levels[0])
a.to_pickle(path+'df/all.df')
a=pd.read_pickle(path+'df/all.df')
a = a.dropna().stack([0, 1, 2])
a = a[a.D > 10]
a=(a.C/a.D).round(2)
a
x0=a.xs(1,0,4).unstack(2)
x1 = a.xs(180, 0, 3).unstack(2)
x1
X.plot.scatter(x=0,y=1)
x=a.unstack([2,3,4])
x.dropna()
X=utl.pcaX(x.dropna().T,2)
c=pd.DataFrame(map(lambda x: list(x)[:2],X.index)).drop_duplicates()
c['c']=pplt.getColorMap(c.shape[0])
c['m'] = pplt.getMarker(c.shape[0],False)
c=c.set_index([0,1])
c.loc['L', 'c'] = 'darkblue'
c.loc['H', 'c'] = 'r'
c.loc['C', 'c'] = 'g'
# c=pd.Series(pplt.getColorMap(X.index.levels[1].size),index=X.index.levels[1])
# c[12]='olive';c[180]='r'
plt.figure(dpi=150);ax=plt.gca()
for i in c.index:
X.sort_index().loc[i].plot.scatter(x=0,y=1,c=c.loc[i].c,label='{}.{}'.format(i[0],i[1]),ax=ax,s=70,alpha=0.6,marker=c.loc[i].m)
plt.xlabel('PC1');plt.ylabel('PC2')
plt.title('Genomewide PCA (H:Hyperoxia, C:Control, L:Hypoxia) of Flies')
y=(x1.mean(1)-x0.mean(1)).dropna().round(2)
z=utl.scanGenome(y.abs()>0.6,np.sum)
plt.rcParams['text.usetex'] = False
pplt.Manhattan(z)
| mit |
hagne/atm-py | atmPy/tools/time_tools.py | 7 | 1900 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 27 17:04:22 2015
@author: htelg
"""
import datetime
import numpy as np
import time
import pandas as pd
# ToDo: appand to particular timezones, e.g. UTC
def get_timestamp():
"""creates a time stamp of the local time"""
# tz= time.timezone
nowLoc = time.localtime()
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', nowLoc)
return timestamp
def get_time_formate():
"""
Returns a string of the time format used in atmPy
"""
return "%Y-%m-%d %H:%M:%S.%f"
def string2timestamp(ts_string):
"""Converts a timestamp string into a pandas Timestamp
Arguments
---------
ts_string: string representing a timestamp of the following formats:
- '%Y-%m-%d %H:%M:%S.%f'
- '%Y-%m-%d %H:%M:%S'
Example
-------
>>> bla = string2timestamp('2015-01-31 20:00:00.00')
>>> type(bla)
pandas.tslib.Timestamp
"""
try:
ts_time = pd.to_datetime(ts_string, format='%Y-%m-%d %H:%M:%S.%f')
except ValueError:
try:
ts_time = pd.to_datetime(ts_string, format='%Y-%m-%d %H:%M:%S')
except ValueError:
raise ValueError("""Can't understand timestamp, make sure tu use on of the following two:
- '%Y-%m-%d %H:%M:%S.%f'
- '%Y-%m-%d %H:%M:%S'
e.g.: 2015-01-31 20:00:00.00""")
return ts_time
def time_mac2dt(secs, timezone = 0, verbose = False, dateZero = "19040101" ):
""" creates a dateTime opject from a timestamp which represents seconds from 1904-01-01
parameters:
\t secs:\t array-like opject of seconds (float) since 1904-01-01"""
d0 = datetime.datetime.strptime(dateZero, "%Y%m%d")
out = []
for t in secs:
out.append(d0 + datetime.timedelta(seconds = t))
if verbose:
print (out[0].strftime("%Y-%m-%d_%H:%M:%S:%f"))
    return np.array(out)
| mit |
zrhans/python | exemplos/Examples.lnk/bokeh/charts/scatter.py | 1 | 1654 |
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Scatter
from bokeh.plotting import output_file, show, VBox
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values), ("versicolor", versicolor.values), ("virginica", virginica.values)])
scatter1 = Scatter(xyvalues, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left', marker="triangle")
groupped_df = flowers[["petal_length", "petal_width", "species"]].groupby("species")
scatter2 = Scatter(groupped_df, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
pdict = OrderedDict()
for i in groupped_df.groups.keys():
labels = groupped_df.get_group(i).columns
xname = labels[0]
yname = labels[1]
x = getattr(groupped_df.get_group(i), xname)
y = getattr(groupped_df.get_group(i), yname)
pdict[i] = list(zip(x, y))
df = pd.DataFrame(pdict)
scatter3 = Scatter(df, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
scatter4 = Scatter(list(xyvalues.values()), title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
output_file("scatter.html")
show(VBox([scatter1, scatter2, scatter3, scatter4]))
| gpl-2.0 |
zimmerle/gnuradio | gr-utils/src/python/plot_psd_base.py | 75 | 12725 | #!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer version of scipy does not throw a MemoryError, just
# returns a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
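# The dopsd() method above computes a Blackman-windowed PSD in dB via
# matplotlib.mlab.psd. A rough equivalent using scipy.signal.welch is sketched
# below (my assumption, not code from the file above); for complex baseband
# samples the two-sided spectrum is requested explicitly.
import numpy as np
from scipy import signal

def welch_psd_db(iq, sample_rate, fftsize=1024):
    freq, psd = signal.welch(iq, fs=sample_rate, window="blackman",
                             nperseg=fftsize, noverlap=fftsize // 4,
                             return_onesided=not np.iscomplexobj(iq))
    return 10.0 * np.log10(np.abs(psd)), freq

tone = np.exp(2j * np.pi * 0.1 * np.arange(8192))   # complex tone at 0.1 * fs
psd_db, freq = welch_psd_db(tone, sample_rate=1.0)
print(freq[np.argmax(psd_db)])                      # close to 0.1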
nrhine1/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
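# The GridSearchCV call above relies on scikit-learn's "step__parameter" naming
# convention for Pipeline steps. A tiny illustrative sketch of that convention
# (assuming a current scikit-learn; not part of the example above):
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

pipe = Pipeline(steps=[("pca", PCA()), ("logistic", LogisticRegression())])
pipe.set_params(pca__n_components=20, logistic__C=0.1)   # step name + "__" + parameter
print(pipe.get_params()["pca__n_components"])            # -> 20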
laurent-george/bokeh | examples/compat/mpl/listcollection.py | 34 | 1602 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates.
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def colorline(x, y, colors=None, linewidth=3, alpha=1.0):
'''
Plot a line with segments.
Optionally, specify segments colors and segments widths.
'''
# Make a list of colors cycling through the rgbcmyk series.
# You have several ways to input the colors:
# colors = ['r','g','b','c','y','m','k']
# colors = ['red','green','blue','cyan','yellow','magenta','black']
# colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000']
# colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0),
# (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)]
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
widths = [5, 10, 20, 40, 20, 10, 5]
segments = make_segments(x, y)
lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
# Colored sine wave
x = np.linspace(0, 4 * np.pi, 100)
y = np.sin(x)
colorline(x, y)
plt.title("MPL support for ListCollection in Bokeh")
plt.xlim(x.min(), x.max())
plt.ylim(-1.0, 1.0)
output_file("listcollection.html")
show(mpl.to_bokeh())
| bsd-3-clause |
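# make_segments() above reshapes x/y into the (n_segments, 2, 2) array that
# LineCollection expects. A standalone shape check of that idiom (illustrative
# values only, not part of the example above):
import numpy as np

x = np.linspace(0, 1, 5)
y = x ** 2
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
print(points.shape, segments.shape)   # (5, 1, 2) and (4, 2, 2): 4 segments joining 5 points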
wkfwkf/statsmodels | statsmodels/sandbox/mle.py | 33 | 1701 | '''What's the origin of this file? It is not ours.
Does not run because of missing mtx files, now included
changes: JP corrections to imports so it runs, comment out print
'''
from __future__ import print_function
import numpy as np
from numpy import dot, outer, random, argsort
from scipy import io, linalg, optimize
from scipy.sparse import eye as speye
import matplotlib.pyplot as plt
def R(v):
rq = dot(v.T,A*v)/dot(v.T,B*v)
res = (A*v-rq*B*v)/linalg.norm(B*v)
data.append(linalg.norm(res))
return rq
def Rp(v):
""" Gradient """
result = 2*(A*v-R(v)*B*v)/dot(v.T,B*v)
#print "Rp: ", result
return result
def Rpp(v):
""" Hessian """
result = 2*(A-R(v)*B-outer(B*v,Rp(v))-outer(Rp(v),B*v))/dot(v.T,B*v)
#print "Rpp: ", result
return result
A = io.mmread('nos4.mtx') # clustered eigenvalues
#B = io.mmread('bcsstm02.mtx.gz')
#A = io.mmread('bcsstk06.mtx.gz') # clustered eigenvalues
#B = io.mmread('bcsstm06.mtx.gz')
n = A.shape[0]
B = speye(n,n)
random.seed(1)
v_0=random.rand(n)
print("try fmin_bfgs")
full_output = 1
data=[]
v,fopt, gopt, Hopt, func_calls, grad_calls, warnflag, allvecs = \
optimize.fmin_bfgs(R,v_0,fprime=Rp,full_output=full_output,retall=1)
if warnflag == 0:
plt.semilogy(np.arange(0,len(data)),data)
print('Rayleigh quotient BFGS',R(v))
print("fmin_bfgs OK")
print("try fmin_ncg")
#
# WARNING: the program may hangs if fmin_ncg is used
#
data=[]
v,fopt, fcalls, gcalls, hcalls, warnflag, allvecs = \
optimize.fmin_ncg(R,v_0,fprime=Rp,fhess=Rpp,full_output=full_output,retall=1)
if warnflag==0:
plt.figure()
plt.semilogy(np.arange(0,len(data)),data)
print('Rayleigh quotient NCG',R(v))
| bsd-3-clause |
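# Rp() above implements the gradient of the generalized Rayleigh quotient
# R(v) = (v'Av)/(v'Bv), namely 2*(A*v - R(v)*B*v)/(v'Bv). A self-contained
# finite-difference check of that formula with a small random symmetric A and
# B = I (an illustrative sketch; the .mtx matrices above are not used):
import numpy as np

rng = np.random.default_rng(0)
n = 6
A = rng.standard_normal((n, n))
A = A + A.T                                   # symmetric test matrix
B = np.eye(n)
R = lambda v: (v @ A @ v) / (v @ B @ v)
Rp = lambda v: 2 * (A @ v - R(v) * (B @ v)) / (v @ B @ v)

v = rng.standard_normal(n)
eps = 1e-6
num = np.array([(R(v + eps * e) - R(v - eps * e)) / (2 * eps) for e in np.eye(n)])
print(np.max(np.abs(num - Rp(v))))            # should be tiny (roughly 1e-8 or less)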
Parallel-in-Time/pySDC | pySDC/projects/parallelSDC/minimization.py | 1 | 2418 | import matplotlib
import matplotlib.pylab as plt
import numpy as np
import scipy.optimize as opt
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
def main():
def rho(x):
return max(abs(np.linalg.eigvals(np.eye(M) - np.diag([x[i] for i in range(M)]).dot(coll.Qmat[1:, 1:]))))
M = 2
coll = CollGaussRadau_Right(M, 0, 1)
x0 = np.ones(M)
d = opt.minimize(rho, x0, method='Nelder-Mead')
print(d)
numsteps = 800
xdim = np.linspace(0, 8, numsteps)
ydim = np.linspace(0, 13, numsteps)
minfield = np.zeros((len(xdim), len(ydim)))
for idx, x in enumerate(xdim):
for idy, y in enumerate(ydim):
minfield[idx, idy] = max(abs(np.linalg.eigvals(np.eye(M) - np.diag([x, y]).dot(coll.Qmat[1:, 1:]))))
# Set up plotting parameters
params = {'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'lines.linewidth': 3
}
plt.rcParams.update(params)
matplotlib.style.use('classic')
plt.figure()
plt.pcolor(xdim, ydim, minfield.T, cmap='Reds', vmin=0, vmax=1)
plt.text(d.x[0], d.x[1], 'X', horizontalalignment='center', verticalalignment='center')
plt.xlim((min(xdim), max(xdim)))
plt.ylim((min(ydim), max(ydim)))
plt.xlabel('component 1')
plt.ylabel('component 2')
cbar = plt.colorbar()
cbar.set_label('spectral radius')
fname = 'data/parallelSDC_minimizer_full.png'
plt.savefig(fname, bbox_inches='tight')
plt.figure()
xdim_part = xdim[int(0.25 * numsteps):int(0.75 * numsteps) + 1]
ydim_part = ydim[0:int(0.25 * numsteps)]
minfield_part = minfield[int(0.25 * numsteps):int(0.75 * numsteps) + 1, 0:int(0.25 * numsteps)]
plt.pcolor(xdim_part, ydim_part, minfield_part.T, cmap='Reds', vmin=0, vmax=1)
plt.text(d.x[0], d.x[1], 'X', horizontalalignment='center', verticalalignment='center')
plt.xlim((min(xdim_part), max(xdim_part)))
plt.ylim((min(ydim_part), max(ydim_part)))
plt.xlabel('component 1')
plt.ylabel('component 2')
cbar = plt.colorbar()
cbar.set_label('spectral radius')
fname = 'data/parallelSDC_minimizer_zoom.png'
plt.savefig(fname, bbox_inches='tight')
if __name__ == "__main__":
main()
| bsd-2-clause |
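# main() above minimises the spectral radius of I - diag(x) @ Q over the
# diagonal preconditioner x with Nelder-Mead. The same pattern with a small
# random Q in place of the pySDC collocation matrix (self-contained sketch,
# not the project's code):
import numpy as np
import scipy.optimize as opt

rng = np.random.default_rng(1)
Q = rng.random((2, 2))
rho = lambda x: max(abs(np.linalg.eigvals(np.eye(2) - np.diag(x) @ Q)))
res = opt.minimize(rho, np.ones(2), method="Nelder-Mead")
print(res.x, rho(res.x))   # diagonal entries found and the spectral radius reached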
richardliaw/ray | python/ray/util/xgboost/simple_example.py | 1 | 1100 | from sklearn import datasets
from sklearn.model_selection import train_test_split
from ray.util.xgboost import RayDMatrix, RayParams, train
# __xgboost_begin__
def main():
# Load dataset
data, labels = datasets.load_breast_cancer(return_X_y=True)
# Split into train and test set
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25)
train_set = RayDMatrix(train_x, train_y)
test_set = RayDMatrix(test_x, test_y)
# Set config
config = {
"tree_method": "approx",
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": 3,
}
evals_result = {}
# Train the classifier
bst = train(
config,
train_set,
evals=[(test_set, "eval")],
evals_result=evals_result,
ray_params=RayParams(max_actor_restarts=1),
verbose_eval=False)
bst.save_model("simple.xgb")
print("Final validation error: {:.4f}".format(
evals_result["eval"]["error"][-1]))
# __xgboost_end__
if __name__ == "__main__":
main()
| apache-2.0 |
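# The script above saves the fitted booster to "simple.xgb". A minimal sketch of
# loading it back with the core xgboost API for prediction (assuming xgboost is
# installed and the file exists from a previous run):
import numpy as np
import xgboost as xgb

booster = xgb.Booster()
booster.load_model("simple.xgb")
probs = booster.predict(xgb.DMatrix(np.random.rand(5, 30)))  # breast-cancer data has 30 features
print(probs)   # probabilities from the binary:logistic objective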
themrmax/scikit-learn | sklearn/tests/test_multioutput.py | 4 | 12671 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:, n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
half_index = 25
for n in range(3):
sgr = SGDRegressor(random_state=0)
sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
references[:, n] = sgr.predict(X_test)
sgr = MultiOutputRegressor(SGDRegressor(random_state=0))
sgr.partial_fit(X_train[:half_index], y_train[:half_index])
sgr.partial_fit(X_train[half_index:], y_train[half_index:])
y_pred = sgr.predict(X_test)
assert_almost_equal(references, y_pred)
assert_false(hasattr(MultiOutputRegressor(Lasso), 'partial_fit'))
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test = X[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test),
rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
# weighted regressor
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr_w.partial_fit(X, y, w)
# weighted with different weights
w = [2., 2.]
rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr.partial_fit(X, y, w)
assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
mor.partial_fit(X, y, classes)
est1 = mor.estimators_[0]
mor.partial_fit(X, y)
est2 = mor.estimators_[0]
# parallelism requires this to be the case for a sane implementation
assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
# train the multi_target_linear and also get the predictions.
half_index = X.shape[0] // 2
multi_target_linear.partial_fit(
X[:half_index], y[:half_index], classes=classes)
first_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), first_predictions.shape)
multi_target_linear.partial_fit(X[half_index:], y[half_index:])
second_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), second_predictions.shape)
# train the linear classification with each column and assert that
# predictions are equal after first partial_fit and second partial_fit
for i in range(3):
# create a clone with the same state
sgd_linear_clf = clone(sgd_linear_clf)
sgd_linear_clf.partial_fit(
X[:half_index], y[:half_index, i], classes=classes[i])
assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_mutli_output_classifiation_partial_fit_no_first_classes_exception():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
assert_raises_regex(ValueError, "classes must be passed on the first call "
"to partial_fit.",
multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert len(predict_proba) == n_outputs
for class_probabilities in predict_proba:
assert_equal((n_samples, n_classes), class_probabilities.shape)
assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
# train the forest with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
seed = 542
# make test deterministic
rng = np.random.RandomState(seed)
# random features
X = rng.normal(size=(5, 5))
# random labels
y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes
y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes
Y = np.concatenate([y1, y2], axis=1)
clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
clf.fit(X, Y)
y_result = clf.predict_proba(X)
y_actual = [np.array([[0.23481764, 0.76518236],
[0.67196072, 0.32803928],
[0.54681448, 0.45318552],
[0.34883923, 0.65116077],
[0.73687069, 0.26312931]]),
np.array([[0.5171785, 0.23878628, 0.24403522],
[0.22141451, 0.64102704, 0.13755846],
[0.16751315, 0.18256843, 0.64991843],
[0.27357372, 0.55201592, 0.17441036],
[0.65745193, 0.26062899, 0.08191907]])]
for i in range(len(y_actual)):
assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
yw = [[3, 2], [2, 3], [3, 2]]
w = np.asarray([2., 1., 1.])
sgd_linear_clf = SGDClassifier(random_state=1)
clf_w = MultiOutputClassifier(sgd_linear_clf)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
y = [[3, 2], [3, 2], [2, 3], [3, 2]]
sgd_linear_clf = SGDClassifier(random_state=1)
clf = MultiOutputClassifier(sgd_linear_clf)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5]]
assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
# NotFittedError when fit is not done but score, predict and
# and predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
# ValueError when y is continuous
assert_raise_message(ValueError, "Unknown label type", moc.fit, X, X[:, 1])
| bsd-3-clause |
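# The tests above exercise MultiOutputClassifier/MultiOutputRegressor, which fit
# one estimator per output column. A minimal usage sketch (assuming a current
# scikit-learn; not part of the test module above):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier

X, y1 = make_classification(n_samples=200, n_features=10, random_state=0)
y = np.column_stack([y1, 1 - y1])            # two (trivially related) targets
clf = MultiOutputClassifier(RandomForestClassifier(n_estimators=10, random_state=0))
clf.fit(X, y)
print(clf.predict(X[:3]).shape)              # (3, 2): one prediction per output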
mindriot101/bokeh | examples/app/clustering/main.py | 15 | 5969 | import numpy as np
np.random.seed(0)
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
# define some helper functions
def clustering(X, algorithm, n_clusters):
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# Generate the new colors:
if algorithm=='MiniBatchKMeans':
model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
elif algorithm=='Birch':
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm=='DBSCAN':
model = cluster.DBSCAN(eps=.2)
elif algorithm=='AffinityPropagation':
model = cluster.AffinityPropagation(damping=.9,
preference=-200)
elif algorithm=='MeanShift':
model = cluster.MeanShift(bandwidth=bandwidth,
bin_seeding=True)
elif algorithm=='SpectralClustering':
model = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
elif algorithm=='Ward':
model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward',
connectivity=connectivity)
elif algorithm=='AgglomerativeClustering':
model = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock",
n_clusters=n_clusters,
connectivity=connectivity)
model.fit(X)
if hasattr(model, 'labels_'):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return X, y_pred
def get_dataset(dataset, n_samples):
if dataset == 'Noisy Circles':
return datasets.make_circles(n_samples=n_samples,
factor=0.5,
noise=0.05)
elif dataset == 'Noisy Moons':
return datasets.make_moons(n_samples=n_samples,
noise=0.05)
elif dataset == 'Blobs':
return datasets.make_blobs(n_samples=n_samples,
random_state=8)
elif dataset == "No Structure":
return np.random.rand(n_samples, 2), None
# set up initial data
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
# set up plot (styling in theme.yaml)
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
# set up widgets
clustering_algorithms= [
'MiniBatchKMeans',
'AffinityPropagation',
'MeanShift',
'SpectralClustering',
'Ward',
'AgglomerativeClustering',
'DBSCAN',
'Birch'
]
datasets_names = [
'Noisy Circles',
'Noisy Moons',
'Blobs',
'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
title='Select algorithm:',
width=200,
options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
title='Select dataset:',
width=200,
options=datasets_names)
samples_slider = Slider(title="Number of samples",
value=1500.0,
start=1000.0,
end=3000.0,
step=100,
width=400)
clusters_slider = Slider(title="Number of clusters",
value=2.0,
start=2.0,
end=10.0,
step=1,
width=400)
# set up callbacks
def update_algorithm_or_clusters(attrname, old, new):
global X
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data = dict(colors=colors, x=X[:, 0], y=X[:, 1])
plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
dataset = dataset_select.value
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
n_samples = int(samples_slider.value)
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data = dict(colors=colors, x=X[:, 0], y=X[:, 1])
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value', update_samples_or_dataset)
# set up layout
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, widgetbox(samples_slider, clusters_slider))
# add to document
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
| bsd-3-clause |
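# The app above builds a k-NN connectivity matrix and symmetrises it with
# 0.5 * (C + C.T) before structured Ward clustering. A tiny sketch of just that
# step (assuming a current scikit-learn/scipy; not part of the app above):
import numpy as np
from sklearn.neighbors import kneighbors_graph

X = np.random.RandomState(0).rand(20, 2)
C = kneighbors_graph(X, n_neighbors=3, include_self=False)   # sparse, generally asymmetric
C_sym = 0.5 * (C + C.T)                                      # make the graph undirected
print(abs(C - C.T).nnz, abs(C_sym - C_sym.T).nnz)            # first is usually > 0, second is 0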
paninski-lab/yass | src/yass/rf/util.py | 1 | 9229 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import sklearn.decomposition as decomp
import networkx as nx
import scipy
import scipy.signal
import scipy.ndimage as img
import scipy.signal as sig
# There are two steps to getting the RF. The first step gets a core set of significant pixels using a 2x2
# spatial filter. The second step expands the RF around that core set. The expansion step was originally added
# to get fatter RFs for the multi-RF splitting project.
def get_seed(image, dim = 2):
kernel = np.full((dim, dim), 1/(float(dim)**2))
smoothed_image = sig.convolve2d(image, kernel, 'same')
seed = np.vstack(np.where(np.abs((smoothed_image - np.mean(smoothed_image))) > 4*np.std(smoothed_image)))
std_pixels = np.array(seed).T
dists = scipy.spatial.distance.cdist(std_pixels, std_pixels)
thresh = 1.5
upper=1
lower=0
bin_dists = np.where(dists<thresh, upper, lower)
#print (bin_dists)
#compute connected nodes and sum spikes over them
G = nx.from_numpy_array(bin_dists)
con = list(nx.connected_components(G))
cluster_list = [std_pixels[list(con[i])] for i in range(len(con))]
return cluster_list
def expand_rf(image, cluster_list):
std = np.std(image)
std_pixels = np.vstack(np.where(np.abs(image - np.mean(image))>std*2))
    if std_pixels.shape[0] == 0:
        std_pixels = np.asarray([])  # no pixels exceeded the threshold
        return "poop", std_pixels
std_pixels = np.array(std_pixels).T
dists = scipy.spatial.distance.cdist(std_pixels, std_pixels)
thresh = 1.5
upper=1
lower=0
bin_dists = np.where(dists<thresh, upper, lower)
#print (bin_dists)
#compute connected nodes and sum spikes over them
G = nx.from_numpy_array(bin_dists)
con = list(nx.connected_components(G))
test_elements = [element[0] for element in cluster_list]
valid_list = []
for element in con:
pixels = std_pixels[list(element)]
indicator = [np.any([np.all(pixel == test_element) for pixel in pixels]) for test_element in test_elements]
if np.any(indicator) == True:
valid_list.append(element)
return [std_pixels[list(element)] for element in valid_list]
# wrapper for the 2 steps
def get_rf(image, dim):
    seed = get_seed(image, dim)
    return expand_rf(image, seed)
def classifiy_contours(gaussian_sd,
green_val,
fr_rates,
sd_mean_noise_th,
sd_ratio_noise_th,
green_noise_th,
midget_on_th,
midget_off_th,
large_on_th,
large_off_th,
sbc_fr_th):
n_units = gaussian_sd.shape[0]
labels = np.ones(n_units, 'int32')*-1
# label:
# 0: on parasol
# 1: off parasol
# 2: on midget
# 3: off midget
# 4: on large
# 5: off large
# 6: sbc
# 7: unknown
cell_types = ['On-Parasol', 'Off-Parasol', 'On-Midget', 'Off-Midget',
'On-Large', 'Off-Large', 'SBC', 'Unknown']
sd_mean = np.mean(gaussian_sd, 1)
max_sd = np.max(gaussian_sd, 1)
max_sd[max_sd==0] = 1
sd_ratio = np.min(gaussian_sd, 1)/max_sd
# first find out bad contours
idx_noise = np.logical_or(
sd_mean < sd_mean_noise_th,
sd_ratio < sd_ratio_noise_th)
idx_noise = np.logical_or(idx_noise, np.abs(green_val) < green_noise_th)
idx_noise = np.logical_and(idx_noise, labels==-1)
labels[idx_noise] = 7
# classify on
idx_on = green_val >= green_noise_th
idx_on_midget = np.logical_and(idx_on, sd_mean < midget_on_th)
idx_on_midget = np.logical_and(idx_on_midget, labels==-1)
labels[idx_on_midget] = 2
idx_on_para = np.logical_and(idx_on, sd_mean < large_on_th)
idx_on_para = np.logical_and(idx_on_para, labels==-1)
labels[idx_on_para] = 0
idx_on_large = np.logical_and(idx_on, labels==-1)
labels[idx_on_large] = 4
# classify off
idx_off = green_val <= -green_noise_th
idx_off_midget = np.logical_and(idx_off, sd_mean < midget_off_th)
idx_off_midget = np.logical_and(idx_off_midget, labels==-1)
labels[idx_off_midget] = 3
idx_off_large = np.logical_and(idx_off, sd_mean >= large_off_th)
idx_off_large = np.logical_and(idx_off_large, labels==-1)
labels[idx_off_large] = 5
idx_off_para = np.logical_and(idx_off, labels==-1)
idx_off_para = np.logical_and(idx_off_para, fr_rates>sbc_fr_th)
labels[idx_off_para] = 1
idx_sbc = labels==-1
labels[idx_sbc] = 6
return labels, cell_types
def get_circle_plotting_data(i_cell, Gaussian_params):
    # Adapted from Nora's matlab code, hasn't been triple-checked
circle_samples = np.arange(0,2*np.pi+0.1,0.1)
x_circle = np.cos(circle_samples)
y_circle = np.sin(circle_samples)
# Get Gaussian parameters
angle = Gaussian_params[i_cell,5]
sd = Gaussian_params[i_cell,3:5]
x_shift = Gaussian_params[i_cell,1]
y_shift = Gaussian_params[i_cell,2]
R = np.asarray([[np.cos(angle), np.sin(angle)],[-np.sin(angle), np.cos(angle)]])
L = np.asarray([[sd[0], 0],[0, sd[1]]])
circ = np.concatenate([x_circle.reshape((-1,1)),y_circle.reshape((-1,1))],axis=1)
X = np.dot(R,np.dot(L,np.transpose(circ)))
X[0] = X[0]+x_shift
X[1] = np.abs(X[1]+y_shift)
plotting_data = X
return plotting_data
def contour_plots(contours, labels, cell_type_name, stim_size):
n_types = len(cell_type_name)
n_col = 5
n_row = int(np.ceil(n_types/n_col))
for j in range(n_types):
plt.subplot(n_row, n_col, j+1)
idx = np.where(labels == j)[0]
for unit in idx:
plt.plot(contours[unit][:, 0], contours[unit][:, 1], 'k', alpha=0.5)
plt.xlim([0, stim_size[1]])
plt.ylim([0, stim_size[0]])
plt.title(cell_type_name[j])
def fr_plots(f_rates, mean_sd, labels, cell_type_name):
f_rates[f_rates == 0] = np.exp(-11)
mean_sd[mean_sd == 0] = np.exp(-11)
f_rates = np.log(f_rates)
mean_sd = np.log(mean_sd)
n_types = len(cell_type_name)
max_sd = np.max(mean_sd)
max_fr = np.max(f_rates)
n_col = 2
n_row = int(np.ceil(n_types/n_col))
for j in range(n_types):
plt.subplot(n_row, n_col, j+1)
idx = np.where(labels == j)[0]
plt.scatter(mean_sd[idx], f_rates[idx], color='k', alpha=0.5)
plt.ylim([-2, max_fr])
plt.xlim([-2, max_sd])
if j % n_col == 0:
plt.ylabel('log of firing rates (Hz)')
if j >= ((n_row-1)*n_col):
plt.xlabel('log of mean Gaussian SD')
plt.title(cell_type_name[j])
def make_classification_plot(gaussian_sd,
green_val,
f_rates,
sd_mean_noise_th,
sd_ratio_noise_th,
green_noise_th,
midget_on_th,
midget_off_th,
large_on_th,
large_off_th,
sbc_fr_th,
contours,
stim_size):
labels, cell_type_name = classifiy_contours(
gaussian_sd,
green_val,
f_rates,
sd_mean_noise_th,
sd_ratio_noise_th,
green_noise_th/100,
midget_on_th,
midget_off_th,
large_on_th,
large_off_th,
sbc_fr_th)
colors = ['blue','red','green','cyan',
'magenta','brown','pink', 'black']
# label:
# 0: on parasol
# 1: off parasol
# 2: on midget
# 3: off midget
# 4: on large
# 5: off large
# 6: sbc
    # 7: unknown
n_cell_types = len(cell_type_name)
mean_sd = np.mean(gaussian_sd, 1)
max_sd = np.max(gaussian_sd, 1)
max_sd[max_sd==0] = 1
sd_ratio = np.min(gaussian_sd, 1)/max_sd
dot_size = 5
plt.figure(figsize=(12, 5))
plt.subplot(1,2,1)
for j in range(n_cell_types):
plt.scatter(mean_sd[labels==j], green_val[labels==j], s=dot_size, color=colors[j])
plt.ylabel('Most extreme green value')
plt.xlabel('Mean SD')
plt.ylim([np.min(green_val), np.max(green_val)])
plt.xlim([0, 6])
plt.subplot(1,2,2)
for j in range(n_cell_types):
plt.scatter(mean_sd[labels==j], sd_ratio[labels==j], s=dot_size, color=colors[j])
plt.ylabel('Min/Max ratio of Gaussian SD')
plt.xlabel('Mean Gaussian SD')
custom_lines = [Line2D([], [], color=colors[i], marker='o', linestyle='None') for i in range(len(cell_type_name)-1)]
plt.legend(custom_lines, cell_type_name[:-1])
#plt.ylim([-0.05, 0.05])
#plt.xlim([0, 6])
plt.show()
plt.figure(figsize=(12, 12))
contour_plots(contours, labels, cell_type_name, stim_size)
plt.show()
plt.figure(figsize=(12, 10))
fr_plots(f_rates, mean_sd, labels, cell_type_name)
plt.show()
return labels
| apache-2.0 |
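# get_seed()/expand_rf() above find receptive-field pixels by thresholding an
# STA image and grouping neighbouring significant pixels. The sketch below shows
# the same idea on a synthetic image with scipy.ndimage.label instead of the
# networkx graph used above (an illustrative simplification, not the module's code):
import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(0)
image = rng.normal(0.0, 1.0, (32, 32))
image[10:14, 20:24] += 10.0                          # a bright 4x4 "receptive field"
mask = np.abs(image - image.mean()) > 4 * image.std()
labeled, n_components = ndi.label(mask)              # 4-connectivity by default
pixels = [np.argwhere(labeled == k) for k in range(1, n_components + 1)]
print(n_components, pixels[0].min(0), pixels[0].max(0))   # one blob spanning roughly rows 10-13, cols 20-23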
alexsavio/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 110 | 5681 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
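# The example above compares covariance-based envelopes with a One-Class SVM on
# real data. A minimal sketch of the shared predict() convention (+1 inlier,
# -1 outlier) on toy Gaussian data with a few planted outliers (assuming a
# current scikit-learn; not part of the example above):
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(42)
X = np.r_[rng.normal(0, 1, (200, 2)), rng.uniform(-8, 8, (10, 2))]
for est in (EllipticEnvelope(contamination=0.05), OneClassSVM(nu=0.05, gamma=0.1)):
    pred = est.fit(X).predict(X)
    print(type(est).__name__, "flagged", int((pred == -1).sum()), "points")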
pythonstock/stock | old_jobs/guess_indicators_lite_buy_daily_job.py | 1 | 5160 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import libs.common as common
import pandas as pd
import numpy as np
import math
import datetime
import heapq
### Filter the daily indicator data and, in a second pass, pick out the entries that meet the conditions.
def stat_all_lite(tmp_datetime):
    # Name of the database table to operate on.
table_name = "guess_indicators_lite_buy_daily"
datetime_str = (tmp_datetime).strftime("%Y-%m-%d")
datetime_int = (tmp_datetime).strftime("%Y%m%d")
print("datetime_str:", datetime_str)
print("datetime_int:", datetime_int)
    # try:
    #     # Delete old data. guess_indicators_lite_buy_daily is a single table with no date field.
# del_sql = " DELETE FROM `stock_data`.`%s` WHERE `date`= '%s' " % (table_name, datetime_int)
# print("del_sql:", del_sql)
# common.insert(del_sql)
# print("del_sql")
# except Exception as e:
# print("error :", e)
sql_1 = """
SELECT `date`, `code`, `name`, `changepercent`, `trade`,`turnoverratio`, `pb` ,`kdjj`,`rsi_6`,`cci`
FROM stock_data.guess_indicators_lite_daily WHERE `date` = %s
and `changepercent` > 2 and `pb` > 0
"""
    # and `changepercent` > 2 and `pb` > 0 and `turnoverratio` > 5 ; the turnover-ratio condition was removed.
data = pd.read_sql(sql=sql_1, con=common.engine(), params=[datetime_int])
data = data.drop_duplicates(subset="code", keep="last")
print("######## len data ########:", len(data))
# del data["name"]
# print(data)
data["trade_float32"] = data["trade"].astype('float32', copy=True)
    # The input date is used for the historical-data query.
stock_merge = pd.DataFrame({
"date": data["date"], "code": data["code"], "wave_mean": data["trade"],
"wave_crest": data["trade"], "wave_base": data["trade"]}, index=data.index.values)
print(stock_merge.head(1))
stock_merge = stock_merge.apply(apply_merge, axis=1) # , axis=1)
del stock_merge["date"] # 合并前删除 date 字段。
# 合并数据
data_new = pd.merge(data, stock_merge, on=['code'], how='left')
    # Use trade_float32 in the calculations.
    data_new = data_new[data_new["trade_float32"] > data_new["wave_base"]]  # trade price above the wave base (trough).
    data_new = data_new[data_new["trade_float32"] < data_new["wave_crest"]]  # trade price below the wave crest (peak).
# wave_base wave_crest wave_mean
data_new["wave_base"] = data_new["wave_base"].round(2) # 数据保留2位小数
data_new["wave_crest"] = data_new["wave_crest"].round(2) # 数据保留2位小数
data_new["wave_mean"] = data_new["wave_mean"].round(2) # 数据保留2位小数
data_new["up_rate"] = (data_new["wave_mean"].sub(data_new["trade_float32"])).div(data_new["wave_crest"]).mul(100)
data_new["up_rate"] = data_new["up_rate"].round(2) # 数据保留2位小数
data_new["buy"] = 1
data_new["sell"] = 0
data_new["today_trade"] = data_new["trade"]
data_new["income"] = 0
    # Rename the date column.
data_new.columns.values[0] = "buy_date"
del data_new["trade_float32"]
try:
common.insert_db(data_new, table_name, False, "`code`")
print("insert_db")
except Exception as e:
print("error :", e)
    # Rename.
del data_new["name"]
print(data_new)
def apply_merge(tmp):
date = tmp["date"]
code = tmp["code"]
date_end = datetime.datetime.strptime(date, "%Y%m%d")
date_start = (date_end + datetime.timedelta(days=-300)).strftime("%Y-%m-%d")
date_end = date_end.strftime("%Y-%m-%d")
print(code, date_start, date_end)
# open, high, close, low, volume, price_change, p_change, ma5, ma10, ma20, v_ma5, v_ma10, v_ma20, turnover
    # Use the cached method to speed up the calculation.
stock = common.get_hist_data_cache(code, date_start, date_end)
    # Add an empty check: if the data is empty, return zeros.
if stock is None:
return list([code, date, 0, 0, 0])
stock = pd.DataFrame({"close": stock["close"]}, index=stock.index.values)
    stock = stock.sort_index(0)  # sort the data by date.
# print(stock.head(10))
arr = pd.Series(stock["close"].values)
# print(df_arr)
wave_mean = arr.mean()
    max_point = 3  # number of extreme sample points to take.
    # Compute the stock's wave crest (peak) value.
wave_crest = heapq.nlargest(max_point, enumerate(arr), key=lambda x: x[1])
wave_crest_mean = pd.DataFrame(wave_crest).mean()
    # The first element of each output tuple is the index, the second is the compared value. Compute the wave base (trough) of the data.
wave_base = heapq.nsmallest(max_point, enumerate(arr), key=lambda x: x[1])
wave_base_mean = pd.DataFrame(wave_base).mean()
    # Output the data.
print("##############", len(stock))
if len(stock) > 180:
        # The order code, date, wave_base, wave_crest, wave_mean must stay consistent; a row of data is returned and then filled in.
return list([code, date, wave_base_mean[1], wave_crest_mean[1], wave_mean])
else:
return list([code, date, 0, 0, 0])
# main entry point
if __name__ == '__main__':
    # Re-screen the data.
tmp_datetime = common.run_with_args(stat_all_lite)
| apache-2.0 |
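# apply_merge() above averages the few highest and lowest closing prices via
# heapq.nlargest/nsmallest over enumerate(...). A small self-contained sketch of
# that idiom (illustrative values only, not the job's real data):
import heapq
import pandas as pd

closes = pd.Series([10.0, 12.5, 9.8, 14.2, 11.1, 8.9, 13.7])
crest = heapq.nlargest(3, enumerate(closes), key=lambda x: x[1])    # [(index, value), ...]
base = heapq.nsmallest(3, enumerate(closes), key=lambda x: x[1])
print(pd.DataFrame(crest).mean()[1], pd.DataFrame(base).mean()[1])  # wave-crest / wave-base means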
rohanp/scikit-learn | examples/manifold/plot_lle_digits.py | 1 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding (Laplacian Eigenmaps)")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
                                      eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
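#----------------------------------------------------------------------
# Optional refactor (illustrative sketch, not part of the original example):
# the repeated "time + fit_transform + plot_embedding" pattern above can be
# driven by a single loop over (name, estimator) pairs.  ``plot_embedding`` is
# the helper defined earlier in this example; the estimator subset and the
# default ``n_neighbors`` value below are assumptions chosen for illustration.
def run_selected_embeddings(X, n_neighbors=30):
    from time import time
    from sklearn import manifold, decomposition
    estimators = [
        ("Truncated SVD", decomposition.TruncatedSVD(n_components=2)),
        ("Isomap", manifold.Isomap(n_neighbors, n_components=2)),
        ("t-SNE", manifold.TSNE(n_components=2, init='pca', random_state=0)),
    ]
    for name, estimator in estimators:
        t0 = time()
        X_emb = estimator.fit_transform(X)
        plot_embedding(X_emb, "%s embedding of the digits (time %.2fs)"
                       % (name, time() - t0))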
| bsd-3-clause |
ARudiuk/mne-python | mne/viz/evoked.py | 1 | 48115 | """Functions to make simple plot on evoked M/EEG data (besides topographies)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import numpy as np
from ..io.pick import (channel_type, pick_types, _picks_by_type,
_pick_data_channels)
from ..externals.six import string_types
from ..defaults import _handle_default
from .utils import (_draw_proj_checkbox, tight_layout, _check_delayed_ssp,
plt_show, _process_times)
from ..utils import logger, _clean_names, warn
from ..fixes import partial
from ..io.pick import pick_info
from .topo import _plot_evoked_topo
from .topomap import (_prepare_topo_plot, plot_topomap, _check_outlines,
_draw_outlines, _prepare_topomap, _topomap_animation)
from ..channels import find_layout
def _butterfly_onpick(event, params):
"""Helper to add a channel name on click"""
params['need_draw'] = True
ax = event.artist.axes
ax_idx = np.where([ax is a for a in params['axes']])[0]
if len(ax_idx) == 0: # this can happen if ax param is used
return # let the other axes handle it
else:
ax_idx = ax_idx[0]
lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Helper to only draw once for picking"""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _butterfly_onselect(xmin, xmax, ch_types, evoked, text=None):
"""Function for drawing topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type for type in ch_types if type in ('eeg', 'grad', 'mag')]
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
ylim = ax.get_ylim()
vert_lines.append(ax.plot([xmin, xmin], ylim, zorder=0, color='red'))
vert_lines.append(ax.plot([xmax, xmax], ylim, zorder=0, color='red'))
fill = ax.fill_betweenx(ylim, x1=xmin, x2=xmax, alpha=0.2,
color='green')
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
times = evoked.times
xmin *= 0.001
minidx = np.abs(times - xmin).argmin()
xmax *= 0.001
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(1, len(ch_types), squeeze=False,
figsize=(3 * len(ch_types), 3))
for idx, ch_type in enumerate(ch_types):
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(evoked,
ch_type,
layout=None)
data = evoked.data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
title = '%s RMS' % ch_type
else:
title = ch_type
data = np.average(data, axis=1)
axarr[0][idx].set_title(title)
plot_topomap(data, pos, axes=axarr[0][idx], show=False)
fig.suptitle('Average over %.2fs - %.2fs' % (xmin, xmax), fontsize=15,
y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines,
fill=fill)
fig.canvas.mpl_connect('close_event', close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
def _topo_closed(events, ax, lines, fill):
"""Callback for removing lines from evoked plot as topomap is closed."""
for line in lines:
ax.lines.remove(line[0])
ax.collections.remove(fill)
ax.get_figure().canvas.draw()
def _rgb(info, x, y, z):
"""Helper to transform x, y, z values into RGB colors"""
all_pos = np.array([ch['loc'][:3] for ch in info['chs']])
for idx, dim in enumerate([x, y, z]):
this_pos = all_pos[:, idx]
dim_min = this_pos.min()
dim_max = (this_pos - dim_min).max()
dim -= dim_min
dim /= dim_max
return np.asarray([x, y, z]).T
def _plot_legend(pos, colors, axis, bads, outlines):
"""Helper function to plot color/channel legends for butterfly plots
with spatial colors"""
from mpl_toolkits.axes_grid.inset_locator import inset_axes
bbox = axis.get_window_extent() # Determine the correct size.
ratio = bbox.width / bbox.height
ax = inset_axes(axis, width=str(30 / ratio) + '%', height='30%', loc=2)
pos_x, pos_y = _prepare_topomap(pos, ax)
ax.scatter(pos_x, pos_y, color=colors, s=25, marker='.', zorder=1)
for idx in bads:
ax.scatter(pos_x[idx], pos_y[idx], s=5, marker='.', color='w',
zorder=1)
if isinstance(outlines, dict):
_draw_outlines(ax, outlines)
def _plot_evoked(evoked, picks, exclude, unit, show,
ylim, proj, xlim, hline, units,
scalings, titles, axes, plot_type,
cmap=None, gfp=False, window_title=None,
spatial_colors=False, set_tight_layout=True,
selectable=True):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
info = evoked.info
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
if isinstance(gfp, string_types) and gfp != 'only':
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
# Valid data types ordered for consistency
valid_channel_types = ['eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg',
'dipole', 'gof', 'bio', 'ecog']
if picks is None:
picks = list(range(info['nchan']))
bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']
if ch in info['ch_names']]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, string_types) for ch in exclude)):
exclude = [info['ch_names'].index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or '
'"bads"')
picks = list(set(picks).difference(exclude))
picks = np.array(picks)
types = np.array([channel_type(info, idx) for idx in picks])
n_channel_types = 0
ch_types_used = []
for t in valid_channel_types:
if t in types:
n_channel_types += 1
ch_types_used.append(t)
axes_init = axes # remember if axes were given as input
fig = None
if axes is None:
fig, axes = plt.subplots(n_channel_types, 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if axes_init is not None:
fig = axes[0].get_figure()
if window_title is not None:
fig.canvas.set_window_title(window_title)
if not len(axes) == n_channel_types:
raise ValueError('Number of axes (%g) must match number of channel '
'types (%d: %s)' % (len(axes), n_channel_types,
sorted(ch_types_used)))
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
times = 1e3 * evoked.times # time in milliseconds
texts = list()
idxs = list()
lines = list()
selectors = list() # for keeping reference to span_selectors
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w",
alpha=0.75)]
for ax, t in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
ch_unit = units[t]
this_scaling = scalings[t]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
idx = list(picks[types == t])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * evoked.data[idx, :]
# Parameters for butterfly interactive plots
if plot_type == 'butterfly':
text = ax.annotate('Loading...', xy=(0.01, 0.1),
xycoords='axes fraction', fontsize=20,
color='green', zorder=3)
text.set_visible(False)
if selectable:
callback_onselect = partial(
_butterfly_onselect, ch_types=ch_types_used,
evoked=evoked, text=text)
blit = False if plt.get_backend() == 'MacOSX' else True
selectors.append(SpanSelector(
ax, callback_onselect, 'horizontal', minspan=10,
useblit=blit, rectprops=dict(alpha=0.5,
facecolor='red')))
gfp_only = (isinstance(gfp, string_types) and gfp == 'only')
if not gfp_only:
if spatial_colors:
chs = [info['chs'][i] for i in idx]
locs3d = np.array([ch['loc'][:3] for ch in chs])
x, y, z = locs3d.T
colors = _rgb(info, x, y, z)
if t in ('meg', 'mag', 'grad', 'eeg'):
layout = find_layout(info, ch_type=t, exclude=[])
else:
layout = find_layout(info, None, exclude=[])
# drop channels that are not in the data
used_nm = np.array(_clean_names(info['ch_names']))[idx]
names = np.asarray([name for name in used_nm
if name in layout.names])
name_idx = [layout.names.index(name) for name in names]
if len(name_idx) < len(chs):
warn('Could not find layout for all the channels. '
'Legend for spatial colors not drawn.')
else:
# find indices for bads
bads = [np.where(names == bad)[0][0] for bad in
info['bads'] if bad in names]
pos, outlines = _check_outlines(layout.pos[:, :2],
'skirt', None)
pos = pos[name_idx]
_plot_legend(pos, colors, ax, bads, outlines)
else:
colors = ['k'] * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
for ch_idx in range(len(D)):
line_list.append(ax.plot(times, D[ch_idx], picker=3.,
zorder=1,
color=colors[ch_idx])[0])
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.,) if spatial_colors else (0., 1., 0.)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = ax.get_ylim() if (ylim is None or t not in
ylim.keys()) else ylim[t]
if not gfp_only:
y_offset = this_ylim[0]
else:
y_offset = 0.
this_gfp += y_offset
ax.fill_between(times, y_offset, this_gfp, color='none',
facecolor=gfp_color, zorder=1, alpha=0.25)
line_list.append(ax.plot(times, this_gfp, color=gfp_color,
zorder=3)[0])
ax.text(times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
'GFP', zorder=4, color=gfp_color,
path_effects=gfp_path_effects)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors:
line.set_linestyle("--")
ax.set_ylabel('data (%s)' % ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
elif plot_type == 'image':
im = ax.imshow(D, interpolation='nearest', origin='lower',
extent=[times[0], times[-1], 0, D.shape[0]],
aspect='auto', cmap=cmap)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
ax.set_ylabel('channels (%s)' % 'index')
else:
            raise ValueError("plot_type has to be 'butterfly' or 'image'. "
                             "Got %s." % plot_type)
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and t in ylim:
if plot_type == 'butterfly':
ax.set_ylim(ylim[t])
elif plot_type == 'image':
im.set_clim(ylim[t])
ax.set_title(titles[t] + ' (%d channel%s)' % (
len(D), 's' if len(D) > 1 else ''))
ax.set_xlabel('time (ms)')
if (plot_type == 'butterfly') and (hline is not None):
for h in hline:
c = ('r' if not spatial_colors else 'grey')
ax.axhline(h, linestyle='--', linewidth=2, color=c)
lines.append(line_list)
if plot_type == 'butterfly':
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=info['ch_names'], idxs=idxs, need_draw=False,
path_effects=path_effects, selectors=selectors)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
if axes_init is None:
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,
types=types, units=units, scalings=scalings, unit=unit,
ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
plt_show(show)
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
return fig
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None, gfp=False,
                window_title=None, spatial_colors=False, selectable=True):
    """Plot evoked data using butterfly plots
    Left-clicking on a line shows the channel name. Selecting an area by
    clicking and holding the left mouse button plots a topographic map of the
    painted area.
    Note: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
hline : list of floats | None
        The values at which to show a horizontal line.
units : dict | None
        The units of the channel types used for axis labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
gfp : bool | 'only'
Plot GFP in green if True or "only". If "only", then the individual
channel traces will not be shown.
window_title : str | None
The title to put at the top of the figure.
spatial_colors : bool
If True, the lines are color coded by mapping physical sensor
coordinates into color values. Spatially similar channels will have
similar colors. Bad channels will be dotted. If False, the good
channels are plotted black and bad channels red. Defaults to False.
selectable : bool
Whether to use interactive features. If True (default), it is possible
to paint an area to draw topomaps. When False, the interactive features
are disabled. Disabling interactive features reduces memory consumption
        and is useful when using the ``axes`` parameter to draw multi-axes
        figures.
.. versionadded:: 0.13.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the butterfly plots.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=ylim, proj=proj, xlim=xlim,
hline=hline, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="butterfly",
gfp=gfp, window_title=window_title,
spatial_colors=spatial_colors, selectable=selectable)
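# Minimal usage sketch (not part of the original module).  The evoked file
# name below is an assumption -- substitute any averaged FIF file, e.g. one
# produced from the MNE sample dataset.
def _example_plot_evoked():
    import mne
    evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0,
                              baseline=(None, 0))
    # Butterfly plot with spatially color-coded channels and a GFP overlay.
    return plot_evoked(evoked, gfp=True, spatial_colors=True)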
def plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_grads=False, show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad, misc.
If None, the ylim parameter for each channel is determined by
the maximum absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
merge_grads : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, merge_grads=merge_grads,
show=show)
def _animate_evoked_topomap(evoked, ch_type='mag', times=None, frame_rate=None,
                            butterfly=False, blit=True, show=True):
    """Make animation of evoked data as topomap timeseries. The animation can
    be paused/resumed with the left mouse button. Left and right arrow keys
    can be used to move backward or forward in time.
Parameters
----------
evoked : instance of Evoked
The evoked data.
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg'.
If None, first available channel type from ('mag', 'grad', 'eeg') is
used. Defaults to None.
times : array of floats | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None, frame rate = sfreq / 10.
Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is recommended
to use blit in combination with ``show=True``. If you intend to save
the animation it is better to disable blit. Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
The figure.
anim : instance of matplotlib FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
return _topomap_animation(evoked, ch_type=ch_type, times=times,
frame_rate=frame_rate, butterfly=butterfly,
blit=blit, show=show)
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r'):
"""Plot evoked data as images
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
clim for plots (after scaling has been applied). e.g.
clim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
        The units of the channel types used for axis labels. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim,
hline=None, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="image",
cmap=cmap)
def _plot_update_evoked(params, bools):
""" update the plot evoked lines
"""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
times = evoked.times * 1e3
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
for line, di in zip(ax.lines, D):
line.set_data(times, di)
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
def plot_evoked_white(evoked, noise_cov, show=True):
"""Plot whitened evoked response
Plots the whitened evoked response and the whitened GFP as described in
[1]_. If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
    that has the highest log-likelihood. The right column will depict the
    whitened GFPs based on each estimator separately for each sensor type.
    Instead of the number of channels, the GFP display shows the estimated
    rank.
Note. The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance | str
The noise covariance as computed by ``mne.cov.compute_covariance``.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
"""
return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
scalings=None, rank=None, show=show)
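# Minimal usage sketch (not part of the original module).  Both file names are
# assumptions; any averaged FIF file plus a noise covariance computed with
# mne.compute_covariance (or read with mne.read_cov) will do.
def _example_plot_evoked_white():
    import mne
    evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0,
                              baseline=(None, 0))
    noise_cov = mne.read_cov('sample_audvis-cov.fif')
    # Passing a list of covariances instead compares estimators in the GFP
    # column, as described in the docstring above.
    return plot_evoked_white(evoked, noise_cov)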
def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True):
"""helper to plot_evoked_white
Additional Parameters
---------------------
scalings : dict | None
The rescaling method to be applied to improve the accuracy of rank
        estimation. If dict, it will override the following default values
(used if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
        Note. These values were tested on different datasets across various
conditions. You should not need to update them.
rank : dict of int | None
Dict of ints where keys are 'eeg', 'mag' or 'grad'. If None,
the rank is detected automatically. Defaults to None. Note.
The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
"""
from ..cov import whiten_evoked, read_cov # recursive import
from ..cov import _estimate_rank_meeg_cov
import matplotlib.pyplot as plt
if scalings is None:
scalings = dict(mag=1e12, grad=1e11, eeg=1e5)
ch_used = [ch for ch in ['eeg', 'grad', 'mag'] if ch in evoked]
has_meg = 'mag' in ch_used and 'grad' in ch_used
if isinstance(noise_cov, string_types):
noise_cov = read_cov(noise_cov)
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
proc_history = evoked.info.get('proc_history', [])
has_sss = False
if len(proc_history) > 0:
        # if SSS has been applied, mags and grads are no longer independent
# for correct display of the whitening we will drop the cross-terms
# (the gradiometer * magnetometer covariance)
has_sss = 'max_info' in proc_history[0] and has_meg
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
evoked = evoked.copy() # handle ref meg
evoked.info['projs'] = [] # either applied already or not-- else issue
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
evoked.pick_channels([evoked.ch_names[k] for k in picks])
# important to re-pick. will otherwise crash on systems with ref channels
# as first sensor block
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
picks_list = _picks_by_type(evoked.info, meg_combined=has_sss)
if has_meg and has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
rank_list = []
for cov in noise_cov:
rank_ = {}
C = cov['data'].copy()
picks_list2 = [k for k in picks_list]
if rank is None:
if has_meg and not has_sss:
picks_list2 += _picks_by_type(evoked.info,
meg_combined=True)
for ch_type, this_picks in picks_list2:
this_info = pick_info(evoked.info, this_picks)
idx = np.ix_(this_picks, this_picks)
this_rank = _estimate_rank_meeg_cov(C[idx], this_info,
scalings)
rank_[ch_type] = this_rank
if rank is not None:
rank_.update(rank)
rank_list.append(rank_)
evokeds_white = [whiten_evoked(evoked, n, picks, rank=r)
for n, r in zip(noise_cov, rank_list)]
axes_evoked = None
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
fig, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
ax_gfp = None
if any(((n_columns == 1 and n_ch_used == 1),
(n_columns == 1 and n_ch_used > 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
times = evoked.times * 1e3
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = {'eeg': 'black', 'mag': 'blue', 'grad': 'cyan',
'meg': 'steelblue'}
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k')
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--')
# Now plot the GFP
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax_gfp[i].set_title(title if n_columns > 1 else
'whitened global field power (GFP),'
' method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
ax_gfp[i].plot(times, gfp,
label=(label if n_columns > 1 else title),
color=color if n_columns > 1 else ch_colors[ch])
ax_gfp[i].set_xlabel('times [ms]')
ax_gfp[i].set_ylabel('GFP [chi^2]')
ax_gfp[i].set_xlim(times[0], times[-1])
ax_gfp[i].set_ylim(0, 10)
ax_gfp[i].axhline(1, color='red', linestyle='--')
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
plt_show(show)
return fig
def plot_snr_estimate(evoked, inv, show=True):
"""Plot a data SNR estimate
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv, verbose=True)
fig, ax = plt.subplots(1, 1)
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.plot([0, 0], lims[2:], 'k:')
ax.plot(lims[:2], [0, 0], 'k:')
# Colors are "bluish green" and "vermilion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])
ax.plot(evoked.times, snr, color=[0.8, 0.4, 0.0])
ax.set_xlim(lims[:2])
ax.set_ylim(lims[2:])
ax.set_ylabel('SNR')
ax.set_xlabel('Time (sec)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
plt.draw()
plt_show(show)
return fig
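# Minimal usage sketch (not part of the original module).  File names are
# assumptions; the inverse operator would typically come from the same
# recording as the evoked response.
def _example_plot_snr_estimate():
    import mne
    from mne.minimum_norm import read_inverse_operator
    evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0,
                              baseline=(None, 0))
    inv = read_inverse_operator('sample_audvis-meg-oct-6-meg-inv.fif')
    return plot_snr_estimate(evoked, inv)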
def _connection_line(x, fig, sourceax, targetax):
    """Helper function to connect the time series and the topo plots"""
from matplotlib.lines import Line2D
transFigure = fig.transFigure.inverted()
tf = fig.transFigure
(xt, yt) = transFigure.transform(targetax.transAxes.transform([.5, .25]))
(xs, _) = transFigure.transform(sourceax.transData.transform([x, 0]))
(_, ys) = transFigure.transform(sourceax.transAxes.transform([0, 1]))
return Line2D((xt, xs), (yt, ys), transform=tf, color='grey',
linestyle='-', linewidth=1.5, alpha=.66, zorder=0)
def plot_evoked_joint(evoked, times="peaks", title='', picks=None,
exclude=None,
show=True, ts_args=None, topomap_args=None):
"""Plot evoked data as butterfly plot and add topomaps for selected
time points.
Parameters
----------
evoked : instance of Evoked
The evoked instance.
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", 5 evenly spaced topographies
between the first and last time instant will be shown. If "peaks",
finds time points automatically by checking for 3 local maxima in
Global Field Power. Defaults to "peaks".
title : str | None
The title. If `None`, suppress printing channel type. Defaults to ''.
picks : array-like of int | None
The indices of channels to plot. If None show all. Defaults to None.
exclude : None | list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. Defaults to None.
show : bool
Show figure if True. Defaults to True.
ts_args : None | dict
A dict of `kwargs` that are forwarded to `evoked.plot` to
style the butterfly plot. `axes` and `show` are ignored.
If `spatial_colors` is not in this dict, `spatial_colors=True`
will be passed. Beyond that, if ``None``, no customizable arguments
will be passed. Defaults to ``None``.
topomap_args : None | dict
A dict of `kwargs` that are forwarded to `evoked.plot_topomap`
to style the topomaps. `axes` and `show` are ignored. If `times`
is not in this dict, automatic peak detection is used. Beyond that,
        if ``None``, no customizable arguments will be passed.
Defaults to ``None``.
Returns
-------
fig : instance of matplotlib.figure.Figure | list
The figure object containing the plot. If `evoked` has multiple
channel types, a list of figures, one for each channel type, is
returned.
Notes
-----
.. versionadded:: 0.12.0
"""
import matplotlib.pyplot as plt
if ts_args is None:
ts_args = dict()
if topomap_args is None:
topomap_args = dict()
# channel selection
# simply create a new evoked object(s) with the desired channel selection
evoked = evoked.copy()
if picks is not None:
pick_names = [evoked.info['ch_names'][pick] for pick in picks]
else: # only pick channels that are plotted
picks = _pick_data_channels(evoked.info, exclude=[])
pick_names = [evoked.info['ch_names'][pick] for pick in picks]
evoked.pick_channels(pick_names)
if exclude == 'bads':
exclude = [ch for ch in evoked.info['bads']
if ch in evoked.info['ch_names']]
if exclude is not None:
evoked.drop_channels(exclude)
info = evoked.info
data_types = ['eeg', 'grad', 'mag', 'seeg', 'ecog']
ch_types = set(ch_type for ch_type in data_types if ch_type in evoked)
# if multiple sensor types: one plot per channel type, recursive call
if len(ch_types) > 1:
figs = list()
for t in ch_types: # pick only the corresponding channel type
ev_ = evoked.copy().pick_channels(
[info['ch_names'][idx] for idx in range(info['nchan'])
if channel_type(info, idx) == t])
if len(set([channel_type(ev_.info, idx)
for idx in range(ev_.info['nchan'])
if channel_type(ev_.info, idx) in data_types])) > 1:
raise RuntimeError('Possibly infinite loop due to channel '
'selection problem. This should never '
'happen! Please check your channel types.')
figs.append(
plot_evoked_joint(
ev_, times=times, title=title, show=show, ts_args=ts_args,
exclude=list(), topomap_args=topomap_args))
return figs
fig = plt.figure(figsize=(8.0, 4.2))
# set up time points to show topomaps for
times = _process_times(evoked, times, few=True)
# butterfly/time series plot
# most of this code is about passing defaults on demand
ts_ax = fig.add_subplot(212)
ts_args_pass = dict((k, v) for k, v in ts_args.items() if k not in
['axes', 'show', 'colorbar', 'set_tight_layout'])
ts_args_def = dict(picks=None, unit=True, ylim=None, xlim='tight',
proj=False, hline=None, units=None, scalings=None,
titles=None, gfp=False, window_title=None,
spatial_colors=True)
for key in ts_args_def:
if key not in ts_args:
ts_args_pass[key] = ts_args_def[key]
_plot_evoked(evoked, axes=ts_ax, show=False, plot_type='butterfly',
exclude=[], set_tight_layout=False, **ts_args_pass)
# handle title
# we use a new axis for the title to handle scaling of plots
old_title = ts_ax.get_title()
ts_ax.set_title('')
if title is not None:
title_ax = plt.subplot(4, 3, 2)
title = ', '.join([title, old_title]) if len(title) > 0 else old_title
title_ax.text(.5, .5, title, transform=title_ax.transAxes,
horizontalalignment='center',
verticalalignment='center')
title_ax.axis('off')
# prepare axes for topomap
# slightly convoluted due to colorbar placement and for vertical alignment
ts = len(times) + 2
map_ax = [plt.subplot(4, ts, x + 2 + ts) for x in range(ts - 2)]
cbar_ax = plt.subplot(4, 3 * (ts + 1), 6 * (ts + 1))
# topomap
topomap_args_pass = dict((k, v) for k, v in topomap_args.items() if
k not in ['times', 'axes', 'show', 'colorbar'])
topomap_args_pass['outlines'] = (topomap_args['outlines'] if 'outlines'
in topomap_args else 'skirt')
evoked.plot_topomap(times=times, axes=map_ax, show=False,
colorbar=False, **topomap_args_pass)
if topomap_args.get('colorbar', True):
from matplotlib import ticker
cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax)
cbar.locator = ticker.MaxNLocator(nbins=5)
cbar.update_ticks()
plt.subplots_adjust(left=.1, right=.93, bottom=.14,
top=1. if title is not None else 1.2)
# connection lines
# draw the connection lines between time series and topoplots
tstimes = [timepoint * 1e3 for timepoint in times]
lines = [_connection_line(timepoint, fig, ts_ax, map_ax_)
for timepoint, map_ax_ in zip(tstimes, map_ax)]
for line in lines:
fig.lines.append(line)
# mark times in time series plot
for timepoint in tstimes:
ts_ax.axvline(timepoint, color='grey', linestyle='-',
linewidth=1.5, alpha=.66, zorder=0)
# show and return it
plt_show(show)
return fig
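# Minimal usage sketch (not part of the original module).  The file name is an
# assumption; ``ts_args`` and ``topomap_args`` simply forward keyword arguments
# to the butterfly plot and the topomaps, as documented above.
def _example_plot_evoked_joint():
    import mne
    evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0,
                              baseline=(None, 0))
    # Topomaps at automatically detected GFP peaks above a styled butterfly.
    return plot_evoked_joint(evoked, times='peaks',
                             ts_args=dict(gfp=True),
                             topomap_args=dict(contours=0))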
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering on a precomputed distance matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
        # raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
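# Illustrative sketch (added for clarity, not one of the original tests):
# two labellings describe the same partition iff their co-clustering matrices
# ("are samples i and j in the same cluster?") coincide, which is exactly what
# assess_same_labelling compares.
def test_assess_same_labelling_permutation_invariance():
    cut1 = np.array([0, 0, 1, 1, 2])
    cut2 = np.array([2, 2, 0, 0, 1])  # same partition, permuted label ids
    assess_same_labelling(cut1, cut2)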
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non-sparse
    # matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
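# Illustrative sketch (editor addition, not part of the original test module):
# johnson_lindenstrauss_min_dim returns the smallest number of components for
# which a random projection preserves pairwise distances up to a (1 +- eps)
# factor; it grows roughly like log(n_samples) / eps ** 2.  The helper name is
# hypothetical and it is not collected as a test.
def _sketch_jl_min_dim_growth():
    for eps in (0.5, 0.2, 0.1):
        for n_samples in (100, 10000):
            print('eps=%r, n_samples=%d -> min dim %d'
                  % (eps, n_samples,
                     johnson_lindenstrauss_min_dim(n_samples, eps)))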
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of Gaussian random matrix.
    # Check that the random matrix follows the proper distribution.
    # Let's say that each element a_ij of A is taken from
    # a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
        # Let's say that each element a_ij of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
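# Illustrative sketch (editor addition, not part of the original test module):
# the comments in test_sparse_random_matrix above describe the sparse projection
# distribution -- entries are +-sqrt(s)/sqrt(n_components) with probability
# 1/(2s) each and 0 with probability 1 - 1/s, where s = 1/density.  The helper
# below (hypothetical name, not collected as a test) samples that distribution
# directly and returns the empirical frequency of each value.
def _sketch_sparse_projection_frequencies(n_components=100, n_features=500,
                                          density=0.3, seed=0):
    s = 1. / density
    rng = np.random.RandomState(seed)
    signs = rng.choice([-1., 0., 1.], size=(n_components, n_features),
                       p=[density / 2, 1 - density, density / 2])
    scale = np.sqrt(s) / np.sqrt(n_components)
    A = signs * scale
    return {'zero': np.mean(A == 0.),
            'plus': np.mean(A == scale),
            'minus': np.mean(A == -scale)}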
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
ubccr/tacc_stats | analyze/process_pickles/htrate.py | 1 | 2095 | #!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import matplotlib
if not 'matplotlib.pyplot' in sys.modules:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils
class Colors:
def __init__(self):
self.colors=['b','g','r','c','m','y','k']
self.loc=0
def next(self):
if self.loc == len(self.colors):
self.loc=0
c=self.colors[self.loc]
self.loc+=1
return c
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', help='Set full mode', action='store_true')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
n=parser.parse_args()
filelist=tspl_utils.getfilelist(n.filearg)
for file in filelist:
try:
full=''
ts=tspl.TSPLBase(file,['amd64_sock', 'amd64_sock', 'amd64_sock'],
['HT0', 'HT1', 'HT2'])
except tspl.TSPLException as e:
continue
if not tspl_utils.checkjob(ts,3600,16): # 1 hour, 16way only
continue
elif ts.numhosts < 2: # At least 2 hosts
print ts.j.id + ': 1 host'
continue
print ts.j.id
tmid=(ts.t[:-1]+ts.t[1:])/2.0
dt=numpy.diff(ts.t)
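    # per-interval rate of each HT counter: diff(counter)/diff(t), plotted below
    # against the interval midpoints tmid (converted to hours)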
fig,ax=plt.subplots(1,1,figsize=(8,6),dpi=80)
ax.hold=True
xmin,xmax=[0.,0.]
c=Colors()
for k in ts.j.hosts.keys():
h=ts.j.hosts[k]
col=c.next()
for i in range(3):
for j in range(4):
rate=numpy.divide(numpy.diff(ts.data[i][k][j]),dt)
xmin,xmax=[min(xmin,min(rate)),max(xmax,max(rate))]
ax.plot(tmid/3600,rate,'-'+col)
if xmax > 2.0e9:
print ts.j.id + ' over limit: %(v)8.3f' % {'v' : xmax}
else:
plt.close()
continue
plt.suptitle(ts.title)
xmin,xmax=tspl_utils.expand_range(xmin,xmax,.1)
ax.set_ylim(bottom=xmin,top=xmax)
fname='_'.join(['graph',ts.j.id,'HT_rates'])
fig.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
| lgpl-2.1 |
warmspringwinds/scikit-image | skimage/future/graph/rag.py | 4 | 14201 | try:
import networkx as nx
except ImportError:
msg = "Graph functions require networkx, which is not installed"
class nx:
class Graph:
def __init__(self, *args, **kwargs):
raise ImportError(msg)
import warnings
warnings.warn(msg)
import numpy as np
from scipy.ndimage import filters
from scipy import ndimage as nd
import math
from ... import draw, measure, segmentation, util, color
try:
from matplotlib import colors
from matplotlib import cm
except ImportError:
pass
def min_weight(graph, src, dst, n):
"""Callback to handle merging nodes by choosing minimum weight.
Returns either the weight between (`src`, `n`) or (`dst`, `n`)
in `graph` or the minimum of the two when both exist.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
        The vertices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
weight : float
The weight between (`src`, `n`) or (`dst`, `n`) in `graph` or the
minimum of the two when both exist.
"""
# cover the cases where n only has edge to either `src` or `dst`
default = {'weight': np.inf}
w1 = graph[n].get(src, default)['weight']
w2 = graph[n].get(dst, default)['weight']
return min(w1, w2)
class RAG(nx.Graph):
"""
The Region Adjacency Graph (RAG) of an image, subclasses
    `networkx.Graph <http://networkx.github.io/documentation/latest/reference/classes.graph.html>`_
"""
def __init__(self, data=None, **attr):
super(RAG, self).__init__(data, **attr)
try:
self.max_id = max(self.nodes_iter())
except ValueError:
# Empty sequence
self.max_id = 0
def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True,
extra_arguments=[], extra_keywords={}):
"""Merge node `src` and `dst`.
The new combined node is adjacent to all the neighbors of `src`
and `dst`. `weight_func` is called to decide the weight of edges
incident on the new node.
Parameters
----------
src, dst : int
Nodes to be merged.
weight_func : callable, optional
Function to decide edge weight of edges incident on the new node.
            For each neighbor `n` of `src` and `dst`, `weight_func` will be
            called as follows: `weight_func(graph, src, dst, n,
            *extra_arguments, **extra_keywords)`, where `graph` is this RAG
            object (which is in turn a subclass of `networkx.Graph`) and
            `src`, `dst` and `n` are IDs of vertices in it.
in_place : bool, optional
If set to `True`, the merged node has the id `dst`, else merged
node has a new id which is returned.
extra_arguments : sequence, optional
The sequence of extra positional arguments passed to
`weight_func`.
extra_keywords : dictionary, optional
The dict of keyword arguments passed to the `weight_func`.
Returns
-------
id : int
The id of the new node.
Notes
-----
If `in_place` is `False` the resulting node has a new id, rather than
`dst`.
"""
src_nbrs = set(self.neighbors(src))
dst_nbrs = set(self.neighbors(dst))
neighbors = (src_nbrs | dst_nbrs) - set([src, dst])
if in_place:
new = dst
else:
new = self.next_id()
self.add_node(new)
for neighbor in neighbors:
w = weight_func(self, src, new, neighbor, *extra_arguments,
**extra_keywords)
self.add_edge(neighbor, new, weight=w)
self.node[new]['labels'] = (self.node[src]['labels'] +
self.node[dst]['labels'])
self.remove_node(src)
if not in_place:
self.remove_node(dst)
return new
def add_node(self, n, attr_dict=None, **attr):
"""Add node `n` while updating the maximum node id.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n, attr_dict, **attr)
self.max_id = max(n, self.max_id)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between `u` and `v` while updating max node id.
.. seealso:: :func:`networkx.Graph.add_edge`."""
super(RAG, self).add_edge(u, v, attr_dict, **attr)
self.max_id = max(u, v, self.max_id)
def copy(self):
"""Copy the graph with its max node id.
.. seealso:: :func:`networkx.Graph.copy`."""
g = super(RAG, self).copy()
g.max_id = self.max_id
return g
def next_id(self):
"""Returns the `id` for the new node to be inserted.
The current implementation returns one more than the maximum `id`.
Returns
-------
id : int
The `id` of the new node to be inserted.
"""
return self.max_id + 1
def _add_node_silent(self, n):
"""Add node `n` without updating the maximum node id.
This is a convenience method used internally.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n)
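# Illustrative sketch (editor addition, not part of the scikit-image API): how
# RAG.merge_nodes and the min_weight callback defined above interact.  The node
# ids and edge weights are made up for demonstration only.
def _sketch_merge_nodes_with_min_weight():
    g = RAG()
    g.add_edge(1, 2, weight=0.1)
    g.add_edge(2, 3, weight=0.5)
    g.add_edge(1, 3, weight=0.3)
    for n in (1, 2, 3):
        g.node[n]['labels'] = [n]
    # Merge 1 into 2 (in place): node 3 neighbours both, so its edge to the
    # merged node gets weight min(0.3, 0.5) = 0.3, as decided by min_weight.
    g.merge_nodes(1, 2)
    return g[2][3]['weight']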
def _add_edge_filter(values, graph):
"""Create edge in `g` between the first element of `values` and the rest.
Add an edge between the first element in `values` and
all other elements of `values` in the graph `g`. `values[0]`
is expected to be the central value of the footprint used.
Parameters
----------
values : array
The array to process.
graph : RAG
The graph to add edges in.
Returns
-------
0 : int
Always returns 0. The return value is required so that `generic_filter`
can put it in the output array.
"""
values = values.astype(int)
current = values[0]
for value in values[1:]:
if value != current:
graph.add_edge(current, value)
return 0
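# Illustrative sketch (editor addition): what _add_edge_filter does when driven
# by scipy's generic_filter.  For a 2x2 label image with two regions, the filter
# visits each pixel together with its forward neighbours (as selected by the
# footprint built in rag_mean_color below) and adds an edge whenever the labels
# differ.  The helper name and the label values are only for demonstration.
def _sketch_add_edge_filter():
    labels = np.array([[1, 1],
                       [1, 2]])
    g = RAG()
    fp = nd.generate_binary_structure(2, 2)
    for d in range(fp.ndim):
        fp = fp.swapaxes(0, d)
        fp[0, ...] = 0
        fp = fp.swapaxes(0, d)
    filters.generic_filter(labels, function=_add_edge_filter, footprint=fp,
                           mode='nearest',
                           output=np.zeros(labels.shape, dtype=np.uint8),
                           extra_arguments=(g,))
    return g.edges()   # expected: a single edge between regions 1 and 2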
def rag_mean_color(image, labels, connectivity=2, mode='distance',
sigma=255.0):
"""Compute the Region Adjacency Graph using mean colors.
Given an image and its initial segmentation, this method constructs the
corresponding Region Adjacency Graph (RAG). Each node in the RAG
represents a set of pixels within `image` with the same label in `labels`.
The weight between two adjacent regions represents how similar or
dissimilar two regions are depending on the `mode` parameter.
Parameters
----------
image : ndarray, shape(M, N, [..., P,] 3)
Input image.
labels : ndarray, shape(M, N, [..., P,])
The labelled image. This should have one dimension less than
`image`. If `image` has dimensions `(M, N, 3)` `labels` should have
dimensions `(M, N)`.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
`scipy.ndimage.filters.generate_binary_structure`.
mode : {'distance', 'similarity'}, optional
The strategy to assign edge weights.
        'distance' : The weight between two adjacent regions is
        :math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean
        colors of the two regions. It represents the Euclidean distance
        between their average colors.
        'similarity' : The weight between two adjacent regions is
        :math:`e^{-d^2/sigma}`, where :math:`d=|c_1 - c_2|` and
        :math:`c_1`, :math:`c_2` are the mean colors of the two regions.
        It represents how similar two regions are.
sigma : float, optional
Used for computation when `mode` is "similarity". It governs how
close to each other two colors should be, for their corresponding edge
weight to be significant. A very large value of `sigma` could make
any two colors behave as though they were similar.
Returns
-------
out : RAG
The region adjacency graph.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.astronaut()
>>> labels = segmentation.slic(img)
>>> rag = graph.rag_mean_color(img, labels)
References
----------
.. [1] Alain Tremeau and Philippe Colantoni
"Regions Adjacency Graph Applied To Color Image Segmentation"
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274
"""
graph = RAG()
# The footprint is constructed in such a way that the first
# element in the array being passed to _add_edge_filter is
# the central value.
fp = nd.generate_binary_structure(labels.ndim, connectivity)
for d in range(fp.ndim):
fp = fp.swapaxes(0, d)
fp[0, ...] = 0
fp = fp.swapaxes(0, d)
# For example
# if labels.ndim = 2 and connectivity = 1
# fp = [[0,0,0],
# [0,1,1],
# [0,1,0]]
#
# if labels.ndim = 2 and connectivity = 2
# fp = [[0,0,0],
# [0,1,1],
# [0,1,1]]
filters.generic_filter(
labels,
function=_add_edge_filter,
footprint=fp,
mode='nearest',
output=np.zeros(labels.shape, dtype=np.uint8),
extra_arguments=(graph,))
for n in graph:
graph.node[n].update({'labels': [n],
'pixel count': 0,
'total color': np.array([0, 0, 0],
dtype=np.double)})
for index in np.ndindex(labels.shape):
current = labels[index]
graph.node[current]['pixel count'] += 1
graph.node[current]['total color'] += image[index]
for n in graph:
graph.node[n]['mean color'] = (graph.node[n]['total color'] /
graph.node[n]['pixel count'])
for x, y, d in graph.edges_iter(data=True):
diff = graph.node[x]['mean color'] - graph.node[y]['mean color']
diff = np.linalg.norm(diff)
if mode == 'similarity':
d['weight'] = math.e ** (-(diff ** 2) / sigma)
elif mode == 'distance':
d['weight'] = diff
else:
raise ValueError("The mode '%s' is not recognised" % mode)
return graph
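# Illustrative sketch (editor addition): the 'similarity' edge weight computed
# above is exp(-d**2 / sigma), with d the Euclidean distance between the two
# regions' mean colors.  The colors and sigma below are arbitrary example values.
def _sketch_similarity_weight(c1=(10., 10., 10.), c2=(40., 50., 60.), sigma=255.):
    d = np.linalg.norm(np.asarray(c1) - np.asarray(c2))
    return math.e ** (-(d ** 2) / sigma)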
def draw_rag(labels, rag, img, border_color=None, node_color='#ffff00',
edge_color='#00ff00', colormap=None, thresh=np.inf,
desaturate=False, in_place=True):
"""Draw a Region Adjacency Graph on an image.
Given a labelled image and its corresponding RAG, draw the nodes and edges
of the RAG on the image with the specified colors. Nodes are marked by
the centroids of the corresponding regions.
Parameters
----------
labels : ndarray, shape (M, N)
The labelled image.
rag : RAG
The Region Adjacency Graph.
img : ndarray, shape (M, N, 3)
Input image.
border_color : colorspec, optional
Any matplotlib colorspec.
node_color : colorspec, optional
Any matplotlib colorspec. Yellow by default.
edge_color : colorspec, optional
Any matplotlib colorspec. Green by default.
colormap : colormap, optional
Any matplotlib colormap. If specified the edges are colormapped with
the specified color map.
thresh : float, optional
        Edges with weight below `thresh` are not drawn, nor considered for
        color mapping.
desaturate : bool, optional
Convert the image to grayscale before displaying. Particularly helps
visualization when using the `colormap` option.
in_place : bool, optional
If set, the RAG is modified in place. For each node `n` the function
will set a new attribute ``rag.node[n]['centroid']``.
Returns
-------
out : ndarray, shape (M, N, 3)
The image with the RAG drawn.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.coffee()
>>> labels = segmentation.slic(img)
>>> g = graph.rag_mean_color(img, labels)
>>> out = graph.draw_rag(labels, g, img)
"""
if not in_place:
rag = rag.copy()
if desaturate:
img = color.rgb2gray(img)
img = color.gray2rgb(img)
out = util.img_as_float(img, force_copy=True)
cc = colors.ColorConverter()
edge_color = cc.to_rgb(edge_color)
node_color = cc.to_rgb(node_color)
# Handling the case where one node has multiple labels
# offset is 1 so that regionprops does not ignore 0
offset = 1
map_array = np.arange(labels.max() + 1)
for n, d in rag.nodes_iter(data=True):
for label in d['labels']:
map_array[label] = offset
offset += 1
rag_labels = map_array[labels]
regions = measure.regionprops(rag_labels)
for (n, data), region in zip(rag.nodes_iter(data=True), regions):
data['centroid'] = region['centroid']
if border_color is not None:
border_color = cc.to_rgb(border_color)
out = segmentation.mark_boundaries(out, rag_labels, color=border_color)
if colormap is not None:
edge_weight_list = [d['weight'] for x, y, d in
rag.edges_iter(data=True) if d['weight'] < thresh]
norm = colors.Normalize()
norm.autoscale(edge_weight_list)
smap = cm.ScalarMappable(norm, colormap)
for n1, n2, data in rag.edges_iter(data=True):
if data['weight'] >= thresh:
continue
r1, c1 = map(int, rag.node[n1]['centroid'])
r2, c2 = map(int, rag.node[n2]['centroid'])
line = draw.line(r1, c1, r2, c2)
if colormap is not None:
out[line] = smap.to_rgba([data['weight']])[0][:-1]
else:
out[line] = edge_color
circle = draw.circle(r1, c1, 2)
out[circle] = node_color
return out
| bsd-3-clause |
fitermay/intellij-community | python/helpers/pydev/pydevd.py | 3 | 63162 | '''
Entry point module (keep at root):
This module starts the debugger.
'''
from __future__ import nested_scopes # Jython 2.1 support
import atexit
import os
import sys
import traceback
from _pydevd_bundle.pydevd_constants import IS_JYTH_LESS25, IS_PY3K, IS_PY34_OLDER, get_thread_id, dict_keys, dict_pop, dict_contains, \
dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame, xrange, \
clear_cached_thread_id
from _pydev_bundle import fix_getpass
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_io, pydevd_vm_type, pydevd_tracing
from _pydevd_bundle import pydevd_utils
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, update_exception_hook
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO, CMD_STEP_OVER, \
CMD_STEP_RETURN, CMD_STEP_INTO_MY_CODE, CMD_THREAD_SUSPEND, CMD_RUN_TO_LINE, \
CMD_ADD_EXCEPTION_BREAK, CMD_SMART_STEP_INTO, InternalConsoleExec, NetCommandFactory, \
PyDBDaemonThread, _queue, ReaderThread, GetGlobalDebugger, get_global_debugger, \
set_global_debugger, WriterThread, pydevd_find_thread_by_id, pydevd_log, \
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace, \
InternalSendCurrExceptionTraceProceeded
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from _pydevd_bundle.pydevd_trace_dispatch import trace_dispatch as _trace_dispatch
from _pydevd_bundle.pydevd_utils import save_main_module
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads
__version_info__ = (0, 0, 5)
__version_info_str__ = []
for v in __version_info__:
__version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)
#IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from _pydevd_bundle.pydevd_plugin_utils import PluginManager
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self)
self._py_db_command_thread_event = py_db._py_db_command_thread_event
self.py_db = py_db
self.setName('pydevd.CommandThread')
def _on_run(self):
for i in xrange(1, 10):
time.sleep(0.5) #this one will only start later on (because otherwise we may not have any non-daemon threads
if self.killReceived:
return
if self.pydev_do_not_trace:
self.py_db.SetTrace(None) # no debugging on this thread
try:
while not self.killReceived:
try:
self.py_db.process_internal_commands()
except:
pydevd_log(0, 'Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.5)
except:
pydev_log.debug(sys.exc_info()[0])
#only got this error in interpreter shutdown
#pydevd_log(0, 'Finishing debug communication...(3)')
#=======================================================================================================================
# CheckOutputThread
# Non-daemonic thread guarantees that all data is written even if the program has finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self)
self.py_db = py_db
self.setName('pydevd.CheckAliveThread')
self.daemon = False
py_db.output_checker = self
def _on_run(self):
if self.pydev_do_not_trace:
disable_tracing = True
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
while not self.killReceived:
time.sleep(0.3)
if not self.py_db.has_threads_alive() and self.py_db.writer.empty() \
and not has_data_to_redirect():
try:
pydev_log.debug("No alive threads, finishing debug session")
self.py_db.finish_debugging_session()
kill_all_pydev_threads()
except:
traceback.print_exc()
self.killReceived = True
self.py_db.check_output_redirect()
def do_kill_pydev_thread(self):
self.killReceived = True
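# Note (editor addition): PyDBCommandThread above polls the internal command queues
# as a daemon thread, while CheckOutputThread is deliberately non-daemonic
# (daemon = False) so the process only exits after the remaining output has been
# checked for redirection and the debug session has been finished.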
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def __init__(self):
set_global_debugger(self)
pydevd_tracing.replace_sys_set_trace_func()
self.reader = None
self.writer = None
self.output_checker = None
self.quitting = None
self.cmd_factory = NetCommandFactory()
self._cmd_queue = {} # the hash of Queues. Key is thread id, value is thread
self.breakpoints = {}
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.ready_to_run = False
self._main_lock = thread.allocate_lock()
self._lock_running_thread_ids = thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self._finish_debugging_session = False
self._termination_event_set = False
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.break_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
# Suspend debugger even if breakpoint condition raises an exception
SUSPEND_ON_BREAKPOINT_EXCEPTION = True
self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
        #this is a dict of thread ids pointing to threads. Whenever a command is passed to the java end that
        #acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
        #find that thread alive anymore, we must remove it from this dict and make the java side know that the thread
        #was killed.
self._running_thread_ids = {}
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
#working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
self.thread_analyser = None
self.asyncio_analyser = None
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
self._filename_to_not_in_scope = {}
self.first_breakpoint_reached = False
self.is_filter_enabled = pydevd_utils.is_filter_enabled()
self.is_filter_libraries = pydevd_utils.is_filter_libraries()
self.show_return_values = False
self.remove_return_values_flag = False
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def not_in_scope(self, filename):
return pydevd_utils.not_in_project_roots(filename)
def is_ignored_by_filters(self, filename):
return pydevd_utils.is_ignored_by_filter(filename)
def first_appearance_in_scope(self, trace):
if trace is None or self.not_in_scope(trace.tb_frame.f_code.co_filename):
return False
else:
trace = trace.tb_next
while trace is not None:
frame = trace.tb_frame
if not self.not_in_scope(frame.f_code.co_filename):
return False
trace = trace.tb_next
return True
def has_threads_alive(self):
for t in threadingEnumerate():
if getattr(t, 'is_pydev_daemon_thread', False):
#Important: Jython 2.5rc4 has a bug where a thread created with thread.start_new_thread won't be
#set as a daemon thread, so, we also have to check for the 'is_pydev_daemon_thread' flag.
#See: https://github.com/fabioz/PyDev.Debugger/issues/11
continue
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if is_thread_alive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def finish_debugging_session(self):
self._finish_debugging_session = True
def initialize_network(self, sock):
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
self.writer = WriterThread(sock)
self.reader = ReaderThread(sock)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
s = start_client(host, port)
else:
s = start_server(port)
self.initialize_network(s)
def get_internal_queue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
try:
return self._cmd_queue[thread_id]
except KeyError:
return self._cmd_queue.setdefault(thread_id, _queue.Queue()) #@UndefinedVariable
def post_internal_command(self, int_cmd, thread_id):
""" if thread_id is *, post to all """
if thread_id == "*":
threads = threadingEnumerate()
for t in threads:
thread_id = get_thread_id(t)
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd)
else:
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd)
def check_output_redirect(self):
global bufferStdOutToServer
global bufferStdErrToServer
if bufferStdOutToServer:
init_stdout_redirect()
self.check_output(sys.stdoutBuf, 1) #@UndefinedVariable
if bufferStdErrToServer:
init_stderr_redirect()
self.check_output(sys.stderrBuf, 2) #@UndefinedVariable
def check_output(self, out, outCtx):
'''Checks the output to see if we have to send some buffered output to the debug server
@param out: sys.stdout or sys.stderr
@param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it)
'''
try:
v = out.getvalue()
if v:
self.cmd_factory.make_io_message(v, outCtx, self)
except:
traceback.print_exc()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from _pydev_bundle.pydev_import_hook import import_hook_manager
for module in dict_keys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, dict_pop(self.mpl_modules_for_patching, module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def suspend_all_other_threads(self, thread_suspended_at_bp):
all_threads = threadingEnumerate()
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif hasattr(t, 'pydev_do_not_trace'):
pass # skip some other threads, i.e. ipython history saving thread from debug console
else:
if t is thread_suspended_at_bp:
continue
additional_info = None
try:
additional_info = t.additional_info
except AttributeError:
pass # that's ok, no info currently set
if additional_info is not None:
for frame in additional_info.iter_frames(t):
self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=True)
del frame
self.set_suspend(t, CMD_THREAD_SUSPEND)
else:
sys.stderr.write("Can't suspend thread: %s\n" % (t,))
def process_internal_commands(self):
'''This function processes internal commands
'''
self._main_lock.acquire()
try:
self.check_output_redirect()
curr_thread_id = get_thread_id(threadingCurrentThread())
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
self._lock_running_thread_ids.acquire()
try:
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
elif is_thread_alive(t):
if not self._running_thread_ids:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
# TODO: Investigate: should we do this for all threads in threading.enumerate()?
# (i.e.: if a fork happens on Linux, this seems likely).
old_thread_id = get_thread_id(t)
clear_cached_thread_id(t)
clear_cached_thread_id(threadingCurrentThread())
thread_id = get_thread_id(t)
curr_thread_id = get_thread_id(threadingCurrentThread())
if pydevd_vars.has_additional_frames_by_id(old_thread_id):
frames_by_id = pydevd_vars.get_additional_frames_by_id(old_thread_id)
pydevd_vars.add_additional_frame_by_id(thread_id, frames_by_id)
else:
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
if not dict_contains(self._running_thread_ids, thread_id):
if not hasattr(t, 'additional_info'):
# see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
# Let's create the additional info right away!
t.additional_info = PyDBAdditionalThreadInfo()
self._running_thread_ids[thread_id] = t
self.writer.add_command(self.cmd_factory.make_thread_created_message(t))
queue = self.get_internal_queue(thread_id)
cmdsToReadd = [] # some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydevd_log(2, "Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
pydevd_log(2, "processing internal command ", str(int_cmd))
int_cmd.do_it(self)
else:
pydevd_log(2, "NOT processing internal command ", str(int_cmd))
cmdsToReadd.append(int_cmd)
except _queue.Empty: #@UndefinedVariable
for int_cmd in cmdsToReadd:
queue.put(int_cmd)
# this is how we exit
thread_ids = list(self._running_thread_ids.keys())
for tId in thread_ids:
if not dict_contains(program_threads_alive, tId):
program_threads_dead.append(tId)
finally:
self._lock_running_thread_ids.release()
for tId in program_threads_dead:
try:
self._process_thread_not_alive(tId)
except:
sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
raise
if len(program_threads_alive) == 0:
self.finish_debugging_session()
for t in all_threads:
if hasattr(t, 'do_kill_pydev_thread'):
t.do_kill_pydev_thread()
finally:
self._main_lock.release()
def set_tracing_for_untraced_contexts(self, ignore_frame=None, overwrite_prev_trace=False):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
threads = threadingEnumerate()
try:
for t in threads:
if getattr(t, 'is_pydev_daemon_thread', False):
continue
# TODO: optimize so that we only actually add that tracing if it's in
# the new breakpoint context.
additional_info = None
try:
additional_info = t.additional_info
except AttributeError:
pass # that's ok, no info currently set
if additional_info is not None:
for frame in additional_info.iter_frames(t):
if frame is not ignore_frame:
self.set_trace_for_frame_and_parents(frame, overwrite_prev_trace=overwrite_prev_trace)
finally:
frame = None
t = None
threads = None
additional_info = None
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
break_dict = {}
for breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[file] = break_dict
def add_break_on_exception(
self,
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
return None
if eb.notify_on_terminate:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
self.break_on_uncaught_exceptions = cp
if eb.notify_always:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
self.break_on_caught_exceptions = cp
return eb
def update_after_exceptions_added(self, added):
updated_on_caught = False
updated_on_uncaught = False
for eb in added:
if not updated_on_uncaught and eb.notify_on_terminate:
updated_on_uncaught = True
update_exception_hook(self)
if not updated_on_caught and eb.notify_always:
updated_on_caught = True
self.set_tracing_for_untraced_contexts()
def _process_thread_not_alive(self, threadId):
""" if thread is not alive, cancel trace_dispatch processing """
self._lock_running_thread_ids.acquire()
try:
thread = self._running_thread_ids.pop(threadId, None)
if thread is None:
return
wasNotified = thread.additional_info.pydev_notify_kill
if not wasNotified:
thread.additional_info.pydev_notify_kill = True
finally:
self._lock_running_thread_ids.release()
cmd = self.cmd_factory.make_thread_killed_message(threadId)
self.writer.add_command(cmd)
def set_suspend(self, thread, stop_reason):
thread.additional_info.suspend_type = PYTHON_SUSPEND
thread.additional_info.pydev_state = STATE_SUSPEND
thread.stop_reason = stop_reason
# If conditional breakpoint raises any exception during evaluation send details to Java
if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception:
self._send_breakpoint_condition_exception(thread)
def _send_breakpoint_condition_exception(self, thread):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = get_thread_id(thread)
conditional_breakpoint_exception_tuple = thread.additional_info.conditional_breakpoint_exception
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
# Reset the conditional_breakpoint_exception details to None
thread.additional_info.conditional_breakpoint_exception = None
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack_proceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.post_internal_command(int_cmd, thread_id)
self.process_internal_commands()
def do_wait_suspend(self, thread, frame, event, arg): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
"""
self.process_internal_commands()
message = thread.additional_info.pydev_message
cmd = self.cmd_factory.make_thread_suspend_message(get_thread_id(thread), frame, thread.stop_reason, message)
self.writer.add_command(cmd)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
from_this_thread = []
for frame_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
# print >> sys.stderr, 'Frame created: ', frame_id
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(frame_id, custom_frame.name))
self.writer.add_command(self.cmd_factory.make_thread_suspend_message(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, ""))
from_this_thread.append(frame_id)
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
imported = False
info = thread.additional_info
if info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
# before every stop check if matplotlib modules were imported inside script code
if len(self.mpl_modules_for_patching) > 0:
for module in dict_keys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = dict_pop(self.mpl_modules_for_patching, module)
activate_function()
self.mpl_in_use = True
while info.pydev_state == STATE_SUSPEND and not self._finish_debugging_session:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
try:
if not imported:
from pydev_ipython.inputhook import get_inputhook
imported = True
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
self.process_internal_commands()
time.sleep(0.01)
# process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
info.pydev_step_stop = None
info.pydev_smart_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
self.set_trace_for_frame_and_parents(frame)
info.pydev_step_stop = None
info.pydev_smart_step_stop = frame
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT :
self.set_trace_for_frame_and_parents(frame)
if event == 'line' or event == 'exception':
#If we're already in the correct context, we have to stop it now, because we can act only on
#line events -- if a return was the next statement it wouldn't work (so, we have this code
#repeated at pydevd_frame).
stop = False
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
line = info.pydev_next_line
if frame.f_lineno == line:
stop = True
else :
if frame.f_trace is None:
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
frame.f_trace = None
stop = True
if stop:
info.pydev_state = STATE_SUSPEND
self.do_wait_suspend(thread, frame, event, arg)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
                # (the previous frame would be the awt event, but it isn't part of 'jython', only 'java')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
del frame
cmd = self.cmd_factory.make_thread_run_message(get_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print >> sys.stderr, 'Removing created frame: ', frame_id
self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
def handle_post_mortem_stop(self, thread, frame, frames_byid, exception):
pydev_log.debug("We are stopping in post-mortem\n")
thread_id = get_thread_id(thread)
pydevd_vars.add_additional_frame_by_id(thread_id, frames_byid)
try:
try:
add_exception_to_frame(frame, exception)
self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
self.do_wait_suspend(thread, frame, 'exception', None)
except:
pydev_log.error("We've got an error while stopping in post-mortem: %s\n"%sys.exc_info()[0])
finally:
pydevd_vars.remove_additional_frame_by_id(thread_id)
def set_trace_for_frame_and_parents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None):
if dispatch_func is None:
dispatch_func = self.trace_dispatch
if also_add_to_passed_frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
while frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
del frame
def update_trace(self, frame, dispatch_func, overwrite_prev):
if frame.f_trace is None:
frame.f_trace = dispatch_func
else:
if overwrite_prev:
frame.f_trace = dispatch_func
else:
try:
#If it's the trace_exception, go back to the frame trace dispatch!
if frame.f_trace.im_func.__name__ == 'trace_exception':
frame.f_trace = frame.f_trace.im_self.trace_dispatch
except AttributeError:
pass
frame = frame.f_back
del frame
def prepare_to_run(self):
''' Shared code to prepare debugging by installing traces and registering threads '''
self.patch_threads()
pydevd_tracing.SetTrace(self.trace_dispatch)
PyDBCommandThread(self).start()
if self.signature_factory is not None or self.thread_analyser is not None:
# we need all data to be sent to IDE even after program finishes
CheckOutputThread(self).start()
def patch_threads(self):
try:
# not available in jython!
import threading
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import patch_thread_modules
patch_thread_modules()
def get_fullname(self, mod_name):
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def run(self, file, globals=None, locals=None, module=False, set_trace=True):
if module:
filename = self.get_fullname(file)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
if set_trace:
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
            # I think this is an ugly hack, but it works (seems to) for the bug that says that sys.path should be the same in
            # debug and run.
if m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
            # The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(file)[0])
self.prepare_to_run()
while not self.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
if self.thread_analyser is not None:
wrap_threads()
t = threadingCurrentThread()
self.thread_analyser.set_start_time(cur_time())
send_message("threading_event", 0, t.getName(), get_thread_id(t), "thread", "start", file, 1, None, parent=get_thread_id(t))
if self.asyncio_analyser is not None:
# we don't have main thread in asyncio graph, so we should add a fake event
send_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)
try:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
traceback.print_exc()
pydev_imports.execfile(file, globals, locals) # execute the script
def exiting(self):
sys.stdout.flush()
sys.stderr.flush()
self.check_output_redirect()
cmd = self.cmd_factory.make_exit_message()
self.writer.add_command(cmd)
def wait_for_commands(self, globals):
thread = threading.currentThread()
from _pydevd_bundle import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = get_thread_id(thread)
from _pydevd_bundle import pydevd_vars
pydevd_vars.add_additional_frame_by_id(thread_id, {id(frame): frame})
cmd = self.cmd_factory.make_show_console_message(thread_id, frame)
self.writer.add_command(cmd)
while True:
self.process_internal_commands()
time.sleep(0.01)
trace_dispatch = _trace_dispatch
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def enable_qt_support():
from _pydev_bundle import pydev_monkey_qt
pydev_monkey_qt.patch_qt()
def process_command_line(argv):
""" parses the arguments.
removes our arguments from the command line """
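    # Illustrative invocation (script name and its args are placeholders):
    #   pydevd.py --port 5678 --client 127.0.0.1 --multiproc --file my_script.py arg1 arg2
    # Everything after --file is left on argv for the program being debugged.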
setup = {}
setup['client'] = ''
setup['server'] = False
setup['port'] = 0
setup['file'] = ''
setup['multiproc'] = False #Used by PyCharm (reuses connection: ssh tunneling)
setup['multiprocess'] = False # Used by PyDev (creates new connection to ide)
setup['save-signatures'] = False
setup['save-threading'] = False
setup['save-asyncio'] = False
setup['qt-support'] = False
setup['print-in-debugger-startup'] = False
setup['cmd-line'] = False
setup['module'] = False
i = 0
del argv[0]
while (i < len(argv)):
if argv[i] == '--port':
del argv[i]
setup['port'] = int(argv[i])
del argv[i]
elif argv[i] == '--vm_type':
del argv[i]
setup['vm_type'] = argv[i]
del argv[i]
elif argv[i] == '--client':
del argv[i]
setup['client'] = argv[i]
del argv[i]
elif argv[i] == '--server':
del argv[i]
setup['server'] = True
elif argv[i] == '--file':
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG_RECORD_SOCKET_READS':
del argv[i]
setup['DEBUG_RECORD_SOCKET_READS'] = True
elif argv[i] == '--DEBUG':
del argv[i]
set_debug(setup)
elif argv[i] == '--multiproc':
del argv[i]
setup['multiproc'] = True
elif argv[i] == '--multiprocess':
del argv[i]
setup['multiprocess'] = True
elif argv[i] == '--save-signatures':
del argv[i]
setup['save-signatures'] = True
elif argv[i] == '--save-threading':
del argv[i]
setup['save-threading'] = True
elif argv[i] == '--save-asyncio':
del argv[i]
setup['save-asyncio'] = True
elif argv[i] == '--qt-support':
del argv[i]
setup['qt-support'] = True
elif argv[i] == '--print-in-debugger-startup':
del argv[i]
setup['print-in-debugger-startup'] = True
elif (argv[i] == '--cmd-line'):
del argv[i]
setup['cmd-line'] = True
elif (argv[i] == '--module'):
del argv[i]
setup['module'] = True
else:
raise ValueError("unexpected option " + argv[i])
return setup
def usage(doExit=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('pydevd.py --port=N [(--client hostname) | --server] --file executable [file_options]\n')
if doExit:
sys.exit(0)
def init_stdout_redirect():
if not getattr(sys, 'stdoutBuf', None):
sys.stdoutBuf = pydevd_io.IOBuf()
sys.stdout_original = sys.stdout
sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable
def init_stderr_redirect():
if not getattr(sys, 'stderrBuf', None):
sys.stderrBuf = pydevd_io.IOBuf()
sys.stderr_original = sys.stderr
sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable
def has_data_to_redirect():
if getattr(sys, 'stdoutBuf', None):
if not sys.stdoutBuf.empty():
return True
if getattr(sys, 'stderrBuf', None):
if not sys.stderrBuf.empty():
return True
return False
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdoutToServer=False,
stderrToServer=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
'''
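    # Illustrative usage from the program being debugged (host/port are placeholders
    # for wherever the IDE's debug server is listening):
    #   import pydevd
    #   pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)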
_set_trace_lock.acquire()
try:
_locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
)
finally:
_set_trace_lock.release()
_set_trace_lock = thread.allocate_lock()
def _locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
):
if patch_multiprocessing:
try:
from _pydev_bundle import pydev_monkey
except:
pass
else:
pydev_monkey.patch_new_process_functions()
if host is None:
from _pydev_bundle import pydev_localhost
host = pydev_localhost.get_localhost()
global connected
global bufferStdOutToServer
global bufferStdErrToServer
if not connected :
pydevd_vm_type.setup_type()
debugger = PyDB()
debugger.connect(host, port) # Note: connect can raise error.
# Mark connected only if it actually succeeded.
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
if bufferStdOutToServer:
init_stdout_redirect()
if bufferStdErrToServer:
init_stderr_redirect()
patch_stdin(debugger)
debugger.set_trace_for_frame_and_parents(get_frame(), False, overwrite_prev_trace=overwrite_prev_trace)
CustomFramesContainer.custom_frames_lock.acquire() # @UndefinedVariable
try:
for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
debugger.set_trace_for_frame_and_parents(custom_frame.frame, False)
finally:
CustomFramesContainer.custom_frames_lock.release() # @UndefinedVariable
t = threadingCurrentThread()
try:
additional_info = t.additional_info
except AttributeError:
additional_info = PyDBAdditionalThreadInfo()
t.additional_info = additional_info
while not debugger.ready_to_run:
time.sleep(0.1) # busy wait until we receive run command
        # note that we do this through pydevd_tracing.SetTrace so that the user
        # is not warned about the tracing being set!
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# As this is the first connection, also set tracing for any untraced threads
debugger.set_tracing_for_untraced_contexts(ignore_frame=get_frame(), overwrite_prev_trace=overwrite_prev_trace)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
PyDBCommandThread(debugger).start()
CheckOutputThread(debugger).start()
#Suspend as the last thing after all tracing is in place.
if suspend:
debugger.set_suspend(t, CMD_THREAD_SUSPEND)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
debugger = get_global_debugger()
debugger.set_trace_for_frame_and_parents(get_frame(), False)
t = threadingCurrentThread()
try:
additional_info = t.additional_info
except AttributeError:
additional_info = PyDBAdditionalThreadInfo()
t.additional_info = additional_info
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
if suspend:
debugger.set_suspend(t, CMD_THREAD_SUSPEND)
def stoptrace():
global connected
if connected:
pydevd_tracing.restore_sys_set_trace_func()
sys.settrace(None)
try:
#not available in jython!
threading.settrace(None) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
debugger = get_global_debugger()
if debugger:
debugger.set_trace_for_frame_and_parents(
get_frame(), also_add_to_passed_frame=True, overwrite_prev_trace=True, dispatch_func=lambda *args:None)
debugger.exiting()
kill_all_pydev_threads()
connected = False
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = start_client(self.host, self.port)
self.reader = DispatchReader(self)
        self.reader.pydev_do_not_trace = False  # we run the reader in the same thread, so we don't want to lose tracing
self.reader.run()
def close(self):
try:
self.reader.do_kill_pydev_thread()
except :
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(self, self.dispatcher.client)
def _on_run(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread._on_run(self)
def handle_except(self):
ReaderThread.handle_except(self)
def process_command(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self.killReceived = True
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked():
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
host, port = dispatch()
from _pydevd_bundle import pydevd_tracing
pydevd_tracing.restore_sys_set_trace_func()
if port is not None:
global connected
connected = False
custom_frames_container_init()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
)
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
setup = None
def apply_debugger_options(setup_options):
"""
:type setup_options: dict[str, bool]
"""
default_options = {'save-signatures': False, 'qt-support': False}
default_options.update(setup_options)
setup_options = default_options
debugger = GetGlobalDebugger()
if setup_options['save-signatures']:
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from _pydevd_bundle.pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
if setup_options['qt-support']:
enable_qt_support()
def patch_stdin(debugger):
from _pydev_bundle.pydev_console_utils import DebugConsoleStdIn
orig_stdin = sys.stdin
sys.stdin = DebugConsoleStdIn(debugger, orig_stdin)
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
# parse the command line. --file is our last argument that is required
try:
sys.original_argv = sys.argv[:]
setup = process_command_line(sys.argv)
SetupHolder.setup = setup
except ValueError:
traceback.print_exc()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
fix_getpass.fix_getpass()
pydev_log.debug("Executing file %s" % setup['file'])
pydev_log.debug("arguments: %s"% str(sys.argv))
pydevd_vm_type.setup_type(setup.get('vm_type', None))
if os.getenv('PYCHARM_DEBUG') == 'True' or os.getenv('PYDEV_DEBUG') == 'True':
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
debugger = PyDB()
try:
from _pydev_bundle import pydev_monkey
except:
pass #Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
            # Note: we're not inside a method, so there's no need for 'global'
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n" %port)
pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
else:
pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
finally:
dispatcher.close()
else:
pydev_log.info("pydev debugger: starting\n")
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
traceback.print_exc()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
        # that if the user already had a stackless.set_schedule_callback installed, he'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
from _pydevd_bundle import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
pass # It's ok not having stackless there...
is_module = setup['module']
patch_stdin(debugger)
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
if setup['save-threading']:
debugger.thread_analyser = ThreadingLogger()
if setup['save-asyncio']:
if IS_PY34_OLDER:
debugger.asyncio_analyser = AsyncioLogger()
apply_debugger_options(setup)
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
connected = True # Mark that we're connected when started from inside ide.
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
| apache-2.0 |
eg-zhang/h2o-2 | py/testdir_single_jvm/test_GLM2_basic_cmp2.py | 9 | 7280 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_jobs
import h2o_print as h2p
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
import sklearn as sk
import statsmodels as sm
import statsmodels.api as sm_api
print "numpy, scipy and sklearn are installed. Will do extra checks"
except ImportError:
print "numpy, sklearn, or statsmodels is not installed. Will just do h2o stuff"
SCIPY_INSTALLED = False
# http://statsmodels.sourceforge.net/devel/glm.html#module-reference
# This seems better than the sklearn LogisticRegression I was using
# Using Logit is as simple as this: http://statsmodels.sourceforge.net/devel/examples/generated/example_discrete.html
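# For instance (illustrative sketch only, not executed in this test -- 'target' and
# 'train' stand for the response vector and feature matrix built below):
#   logit_model = sm_api.Logit(target, train)
#   logit_results = logit_model.fit()
#   print logit_results.summary()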
#*********************************************************************************
def do_statsmodels_glm(self, bucket, csvPathname, L, family='gaussian'):
h2p.red_print("Now doing statsmodels")
h2p.red_print("http://statsmodels.sourceforge.net/devel/glm.html#module-reference")
h2p.red_print("http://statsmodels.sourceforge.net/devel/generated/statsmodels.genmod.generalized_linear_model.GLM.html")
import numpy as np
import scipy as sp
from numpy import loadtxt
import statsmodels as sm
csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
if 1==1:
dataset = np.loadtxt(
open(csvPathnameFull,'r'),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
    # skipping cols from the beginning... (ID is col 1)
# In newer versions of Numpy, np.genfromtxt can take an iterable argument,
# so you can wrap the file you're reading in a generator that generates lines,
# skipping the first N columns. If your numbers are comma-separated, that's something like
if 1==0:
f = open(csvPathnameFull,'r'),
np.genfromtxt(
(",".join(ln.split()[1:]) for ln in f),
skiprows=1, # skip the header
delimiter=',',
dtype='float');
print "\ncsv read for training, done"
# data is last column
# drop the output
n_features = len(dataset[0]) - 1;
print "n_features:", n_features
# don't want ID (col 0) or CAPSULE (col 1)
# get CAPSULE
target = [x[1] for x in dataset]
# slice off the first 2
train = np.array ( [x[2:] for x in dataset] )
n_samples, n_features = train.shape
print "n_samples:", n_samples, "n_features:", n_features
print "histogram of target"
print sp.histogram(target,3)
print "len(train):", len(train)
print "len(target):", len(target)
print "dataset shape:", dataset.shape
if family!='gaussian':
raise Exception("Only have gaussian logistic for scipy")
# train the classifier
gauss_log = sm_api.GLM(target, train, family=sm_api.families.Gaussian(sm_api.families.links.log))
start = time.time()
gauss_log_results = gauss_log.fit()
print "sm_api.GLM took", time.time() - start, "seconds"
print gauss_log_results.summary()
#*********************************************************************************
def do_h2o_glm(self, bucket, csvPathname, L, family='gaussian'):
h2p.red_print("\nNow doing h2o")
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', timeoutSecs=180)
# save the resolved pathname for use in the sklearn csv read below
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print inspect
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# Need to chop out the ID col?
# x = 'ID'
# y = 'CAPSULE'
family = family
alpha = '0'
lambda_ = L
nfolds = '0'
modelKey = 'GLM_Model'
y = 'GLEASON'
kwargs = {
'response' : y,
'ignored_cols' : 'ID, CAPSULE',
'family' : family,
'lambda' : lambda_,
'alpha' : alpha,
'n_folds' : nfolds, # passes if 0, fails otherwise
'destination_key' : modelKey,
}
timeoutSecs = 60
start = time.time()
glmResult = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
# this stuff was left over from when we got the result after polling the jobs list
# okay to do it again
# GLM2: when it redirects to the model view, we no longer have the job_key! (unlike the first response and polling)
(warnings, clist, intercept) = h2o_glm.simpleCheckGLM(self, glmResult, None, **kwargs)
cstring = "".join([("%.5e " % c) for c in clist])
h2p.green_print("h2o alpha ", alpha)
h2p.green_print("h2o lambda ", lambda_)
h2p.green_print("h2o coefficient list:", cstring)
h2p.green_print("h2o intercept", "%.5e " % intercept)
# other stuff in the json response
glm_model = glmResult['glm_model']
_names = glm_model['_names']
coefficients_names = glm_model['coefficients_names']
    # the first submodel is the right one, if only one lambda is provided as a parameter above
submodels = glm_model['submodels'][0]
beta = submodels['beta']
h2p.red_print("beta:", beta)
norm_beta = submodels['norm_beta']
iteration = submodels['iteration']
validation = submodels['validation']
auc = validation['auc']
aic = validation['aic']
null_deviance = validation['null_deviance']
residual_deviance = validation['residual_deviance']
print '_names', _names
print 'coefficients_names', coefficients_names
# did beta get shortened? the simple check confirms names/beta/norm_beta are same length
print 'beta', beta
print 'iteration', iteration
print 'auc', auc
#*********************************************************************************
# the actual test that will run both
#*********************************************************************************
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_basic_cmp2(self):
if 1==1:
bucket = 'smalldata'
importFolderPath = "logreg"
csvFilename = 'prostate.csv'
if 1==0:
bucket = 'home-0xdiag-datasets'
importFolderPath = "standard"
csvFilename = 'covtype.data'
csvPathname = importFolderPath + "/" + csvFilename
# use L for lambda in h2o, C=1/L in sklearn
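        # e.g. L = 1e-4 here would correspond to C = 1/L = 1e4 on the sklearn side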
family = 'gaussian'
L = 1e-4
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_statsmodels_glm(self, bucket, csvPathname, L, family)
# since we invert for C, can't use 0 (infinity)
L = 0
do_h2o_glm(self, bucket, csvPathname, L, family)
if SCIPY_INSTALLED:
do_statsmodels_glm(self, bucket, csvPathname, L, family)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
sdrdis/iarpa_contest_submission | chain_merge_pcs.py | 1 | 14911 | '''
This is the second part of our chain: it merges the 3D point clouds from all the pairs...
'''
import numpy as np
from params import *
from functions import *
import scipy.spatial
import scipy.misc
from scipy import ndimage
from sklearn import linear_model
import scipy.stats
import time
import cv2
from fastkml import kml
import struct
# It starts here...
def run(kml_path, out_path):
definition = 0.6
# First, extract the bounds from the kml file...
bounds, final_bounds, im_size, final_im_size, decal = get_bounds_and_imsize_from_kml(kml_path, definition)
# Loading all 3d from pairs
print '####### Loading PCs'
pcs = load_PCs(bounds, im_size)
# Aligning all the 3d using correlation
print '####### Correcting long lats'
try:
reference_pair_id = correct_all_long_lat(pcs, bounds, im_size)
except:
log_msg('WARNING: CORRECT ALL LONG LAT FAILED...')
reference_pair_id = 0
# Merging all 3d...
print '####### Get all heights'
f_infos = get_all_heights(pcs, reference_pair_id, im_size, final_im_size, decal)
if (is_debug_mode):
start_save_time = time.time()
np.savez_compressed('tmp/f_infos', f_infos=f_infos, bounds=final_bounds)
print 'save time:', time.time() - start_save_time
concensus_needed = int(len(pcs) * relative_consensus)
print 'Consensus needed:', concensus_needed, f_infos.shape
if height_map_post_process_enabled:
post_process_f_infos(f_infos, concensus_needed)
if (out_path[-4:] == '.npz'):
np.savez_compressed(out_path[:-4], f_infos=f_infos, bounds=final_bounds)
else:
# Converting height map back to 3d positions
spherical_c = final_heights_to_spherical_c(f_infos, final_bounds)
save_pc(out_path, spherical_c)
def get_correlation_score(im_1_np, im_2_np):
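    # Normalized cross-correlation: sum(a*b) / sqrt(sum(a^2) * sum(b^2)), computed
    # only over pixels where both images are defined (non-NaN).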
try:
defined_np = np.logical_not(np.logical_or(np.isnan(im_1_np), np.isnan(im_2_np)))
im_1_np = im_1_np[defined_np]
im_2_np = im_2_np[defined_np]
divider = np.sqrt(np.sum(im_1_np*im_1_np)*np.sum(im_2_np*im_2_np))
if (divider == 0):
return 0
return np.sum(im_1_np * im_2_np) / divider
except:
return 0
def find_D(r_im_np, im_np, init_D, area_size = 20):
w = r_im_np.shape[1]
h = r_im_np.shape[0]
scores = np.zeros((area_size * 2 + 1, area_size * 2 + 1))
for x in range(-area_size, +area_size + 1):
for y in range(-area_size, +area_size + 1):
r_from_x = max(-x-init_D[0], 0)
r_from_y = max(-y-init_D[1], 0)
r_to_x = min(w-x-init_D[0],w)
r_to_y = min(h-y-init_D[1],h)
from_x = max(x+init_D[0], 0)
from_y = max(y+init_D[1], 0)
to_x = min(w+x+init_D[0],w)
to_y = min(h+y+init_D[1],h)
r_extract_np = r_im_np[r_from_y:r_to_y, r_from_x:r_to_x]
extract_np = im_np[from_y:to_y, from_x:to_x]
score = get_correlation_score(r_extract_np, extract_np)
scores[y + area_size, x + area_size] = score
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(scores)
Dx = maxLoc[0] + init_D[0]
Dy = maxLoc[1] + init_D[1]
Dx -= area_size
Dy -= area_size
D = [Dx, Dy]
return D
def get_correlation_between_pairs(pcs, type_im = 1):
min_scales = []
scores = []
for pair_id in xrange(len(pcs)):
im_np = pcs[pair_id][:,:,type_im]
min_scales.append(scipy.misc.imresize(im_np, 1.0 / 2.0, 'bilinear', 'F'))
scores.append(0)
if (is_debug_mode):
imsave('tmp/mini-' + str(pair_id) + '.png', min_scales[pair_id])
for pair_id in xrange(len(scores)):
im_1_np = min_scales[pair_id]
for second_pair_id in xrange(pair_id + 1, len(scores)):
if (second_pair_id != pair_id):
im_2_np = min_scales[second_pair_id]
score = get_correlation_score(im_1_np, im_2_np)
scores[pair_id] += score
scores[second_pair_id] += score
return scores
def find_Ds(ims, reference_image_id, margin):
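    # Coarse-to-fine displacement search: estimate the shift at 1/4 resolution with a
    # wide search window, then refine it at 1/2 and full resolution with a narrow one.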
pyramid_scales = [4, 2, 1]
r_im_np = ims[reference_image_id]
r_pyr_ims = []
for i in xrange(len(pyramid_scales)):
r_pyr_ims.append(scipy.misc.imresize(r_im_np, 1.0 / pyramid_scales[i], 'bilinear', 'F'))
Ds = []
start_time = time.time()
for pair_id in xrange(len(ims)):
if (pair_id != reference_image_id):
im_np = ims[pair_id]
D = [0, 0]
search_area = 20
for i in xrange(len(pyramid_scales)):
scale = pyramid_scales[i]
pyr_im_np = scipy.misc.imresize(im_np, 1.0 / pyramid_scales[i], 'bilinear', 'F')
init_D = [D[0] / scale, D[1] / scale]
D = find_D(r_pyr_ims[i], pyr_im_np, init_D, search_area)
D[0] *= scale
D[1] *= scale
search_area = 5
Ds.append(D)
else:
Ds.append((0, 0))
    log_msg('Realignment time: ' + str(time.time() - start_time))
return Ds
def save_color_map(path, color_map):
c_map = color_map.copy()
c_map[np.isnan(c_map)] = 0
scipy.misc.imsave(path, c_map)
def decal_pc(dx, dy, pc):
height_map = pc
new_height_map = np.roll(height_map, dy, 0)
new_height_map = np.roll(new_height_map, dx, 1)
if (dy < 0):
new_height_map[dy:,:,:] = np.nan
else:
new_height_map[:dy,:,:] = np.nan
if (dx < 0):
new_height_map[:,dx:,:] = np.nan
else:
new_height_map[:,:dx,:] = np.nan
return new_height_map
def get_reference_pair(pcs, Ds):
scores = get_correlation_between_pairs(pcs)
if (len(scores) > 5):
from_id = int(round(len(scores) / 3)) #todo: add 2.0 *
else:
from_id = 0
best_ids = np.argsort(scores)[from_id:]
d_scores = np.zeros(best_ids.shape[0])
i = 0
for pair_id in best_ids:
r_D = Ds[pair_id]
for second_pair_id in xrange(len(pcs)):
D = Ds[second_pair_id]
d_scores[i] += abs(r_D[0] - D[0]) + abs(r_D[1] - D[1])
i += 1
reference_pair_id = best_ids[np.argmin(d_scores)]
    return reference_pair_id
def correct_all_long_lat(pcs, bounds, im_size):
ims = []
for pair_id in xrange(len(pcs)):
color_map = pcs[pair_id][:,:,1]
ims.append(color_map)
if (is_debug_mode):
save_color_map(tmp_path + str(pair_id) + '-color.png', color_map)
margin = min(im_size[0] / 20, im_size[1] / 20)
r_im_np = ims[0]
print 'Computing Ds'
scores = get_correlation_between_pairs(pcs)
tmp_reference_pair_id = np.argmax(scores)
Ds = find_Ds(ims, tmp_reference_pair_id, margin)
print 'Matching all images'
for pair_id in xrange(len(pcs)):
if pair_id != tmp_reference_pair_id:
pcs[pair_id] = decal_pc(-Ds[pair_id][0], -Ds[pair_id][1], pcs[pair_id])
print 'Computing reference pair'
reference_pair_id = get_reference_pair(pcs, Ds)
    print 'Re-aligning according to reference pair'
r_D = Ds[reference_pair_id]
for pair_id in xrange(len(pcs)):
pcs[pair_id] = decal_pc(r_D[0], r_D[1], pcs[pair_id])
if (is_debug_mode):
save_color_map(tmp_path + 'D-' + str(pair_id) + '.png', pcs[pair_id][:,:,1])
return reference_pair_id
def get_CPC(image_filepath, center_filepath, color_filepath, consistency_filepath, bounds, im_size):
pc = get_PC(image_filepath, center_filepath, color_filepath, consistency_filepath)
pair_heights = get_pair_heights(pc[1], bounds, im_size)
return pair_heights
def load_PCs(bounds, im_size):
start_time = time.time()
pc_dirs = [f for f in os.listdir(tmp_stereo_output_path) if os.path.isdir(join(tmp_stereo_output_path, f))]
pcs = []
pc_i = 0
for pc_dir in pc_dirs:
elapsed_time = time.time() - start_time
if (elapsed_time > merge_pcs_allocated_time):
log_msg('TAKING TOO MUCH TIME WHEN REFERENCING PCs, skipping...')
break
print pc_i, '/', len(pc_dirs), '-- Elapsed time:', elapsed_time
pc_i += 1
pair_path = join(tmp_stereo_output_path, pc_dir)
pc_file = pair_path + '/results/out-PC.tif'
print 'LOADING:', pc_file
if (os.path.isfile(pc_file)):
try:
pc = get_CPC(pc_file, pair_path + '/results/out-PC-center.txt', pair_path + '/results/out-L.tif', pair_path + '/results/consistency.npz', bounds, im_size)
pcs.append(pc)
except:
log_msg('ERROR WHILE READING PC FILE: ' + pc_file)
else:
log_msg('ERROR SINCE NO PC FILE: ' + pc_file)
log_msg('NB PCS: ' + str(len(pcs)))
return pcs
def get_pair_heights(spherical_c, bounds, im_size):
heights = [[[[], [], [], [], []] for j in xrange(im_size[1])] for i in xrange(im_size[0])]
x, y = spherical_to_image_positions(spherical_c, bounds, im_size)
x = np.round(x).astype(int)
y = np.round(y).astype(int)
selected = np.logical_and(np.logical_and(x >= 0, y >= 0), np.logical_and(x < im_size[1], y < im_size[0]))
x = x[selected]
y = y[selected]
sel_spherical_c = spherical_c[selected]
sel_heights = sel_spherical_c[:,0]
sel_grays = sel_spherical_c[:,3]
sel_lrc_init = sel_spherical_c[:,4]
sel_lrc_1 = sel_spherical_c[:,5]
sel_lrc_2 = sel_spherical_c[:,6]
print '-> Listing view heights'
for i in xrange(sel_heights.shape[0]):
heights[y[i]][x[i]][0].append(sel_heights[i])
heights[y[i]][x[i]][1].append(sel_grays[i])
heights[y[i]][x[i]][2].append(sel_lrc_init[i])
heights[y[i]][x[i]][3].append(sel_lrc_1[i])
heights[y[i]][x[i]][4].append(sel_lrc_2[i])
print '-> Merging view heights'
f_heights = np.zeros((im_size[0], im_size[1], 3))
f_heights[:] = np.nan
for y in xrange(len(heights)):
for x in xrange(len(heights[0])):
height_infos = heights[y][x]
if (len(height_infos[0]) == 0):
continue
infos = np.array(height_infos)
max_height = np.max(infos[0,:])
selected = infos[0,:] >= (max_height - acceptable_height_deviation)
mean_infos = np.mean(infos[:,selected], 1)
in_lrc_init = mean_infos[2] > 0.0001
in_lrc_1 = mean_infos[3] > 0.0001
in_lrc_2 = mean_infos[4] > 0.0001
in_both = in_lrc_1 and in_lrc_2
in_three = in_both and in_lrc_init
lrc_val = 0
if (in_both):
lrc_val = 1
if (in_three):
lrc_val = 2
f_heights[y,x,0] = float(mean_infos[0])
f_heights[y,x,1] = float(mean_infos[1])
f_heights[y,x,2] = lrc_val
print '-> View heights merged'
return f_heights
def correct_heights(pc, reference_pc):
height_np = pc[:,:,0]
r_height_np = reference_pc[:,:,0]
selected = np.logical_not(np.logical_or(np.isnan(height_np), np.isnan(r_height_np)))
if (np.sum(selected) > 0):
diff = np.median(r_height_np[selected] - height_np[selected])
pc[:,:,0] += diff
def get_all_heights(pcs, reference_pair_id, im_size, final_im_size, decal):
(dy, dx) = decal
for pair_id in xrange(len(pcs)):
print pair_id, '/', len(pcs)
if pair_id != reference_pair_id:
correct_heights(pcs[pair_id], pcs[reference_pair_id])
if (is_debug_mode):
pair_heights = pcs[pair_id]
imsave('tmp/F-' + str(pair_id) + '.png', pair_heights[:,:,0])
np.save('tmp/FF-' + str(pair_id), pair_heights[:,:,0])
print '-> Merging'
f_infos = np.zeros((final_im_size[0], final_im_size[1], 3))
f_infos[:] = np.nan
progression = 0
nb_total = final_im_size[0] * final_im_size[1]
for y in xrange(final_im_size[0]):
from_y = y + dy
for x in xrange(final_im_size[1]):
from_x = x + dx
if (progression % (nb_total / 40) == 0):
print progression * 100.0 / nb_total, '%'
item_heights = [[],[],[]]
for pair_id in xrange(len(pcs)):
pair_heights = pcs[pair_id]
if (np.isnan(pair_heights[from_y,from_x,0])):
continue
for i in xrange(3):
item_heights[i].append(struct.unpack('f', struct.pack('f', pair_heights[from_y,from_x,i]))[0])
try:
best_interval = get_final_height_from_heights(item_heights)
except:
log_msg('Error when getting interval for (' + str(y) + ', ' + str(x) + ')')
best_interval = None
if (best_interval is not None):
f_infos[y,x,0] = best_interval[3]
f_infos[y,x,1] = best_interval[2][0]
f_infos[y,x,2] = best_interval[2][1]
progression += 1
return f_infos
def get_bounds_and_imsize_from_kml(kml_file, definition = 0.6, margin = 100, final_margin = 22):
with open(kml_file, 'r') as content_file:
content = content_file.read()
k = kml.KML()
k.from_string(content)
f = list(list(k.features())[0].features())[0].geometry.bounds
wgs84 = pyproj.Proj('+proj=utm +zone=21 +datum=WGS84 +south')
lon, lat = wgs84([f[0], f[2]], [f[1], f[3]])
bounds = [[0, 0], [lon[0] - margin, lon[1] + margin], [lat[0] - margin, lat[1] + margin]]
final_bounds = [[0, 0], [lon[0] - final_margin, lon[1] + final_margin], [lat[0] - final_margin, lat[1] + final_margin]]
height = int(round((bounds[1][1] - bounds[1][0]) / definition))
width = int(round((bounds[2][1] - bounds[2][0]) / definition))
final_height = int(round((final_bounds[1][1] - final_bounds[1][0]) / definition))
final_width = int(round((final_bounds[2][1] - final_bounds[2][0]) / definition))
dx = int((width - final_width) / 2)
dy = int((height - final_height) / 2)
return bounds, final_bounds, (height, width), (final_height, final_width), (dy, dx)
if __name__ == "__main__":
nb_args = len(sys.argv)
if (nb_args < 3):
print 'Correct format: python chain_merge_pcs.py [Input KML file] [Output file]'
else:
kml_path = sys.argv[1]
out_path = sys.argv[2]
run(kml_path, out_path)
| mit |
iurilarosa/thesis | codici/Archiviati/numpy/Hough.py | 1 | 3068 | import scipy.io
import pandas
import numpy
import os
from matplotlib import pyplot
from scipy import sparse
import multiprocessing
import timeit
# load the data file
percorsoFile = "/home/protoss/Documenti/TESI/DATI/peaks.mat"
#print(picchi.shape)
#picchi[0]
# NB: picchi (the peaks array) holds 0 - times
#                                    1 - frequencies
#                                    4 - weights
# now populate the dataframe
tabella = pandas.DataFrame(scipy.io.loadmat(percorsoFile)['PEAKS'])
tabella.drop(tabella.columns[[2, 3]], axis = 1, inplace=True)
tabella.columns = ["tempi", "frequenze","pesi"]
# safety margin (extra frequency bins)
securbelt = 4000
headerFreq= scipy.io.loadmat(percorsoFile)['hm_job'][0,0]['fr'][0]
headerSpindown = scipy.io.loadmat(percorsoFile)['hm_job'][0,0]['sd'][0]
# NB: headerFreq holds 0 - minimum frequency,
#                      1 - frequency step,
#                      2 - enhancement in frequency resolution,
#                      3 - maximum frequency,
# headerSpindown holds 0 - initial pulsar spindown
#                      1 - spindown step
#                      2 - number of spindown steps
# Define the corresponding variables for convenience and code clarity
# frequencies
minFreq = headerFreq[0]
maxFreq = headerFreq[3]
enhancement = headerFreq[2]
stepFrequenza = headerFreq[1]
stepFreqRaffinato = stepFrequenza/enhancement
freqIniz = minFreq- stepFrequenza/2 - stepFreqRaffinato
freqFin = maxFreq + stepFrequenza/2 + stepFreqRaffinato
nstepFrequenze = numpy.ceil((freqFin-freqIniz)/stepFreqRaffinato)+securbelt
#spindown
spindownIniz = headerSpindown[0]
stepSpindown = headerSpindown[1]
nstepSpindown = headerSpindown[2].astype(int)
# rearrange the arrays so that I have the data
# in the format I want
frequenze = tabella['frequenze'].values
frequenze = ((frequenze-freqIniz)/stepFreqRaffinato)-round(enhancement/2+0.001)
tempi = tabella['tempi'].values
tempi = ((tempi)*3600*24/stepFreqRaffinato)+1
tempi = tempi - numpy.amin(tempi)+1
#tempi = tempi.astype(int)
pesi = tabella['pesi'].values
#%reset_selective tabella
start_time = timeit.default_timer()
nstepSpindown = 300
spindowns = numpy.arange(1, nstepSpindown+1)
spindowns = numpy.multiply(spindowns,stepSpindown)
spindowns = numpy.add(spindowns, spindownIniz)
# this way I have the three arrays of the three quantities
nRows = nstepSpindown
nColumns = nstepFrequenze.astype(int)
fakeRow = numpy.zeros(frequenze.size)
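# One row of the Hough map per spindown hypothesis: shift each peak frequency by
# spindown*time and accumulate the peak weights into the corresponding frequency bins.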
def itermatrix(stepIesimo):
sdPerTempo = spindowns[stepIesimo]*tempi
appoggio = numpy.round(frequenze-sdPerTempo+securbelt/2).astype(int)
matrix = sparse.coo_matrix((pesi, (fakeRow, appoggio))).todense()
matrix = numpy.ravel(matrix)
missColumns = (nColumns-matrix.size)
zeros = numpy.zeros(missColumns)
matrix = numpy.concatenate((matrix, zeros))
return matrix
pool = multiprocessing.Pool()
imageMapped = list(pool.map(itermatrix, range(nstepSpindown)))
pool.close()
imageMapped = numpy.array(imageMapped)
print(timeit.default_timer() - start_time)
ncolonne = nstepFrequenze.astype(int)
#pyplot.figure(figsize=(120, 36))
a = pyplot.imshow(imageMapped[:,3400:ncolonne-1500], aspect = 40)
pyplot.colorbar(shrink = 0.5,aspect = 10)
| gpl-3.0 |
juantascon/vestige | doc/experiments/initial/results/pie.py | 1 | 2187 | #!/usr/bin/env python
import matplotlib.pyplot as mpl
import csv
class Question:
def __init__(self, fname, title, answers):
self.fname = fname
self.title = title
self.answers = answers
self.full_fracs = [0, 0, 0, 0, 0]
for val in self.answers:
if val == -2: self.full_fracs[0] += 1
elif val == -1: self.full_fracs[1] += 1
elif val == 0: self.full_fracs[2] += 1
elif val == 1: self.full_fracs[3] += 1
elif val == 2: self.full_fracs[4] += 1
def average(self):
return sum(self.answers) / float(len(self.answers))
def fracs(self):
ret = []
for val in self.full_fracs:
if val != 0: ret.append(val)
return ret
def explode(self):
ret = []
for val in self.full_fracs:
if val != 0: ret.append(0.05)
return ret
def labels(self):
ret = []
full_labels = ["Strongly\nDisagree", "Disagree", "Neutral", "Agree", "Strongly\nAgree"]
for i in range(0, len(self.full_fracs)):
if self.full_fracs[i] != 0: ret.append(full_labels[i])
return ret
def colors(self):
ret = []
full_colors = ["k", "r", "y", "g", "b"]
for i in range(0, len(self.full_fracs)):
if self.full_fracs[i] != 0: ret.append(full_colors[i])
return ret
def tostring(self):
return "fname:[{0}] title:[{1}] answers:{2} avg:{3} fracs:{4}".format(self.fname, self.title, self.answers, self.average(), self.fracs())
questions = []
data = csv.reader(open("survey1.dat", "ru"))
for row in data:
fname = row[0]
title = row[1]
answers = []
for i in range(2,len(row)):
answers.append(int(row[i]))
questions.append(Question(fname, title, answers))
mpl.figure(1, figsize=(8,5))
#mpl.axes([0.1, 0.1, 0.8, 0.8])
mpl.rc('font', size=15)
for q in questions:
mpl.clf()
mpl.pie(q.fracs(), explode=q.explode(), labels=q.labels(), colors=q.colors(), autopct='%1.1f%%', shadow=True)
mpl.title(q.title, bbox={'facecolor':'0.8', 'pad':10})
mpl.savefig("img/{0}.png".format(q.fname))
| gpl-3.0 |
iancze/PSOAP | tests/test_orbit_astrometry_HD10009.py | 1 | 10205 | import pytest
import os
import pkg_resources
import numpy as np
from psoap import orbit_astrometry
from psoap import constants as C
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib
from astropy.io import ascii
# Create plots of all of the orbits
# If it doesn't already exist, create the output directory for the plots
outdir = "tests/plots/HD10009/"
if not os.path.exists(outdir):
print("Creating ", outdir)
os.makedirs(outdir)
# Load the data sets for radial velocity and astrometry
astro_fname = pkg_resources.resource_filename("psoap", "data/HD10009/astro.txt")
astro_data = ascii.read(astro_fname, format="csv", fill_values=[(".", '0')])
# print(astro_data["date"].data)
# convert UT date to JD
astro_dates = Time(astro_data["date"].data, format="decimalyear")
astro_dates.format = 'jd'
astro_jds = astro_dates.value
rho_data = astro_data["rho"]
rho_err = astro_data["rho_err"]
theta_data = astro_data["PA"]
theta_err = astro_data["PA_err"]
def test_data():
# Make a plot of the astrometric data on the sky
fig, ax = plt.subplots(nrows=1)
xs = rho_data * np.cos(theta_data * np.pi/180)
ys = rho_data * np.sin(theta_data * np.pi/180)
ax.plot(xs, ys, ".")
ax.set_xlabel("North")
ax.set_ylabel("East")
ax.plot(0,0, "k*")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "data_astro.png")
rv1_fname = pkg_resources.resource_filename("psoap", "data/HD10009/rv1.txt")
rv_data_1 = ascii.read(rv1_fname, format="csv")
rv2_fname = pkg_resources.resource_filename("psoap", "data/HD10009/rv2.txt")
rv_data_2 = ascii.read(rv2_fname, format="csv")
rv_jds_A = rv_data_1["date"] + 2400000
vAs_data = rv_data_1["rv"]
vAs_err = rv_data_1["err"]
rv_jds_B = rv_data_2["date"] + 2400000
vBs_data = rv_data_2["rv"]
vBs_err = rv_data_2["err"]
dpc = 37.03 # pc
# Orbital elements
a = 0.324 * dpc # [AU]
e = 0.798
i = 96.0 # [deg]
omega_2 = 251.6 # omega_1
omega = omega_2 + 180.0
Omega = 159.6
T0 = Time(1989.92, format="decimalyear")
T0.format = "jd"
T0 = T0.value # [Julian Date]
M_2 = 1.0 # [M_sun]
M_tot = 1.2 + M_2 # [M_sun]
gamma = 47.8 # [km/s]
P = np.sqrt(4 * np.pi**2 / (C.G * M_tot * C.M_sun) * (a * C.AU)**3) / (24 * 3600) # [day]
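# (Kepler's third law, P^2 = 4 pi^2 a^3 / (G M_tot), evaluated in seconds and
#  converted to days by the 24*3600 factor)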
print(P/365)
# Pick a span of dates for one period
dates = np.linspace(T0, T0 + P, num=600)
# Initialize the orbit
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
full_dict = orb.get_full_orbit()
vAs, vBs, XYZ_As, XYZ_Bs, XYZ_ABs, xy_As, xy_Bs, xy_ABs = [full_dict[key] for key in ("vAs", "vBs", "XYZ_As", "XYZ_Bs", "XYZ_ABs", "xy_As", "xy_Bs", "xy_ABs")]
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
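# (1 AU seen from 1 pc subtends 1 arcsec, so dividing the AU offsets by the
#  distance in pc gives angular offsets in arcsec)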
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
alpha_dec_ABs = XYZ_ABs/dpc # [arcsec]
rho_ABs = rho_ABs/dpc # [arcsec]
peri_A = orb._get_periastron_A()/dpc
peri_B = orb._get_periastron_B()/dpc
peri_BA = orb._get_periastron_BA()/dpc
asc_A = orb._get_node_A()/dpc
asc_B = orb._get_node_B()/dpc
asc_BA = orb._get_node_BA()/dpc
# Since we are plotting vs one date, we need to plot the dots using a color scale so we can figure them out along the orbit.
# Set a colorscale over the observation dates
cmap_primary = matplotlib.cm.get_cmap("Blues")
cmap_secondary = matplotlib.cm.get_cmap("Oranges")
norm = matplotlib.colors.Normalize(vmin=np.min(dates), vmax=np.max(dates))
# Determine the color of each point from its observation date
def plot_points(ax, dates, xs, ys, primary):
for date, x, y in zip(dates, xs, ys):
if primary:
c = cmap_primary(norm(date))
else:
c = cmap_secondary(norm(date))
ax.plot(x, y, "o", color=c, mew=0.1, ms=3, mec="k")
# Then, we will make 3D plots of the orbit so that we can square with what we think is happening.
# The final crowning grace will be a 3D matplotlib plot of the orbital path.
# Plot the Orbits
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, alpha_dec_ABs[:,0], alpha_dec_ABs[:,1], False)
ax.plot(0,0, "*k", ms=2)
ax.plot(peri_BA[0], peri_BA[1], "ko", ms=3)
ax.plot(asc_BA[0], asc_BA[1], "o", color="C2", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta $ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A.png")
# Make a series of astrometric plots from different angles.
def test_AB_Z():
# Now plot A and B together, viewed from the Z axis
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,0], alpha_dec_As[:,1], True)
plot_points(ax, dates, alpha_dec_Bs[:,0], alpha_dec_Bs[:,1], False)
ax.plot(peri_A[0], peri_A[1], "ko", ms=3)
ax.plot(peri_B[0], peri_B[1], "ko", ms=3)
ax.plot(asc_A[0], asc_A[1], "^", color="C0", ms=3)
ax.plot(asc_B[0], asc_B[1], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85)
# Plot A and B together, viewed from the observer (along -Z axis).
fig.savefig(outdir + "orbit_AB_Z.png")
def test_AB_X():
# Now plot A and B together, viewed from the X axis
# This means Y will form the "X" axis, or North
# And Z will form the Y axis, or towards observer
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,1], alpha_dec_As[:,2], True)
plot_points(ax, dates, alpha_dec_Bs[:,1], alpha_dec_Bs[:,2], False)
ax.plot(peri_A[1], peri_A[2], "ko", ms=3)
ax.plot(peri_B[1], peri_B[2], "ko", ms=3)
ax.plot(asc_A[1], asc_A[2], "^", color="C0", ms=3)
ax.plot(asc_B[1], asc_B[2], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \alpha \cos delta$ mas")
ax.set_ylabel(r"$\Delta Z$ mas (towards observer)")
ax.axhline(0, ls=":", color="k")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_X.png")
def test_AB_Y():
# Now plot A and B together, viewed from the Y axis
# This means Z will form the "X" axis, or towards the observer
# And X will form the Y axis, or East
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,2], alpha_dec_As[:,0], True)
plot_points(ax, dates, alpha_dec_Bs[:,2], alpha_dec_Bs[:,0], False)
ax.plot(peri_A[2], peri_A[0], "ko", ms=3)
ax.plot(peri_B[2], peri_B[0], "ko", ms=3)
ax.plot(asc_A[2], asc_A[0], "^", color="C0", ms=3)
ax.plot(asc_B[2], asc_B[0], "^", color="C1", ms=3)
ax.axvline(0, ls=":", color="k")
ax.set_xlabel(r"$\Delta Z$ mas (towards observer)")
ax.set_ylabel(r"$\Delta \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_Y.png")
def test_vel_rho_theta_one_period():
# Plot velocities, rho, and theta as function of time for one period
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(8,8))
ax[0].plot(dates, vAs)
# ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
# ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
# ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
# ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
# ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="")
# ax[2].plot(astro_jds, rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]")
ax[3].plot(dates, theta_ABs)
# ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
# ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta_one_period.png", dpi=400)
# Now make a 3D Orbit and pop it up
def test_B_rel_A_plane():
# Plot the orbits in the plane
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_ABs[:,0], xy_ABs[:,1], False)
ax.plot(0,0, "*k", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A_plane.png")
def test_AB_plane():
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_As[:,0], xy_As[:,1], True)
plot_points(ax, dates, xy_Bs[:,0], xy_Bs[:,1], False)
ax.plot(0,0, "ko", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_plane.png")
# Redo this using a finer space series of dates spanning the full series of observations.
# Pick a span of dates for the observations
dates = np.linspace(2443500, 2452010, num=3000) # [day]
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
rho_ABs = rho_ABs/dpc # [arcsec]
def test_vel_rho_theta():
# Plot velocities, rho, and theta as function of time
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(12,8))
ax[0].plot(dates, vAs)
ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
# ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="")
ax[2].plot(astro_jds, rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]")
ax[3].plot(dates, theta_ABs)
# ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta.png", dpi=400)
plt.close('all')
plt.close('all')
| mit |
thatguyandy27/python-sandbox | Ex_Files_ML_EssT_Recommendations/Exercise Files/Chapter 6/make_recommendations final.py | 1 | 1698 | import numpy as np
import pandas as pd
import matrix_factorization_utilities
# Load user ratings
raw_dataset_df = pd.read_csv('movie_ratings_data_set.csv')
# Load movie titles
movies_df = pd.read_csv('movies.csv', index_col='movie_id')
# Convert the running list of user ratings into a matrix
ratings_df = pd.pivot_table(raw_dataset_df, index='user_id',
columns='movie_id',
aggfunc=np.max)
# Apply matrix factorization to find the latent features
U, M = matrix_factorization_utilities.low_rank_matrix_factorization(ratings_df.as_matrix(),
num_features=15,
regularization_amount=0.1)
# Find all predicted ratings by multiplying U and M matrices
predicted_ratings = np.matmul(U, M)
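# U holds one row of latent features per user and M one column per movie, so their
# product reconstructs the full (n_users x n_movies) matrix of predicted ratings.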
print("Enter a user_id to get recommendations (Between 1 and 100):")
user_id_to_search = int(input())
print("Movies previously reviewed by user_id {}:".format(user_id_to_search))
reviewed_movies_df = raw_dataset_df[raw_dataset_df['user_id'] == user_id_to_search]
reviewed_movies_df = reviewed_movies_df.join(movies_df, on='movie_id')
print(reviewed_movies_df[['title', 'genre', 'value']])
input("Press enter to continue.")
print("Movies we will recommend:")
user_ratings = predicted_ratings[user_id_to_search - 1]
movies_df['rating'] = user_ratings
already_reviewed = reviewed_movies_df['movie_id']
recommended_df = movies_df[movies_df.index.isin(already_reviewed) == False]
recommended_df = recommended_df.sort_values(by=['rating'], ascending=False)
print(recommended_df[['title', 'genre', 'rating']].head(5))
| mit |
rlkelly/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
raise ValueError()
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
Titan-C/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 42 | 10505 | from sklearn.utils.testing import assert_true
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
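    # Reference implementation of the Passive-Aggressive update rules
    # (Crammer et al., 2006): for the "hinge" / "epsilon_insensitive" losses
    # the step size is min(C, loss / ||x||^2) (PA-I); for the squared variants
    # it is loss / (||x||^2 + 1/(2C)) (PA-II), mirroring the code in fit().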
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0,
average=average)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
for average in (False, True):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0,
average=average)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0,
average=average)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for average in (False, True):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0,
average=average)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
Chris35Wills/Bristol_Geography_Python | geospatial_plotting/plot_vels_at_uncertainty.py | 1 | 3196 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
from matplotlib.collections import PatchCollection
from shapely.geometry import Point, Polygon, MultiPoint, MultiPolygon, LineString
from descartes import PolygonPatch
import georaster
import plotmap
rcParams['font.sans-serif'] = 'Arial'
rcParams['font.size'] = 6
rcParams['mathtext.fontset'] = 'stixsans'
err_thresh = 60.
mask = georaster.SingleBandRaster('/home/s1144267/rds/landsat/WRS12_annual/merge_1985_1986_snr4_rad340_nmin1_region_v3_pstere.mask.TIF')
dem = georaster.SingleBandRaster('/home/s1144267/rds/landsat/WRS12_annual/GIMP_dem_WRS12_merge_240m_pstere.TIF')
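# Velocity mask: pixels inside the region mask whose DEM value lies between 400 and 1100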
mask_vel = np.where((dem.r >= 400) & (dem.r <= 1100) & (mask.r == 1),1,0)
ntot = np.sum(mask_vel)
region = (-51.1,-49.2,67.450714,69.2)
lon_0 = -45
# Ice area
ice_mapo = plotmap.Map(extent=region,lon_0=lon_0)
shp_info = ice_mapo.map.readshapefile('Gimp_Ice_Mask_240m_EPSG4319',
'ice',drawbounds=False)
df_ice = pd.DataFrame({
'poly': [Polygon(xy) for xy in ice_mapo.map.ice],
'DN': [f['DN'] for f in ice_mapo.map.ice_info]
})
df_ice = df_ice[df_ice['DN'] == 1]
# build matplotlib patches from the ice polygons
df_ice['patches'] = df_ice['poly'].map(lambda x: PolygonPatch(
x,
fc='#BDBDBD',
ec='none', lw=.25, alpha=.9,zorder=99))
ice_mapo = None
plt.close()
fig = plt.figure(figsize=(8,6))
with open('list_merge_files_v4.txt','r') as fh:
n = 1
for merge_file in fh:
merge_file = merge_file.strip()
if int(merge_file[6:10]) == 1985 and int(merge_file[11:15]) == 1994:
continue
if int(merge_file[6:10]) == 2007 and int(merge_file[11:15]) == 2014:
continue
im = georaster.SingleBandRaster(merge_file)
im_err = georaster.SingleBandRaster(merge_file.replace('.vel','.err'))
mask_temp = np.where((mask_vel == 1) & (im_err.r < err_thresh),1,0)
frac = float(np.sum(mask_temp)) / ntot
if frac < 0.35:
continue
retained_vel = np.where(mask_temp == 1, im.r, np.nan)
ax = plt.subplot(3,6,n)
mapo = plotmap.Map(extent=region,lon_0=lon_0,fig=fig,ax=ax)
#mapo = plotmap.Map(extent=region,lon_0=lon_0,figsize=(2,2.5))
# plot ice by adding the PatchCollection to the axes instance
mapo.ax.add_collection(PatchCollection(df_ice['patches'].values, match_original=True))
plt.imshow(retained_vel,
cmap='YlGnBu_r',vmin=0,vmax=300,
interpolation='none',
extent=im.get_extent_projected(mapo.map),zorder=100)
year_start = merge_file[6:10]
year_end = merge_file[11:15]
label = year_start + '-' + year_end
mapo.ax.annotate(label,fontsize=8, fontweight='bold', xy=(-0.1,1.15), xycoords='axes fraction',
horizontalalignment='left', verticalalignment='top',zorder=101)
if n in [1,]:
mapo.geo_ticks(2,0.7,rotate_parallels=True)
else:
mapo.geo_ticks(2,0.7,rotate_parallels=True,
mlabels=[0,0,0,0],plabels=[0,0,0,0])
#Remove border from axis
for axis in ['top','bottom','left','right']:
mapo.ax.spines[axis].set_linewidth(0)
n += 1
# plt.savefig(merge_file.replace('.vel.TIF','.vel_60myr.png'),dpi=300)
# plt.close()
plt.subplots_adjust(hspace=0.26)
plt.savefig('all_periods_60myr.png',dpi=300)
plt.close()
| gpl-2.0 |
cactusbin/nyt | matplotlib/doc/pyplots/whats_new_98_4_fancy.py | 6 | 1765 | import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
figheight = 8
fig = plt.figure(1, figsize=(9, figheight), dpi=80)
fontsize = 0.4 * fig.dpi
def make_boxstyles(ax):
styles = mpatch.BoxStyle.get_styles()
for i, (stylename, styleclass) in enumerate(styles.items()):
ax.text(0.5, (float(len(styles)) - 0.5 - i)/len(styles), stylename,
ha="center",
size=fontsize,
transform=ax.transAxes,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
def make_arrowstyles(ax):
styles = mpatch.ArrowStyle.get_styles()
ax.set_xlim(0, 4)
ax.set_ylim(0, figheight)
for i, (stylename, styleclass) in enumerate(sorted(styles.items())):
y = (float(len(styles)) -0.25 - i) # /figheight
p = mpatch.Circle((3.2, y), 0.2, fc="w")
ax.add_patch(p)
ax.annotate(stylename, (3.2, y),
(2., y),
#xycoords="figure fraction", textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=dict(arrowstyle=stylename,
patchB=p,
shrinkA=5,
shrinkB=5,
fc="w", ec="k",
connectionstyle="arc3,rad=-0.05",
),
bbox=dict(boxstyle="square", fc="w"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax1 = fig.add_subplot(121, frameon=False, xticks=[], yticks=[])
make_boxstyles(ax1)
ax2 = fig.add_subplot(122, frameon=False, xticks=[], yticks=[])
make_arrowstyles(ax2)
plt.show()
| unlicense |
HerdOfBears/Learning_Machine_Learning | Reinforcement Learning/double_DQN.py | 1 | 9334 | """
Author: Jyler Menard
USES DOUBLE Q-LEARNING which is NOT the same as a double DQN.
Purpose: implement a Deep Q Network that uses double Q-learning rather than standard Q-learning.
Q-learning can easily overestimate the value of an action from a state, resulting in overoptimistic value estimates.
Double Q-learning decouples the action selection step and the action evaluation step.
"""
import numpy as np
import matplotlib.pyplot as plt
import gym
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
#import reinforcement.cart_pole_rbf as cpr
GAMMA = 0.99
ALL_POSSIBLE_ACTIONS = [0,1,2]
GAME = "CartPole-v1"
class NeuralNetwork():
def __init__(self, n_observations, n_actions):
# n_observations --> number of input nodes
# n_actions --> number of output nodes
self.n_actions = n_actions
self.n_observations = n_observations
print("Using Feed-forward Neural Network")
self.scaler = StandardScaler()
# MEMORY FOR EXPERIENCE REPLAY
self.mem = []
self.mem_min_size = 150
self.mem_max_size = 10000
self.mem_full = 0 # Default: False
self.tester = 0
##
# DEFINE NN ARCHITECTURE
##
learning_rate = 2.5e-4
hid1 = 200 #
hid2 = 200
#hid3 = 500
#print("hid1 = ", hid1, " hid2 = ",hid2)
print("hid1 = ",hid1, " learning_rate = ",learning_rate)
# DEFINE PLACEHOLDER(S)
self.x = tf.placeholder(tf.float32, shape=[None,n_observations])
self.y_true = tf.placeholder(tf.float32, shape=[None,n_actions])
self.A = tf.placeholder(tf.float32, shape=[None,n_actions])
# DEFINE VARIABLES
self.W1 = tf.Variable(tf.truncated_normal([n_observations,hid1],mean=0.0,stddev=0.1))
self.b1 = tf.Variable(tf.constant(0.1, shape=[hid1]))
self.W2 = tf.Variable(tf.truncated_normal([hid1,hid2],mean=0.0,stddev=0.1))
self.b2 = tf.Variable(tf.constant(0.1, shape=[hid2]))
#self.W3 = tf.Variable(tf.truncated_normal([hid2,hid3],mean=0.0,stddev=0.1))
#self.b3 = tf.Variable(tf.constant(0.1, shape=[hid3]))
self.W4 = tf.Variable(tf.truncated_normal([hid2, n_actions],mean=0.0, stddev=0.1))
self.b4 = tf.Variable(tf.constant(0.1, shape=[n_actions]))
# DEFINE ARCHITECTURE
y1 = tf.matmul(self.x, self.W1) + self.b1
z1 = tf.nn.tanh(y1)
y2 = tf.matmul(z1, self.W2) + self.b2
z2 = tf.nn.tanh(y2)
#y3 = tf.matmul(z2, self.W3) + self.b3
z3 = z2#tf.nn.relu(y3)
y_pred = tf.matmul(z3, self.W4) + self.b4
# DEFINE OPERATIONS AND COST FUNCTION
#selected_action_values = tf.reduce_sum(
#tf.multiply(y_pred,self.A),
# y_pred * tf.one_hot(self.A, n_actions),
# keepdims=True
#reduction_indices=[1]
# )
selected_action_values = y_pred * self.A#tf.one_hot(self.A, n_actions)
delta = selected_action_values - self.y_true
#delta = y_pred - self.y_true
#cost = tf.reduce_sum( delta*delta )
cost = tf.reduce_sum(tf.square(delta))
# OPS
self.train_ops = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
#self.train_ops = tf.train.AdamOptimizer(learning_rate).minimize(cost)
self.predict_ops = y_pred
self.sess = tf.InteractiveSession()
sess = self.sess
init = tf.global_variables_initializer()
sess.run(init)
self.grad_vals = []
pass
def feedfwd(self,X,size):
		# Helper (not used elsewhere in this script): one dense layer + ReLU
		# built from the first hidden layer's parameters.
		y = tf.matmul(X, self.W1) + self.b1
z = tf.nn.relu(y)
return z
def update_test(self,num):
self.tester += 1
def partial_fit(self,G,X):
# X --> observations, 1x4 initially
# G --> vector of returns, 1x2 initially
#print("Shape = ", G.shape, " G = ",G)
if self.mem_full:
batch_X, batch_G, batch_A = self.batch_replay(32)
feed_dictionary = {self.x:batch_X,self.y_true:batch_G, self.A:batch_A}
self.sess.run(self.train_ops, feed_dict=feed_dictionary)
def predict(self,X):
# X --> observations
if not self.mem_full:
return np.random.random((1,self.n_actions))
y = self.sess.run(self.predict_ops, feed_dict={self.x:X})
#print("predicted y = ",y)
return y
def get_state(self,observations):
shape = observations.shape[0]
y = observations.reshape((1,shape))
return y
def store_in_mem(self,s,a,r,s_prime,G):
tup_4 = (s,a,r,s_prime,G)
if self.mem_full:
if len(self.mem)>=self.mem_max_size:
self.mem.pop(0)
self.mem.append(tup_4)
else:
self.mem.append(tup_4)
if len(self.mem) == self.mem_min_size:
print("Memory full")
self.mem_full = 1
def batch_replay(self, batch_size):
# mem filled with 4-tuples (s,a,r,s')
# Need to grab random batch of size batch_size
temp_batches = self.mem.copy()
np.random.shuffle(temp_batches)
temp_batches = temp_batches[:batch_size]
batch_G = np.zeros((batch_size,self.n_actions))
batch_X = np.zeros((batch_size,self.n_observations))
batch_A = np.zeros((batch_size,self.n_actions))#,dtype=np.int32)
#batch_A = []
for i in range(batch_size):
s, a, r, s_prime,temp_G = temp_batches[i]
#V_s_prime = self.predict(s_prime)
#batch_G[i][a] = r + GAMMA*np.max(V_s_prime)
batch_G[i] = temp_G
batch_X[i] = s
#batch_X[i] *= batch_A[i]
batch_A[i][a] = 1
#batch_A.append(a)
#print(batch_A)
return batch_X, batch_G, batch_A
def epsilon_greedy(model,model_2, s, epsilon, env):
p = np.random.random()
if p <= epsilon:
action = env.action_space.sample()#np.random.choice(ALL_POSSIBLE_ACTIONS)
return action
# Compute the value for each action given the state
V = model.predict(s)
V_2 = model_2.predict(s)
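	# Act greedily on the sum of the two estimates (equivalent to their average).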
return np.argmax(V + V_2)
def get_return(model_1,model_2, s_prime,a,r, target_model):
	## target_model says which model evaluates the chosen greedy action, i.e. whose estimate forms the target Y of Y - Y_pred.
if target_model == 1:
		# model 2 selects the greedy action in s'; model 1 evaluates it (this target is used when updating model 2).
V_s_prime = model_2.predict(s_prime)
#print(V_s_prime, V_s_prime.shape)
V_s_prime_eval_act = model_1.predict(s_prime)
state_act_val = V_s_prime_eval_act[0][np.argmax(V_s_prime)]
G = np.zeros((1,V_s_prime.shape[1]))
else:
		# model 1 selects the greedy action in s'; model 2 evaluates it (this target is used when updating model 1).
V_s_prime = model_1.predict(s_prime)
#print(V_s_prime, V_s_prime.shape)
V_s_prime_eval_act = model_2.predict(s_prime)
state_act_val = V_s_prime_eval_act[0][np.argmax(V_s_prime)]
G = np.zeros((1,V_s_prime.shape[1]))
G[0][a] = r + GAMMA*state_act_val
return G
def reward_function(observation, target_pos):
y = (target_pos - observation[0])/(target_pos*3)
return abs(y * 100)
def play_episode(env, model, model_2, epsilon, tot_acts):
done = False
obs = env.reset()
s = model.get_state(obs)
num = 0
run_avg = 0
prnt = 1
while not done and num<500:
num+=1
if num>300 and prnt==1:
print("num > 300, performing very well")
prnt = 0
tot_acts += 1
a = epsilon_greedy(model,model_2, s, epsilon,env)
observation, r, done, _ = env.step(a)
s_prime = model.get_state(observation)
# FOR CART-POLE GAME
if done:
r = -200
if r >-100:
run_avg += 1
# FOR MOUNTAIN CAR
#if observation[0] > 0:
# r = +50
#r = reward_function(observation, 0.6)
num_p = np.random.random()
if num_p >= 0.5:
G = get_return(model,model_2, s_prime, a,r,2)
model.store_in_mem(s,a,r,s_prime,G)
model.partial_fit(G, s)
else:
G = get_return(model,model_2, s_prime, a,r,1)
model_2.store_in_mem(s,a,r,s_prime,G)
model_2.partial_fit(G, s)
s = s_prime
return run_avg, tot_acts
def main(N=100):
#env = gym.make("CartPole-v1")
env = gym.make(GAME)
	record_bool = input("Record every perfect cube training episode? [Y/n]")
	while record_bool not in ["Y", "n"]:
		print("Wrong input")
		record_bool = input("Record every perfect cube training episode? [Y/n]")
if record_bool=="Y":
env = gym.wrappers.Monitor(env, "videos",force=True)
else:
pass
D = len(env.observation_space.sample())
K = env.action_space.n
model = NeuralNetwork(D,K)
model_2 = NeuralNetwork(D,K)
running_average = []
positions = []
tot_run_avg = 0
tot_acts = 0
for i in range(N):
epsilon = 1.0/(np.sqrt(i) + 1)
temp_run_avg, temp_tot_acts = play_episode(env, model, model_2, epsilon, tot_acts)
tot_run_avg += temp_run_avg
		tot_acts = temp_tot_acts  # play_episode already returns the accumulated total
if i%50 == 0 and i!=0:
tot_run_avg/= 50
print("episode = ",i, " avg over 50 = ",tot_run_avg)
running_average.append(tot_run_avg)
tot_run_avg = 0
plt.plot(running_average)
	plt.xlabel("No. games (x50)")
plt.ylabel("50-Game Time Average")
plt.show()
input("test?")
test(model, model_2, env)
def test(model,model_2, env):
num=0
alpha = 0.1
for i in range(10):
done = False
obs = env.reset()
s = model.get_state(obs)
while not done:
# if i == 1:
# env.render()
a = epsilon_greedy(model,model_2, s, -1,env)
observation, r, done, _ = env.step(a)
s_prime = model.get_state(observation)
s = s_prime
num+=1
# if i == 1:
# env.close()
print("tot = ",num/10)
#env = gym.make("CartPole-v1")
env = gym.make(GAME)
done =False
obs = env.reset()
s = model.get_state(obs)
while not done:
env.render()
a = epsilon_greedy(model,model_2, s, -1,env)
observation, r, done, _ = env.step(a)
s_prime = model.get_state(observation)
s = s_prime
env.close()
| mit |
nrhine1/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
kevindehecker/paparazzi | sw/tools/calibration/calibrate_gyro.py | 87 | 4686 | #! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#
# calibrate gyrometers using turntable measurements
#
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from scipy import linspace, polyval, stats
import matplotlib.pyplot as plt
import calibration_utils
#
# lisa 3
# p : a=-4511.16 b=31948.34, std error= 0.603
# q : a=-4598.46 b=31834.48, std error= 0.734
# r : a=-4525.63 b=32687.95, std error= 0.624
#
# lisa 4
# p : a=-4492.05 b=32684.94, std error= 0.600
# q : a=-4369.63 b=33260.96, std error= 0.710
# r : a=-4577.13 b=32707.72, std error= 0.730
#
# crista
# p : a= 3864.82 b=31288.09, std error= 0.866
# q : a= 3793.71 b=32593.89, std error= 3.070
# r : a= 3817.11 b=32709.70, std error= 3.296
#
def main():
usage = "usage: %prog --id <ac_id> --tt_id <tt_id> --axis <axis> [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store", type=int, default=-1,
help="aircraft id to use")
parser.add_option("-t", "--tt_id", dest="tt_id",
action="store", type=int, default=-1,
help="turntable id to use")
parser.add_option("-a", "--axis", dest="axis",
type="choice", choices=['p', 'q', 'r'],
help="axis to calibrate (p, q, r)",
action="store")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if options.ac_id < 0 or options.ac_id > 255:
parser.error("Specify a valid aircraft id number!")
if options.tt_id < 0 or options.tt_id > 255:
parser.error("Specify a valid turntable id number!")
if options.verbose:
print("reading file "+filename+" for aircraft "+str(options.ac_id)+" and turntable "+str(options.tt_id))
samples = calibration_utils.read_turntable_log(options.ac_id, options.tt_id, filename, 1, 7)
if len(samples) == 0:
print("Error: found zero matching messages in log file!")
print("Was looking for IMU_TURNTABLE from id: "+str(options.tt_id)+" and IMU_GYRO_RAW from id: "+str(options.ac_id)+" in file "+filename)
sys.exit(1)
if options.verbose:
print("found "+str(len(samples))+" records")
if options.axis == 'p':
axis_idx = 1
elif options.axis == 'q':
axis_idx = 2
elif options.axis == 'r':
axis_idx = 3
else:
parser.error("Specify a valid axis!")
#Linear regression using stats.linregress
t = samples[:, 0]
xn = samples[:, axis_idx]
(a_s, b_s, r, tt, stderr) = stats.linregress(t, xn)
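    # Fitted model: raw_gyro ~= a_s * turntable_rate + b_s, i.e. b_s is the
    # zero-rate (neutral) ADC value and a_s the ADC change per unit of rate.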
print('Linear regression using stats.linregress')
print(('regression: a=%.2f b=%.2f, std error= %.3f' % (a_s, b_s, stderr)))
print(('<define name="GYRO_X_NEUTRAL" value="%d"/>' % (b_s)))
print(('<define name="GYRO_X_SENS" value="%f" integer="16"/>' % (pow(2, 12)/a_s)))
#
# overlay fited value
#
ovl_omega = linspace(1, 7.5, 10)
ovl_adc = polyval([a_s, b_s], ovl_omega)
plt.title('Linear Regression Example')
plt.subplot(3, 1, 1)
plt.plot(samples[:, 1])
plt.plot(samples[:, 2])
plt.plot(samples[:, 3])
plt.legend(['p', 'q', 'r'])
plt.subplot(3, 1, 2)
plt.plot(samples[:, 0])
plt.subplot(3, 1, 3)
plt.plot(samples[:, 0], samples[:, axis_idx], 'b.')
plt.plot(ovl_omega, ovl_adc, 'r')
plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
antoinearnoud/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/almost_ideal_demand_system/aids_estimation_from_stata_three_categ.py | 4 | 2678 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 05 16:11:09 2016
@author: thomas.douenne
"""
import pandas as pd
import pkg_resources
import os
# Import data_quaids to get the results of the estimation run on Stata.
resultats_elasticite_depenses = dict()
resultats_elasticite_uncomp = dict()
borne_inferieure_el_dep = dict()
borne_superieure_el_dep = dict()
for year in ['carbu_all', 'energy_no_alime_all', 'energy_no_alime_2000', 'energy_no_alime_2005', 'energy_no_alime_2011']:
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
data_quaids = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quaids',
'data_quaids_{}.csv'.format(year)
), sep =',')
# Compute a weighted average of the elasticity of each household
# weights are the share of the household in total consumption
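    # i.e. for each category i: elasticity_i ~= sum over households of (depenses_tot_h / total) * mu_i_h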
data_quaids['part_depenses_tot'] = data_quaids['depenses_tot'] / sum(data_quaids['depenses_tot'])
data_quaids.fillna(0, inplace=True)
assert 0.999 < sum(data_quaids['part_depenses_tot']) < 1.001, "the sum of the shares is not equal to 1"
for i in range(1, 4):
data_quaids['el_{}'.format(i)] = \
data_quaids['mu_{}'.format(i)] * data_quaids['part_depenses_tot']
# Compute the estimation of the income elasticities of consumption
for i in range(1, 4):
resultats_elasticite_depenses['el_{0}_{1}'.format(i, year)] = sum(data_quaids['el_{}'.format(i)])
# Compute the 95% confidence interval for those elasticities
for i in range(1, 4):
borne_superieure_el_dep['borne_sup_{0}_{1}'.format(i, year)] = (
resultats_elasticite_depenses['el_{0}_{1}'.format(i, year)] + 1.96 *
(data_quaids['mu_{}'.format(i)].describe()['std'] /
len(data_quaids['mu_{}'.format(i)]) ** 0.5)
)
borne_inferieure_el_dep['borne_inf_{0}_{1}'.format(i, year)] = (
resultats_elasticite_depenses['el_{0}_{1}'.format(i, year)] - 1.96 *
(data_quaids['mu_{}'.format(i)].describe()['std'] /
len(data_quaids['mu_{}'.format(i)]) ** 0.5)
)
for i in range(1, 4):
data_quaids['el_uncomp_{}'.format(i)] = \
data_quaids['ce_{}_{}'.format(i, i)] * data_quaids['part_depenses_tot']
# Compute the estimation of the uncompensated price elasticities of consumption
for i in range(1, 4):
resultats_elasticite_uncomp['el_uncomp_{0}_{1}'.format(i, year)] = sum(data_quaids['el_uncomp_{}'.format(i)])
| agpl-3.0 |
ndingwall/scikit-learn | examples/cross_decomposition/plot_pcr_vs_pls.py | 15 | 6952 | """
==================================================================
Principal Component Regression vs Partial Least Squares Regression
==================================================================
This example compares `Principal Component Regression
<https://en.wikipedia.org/wiki/Principal_component_regression>`_ (PCR) and
`Partial Least Squares Regression
<https://en.wikipedia.org/wiki/Partial_least_squares_regression>`_ (PLS) on a
toy dataset. Our goal is to illustrate how PLS can outperform PCR when the
target is strongly correlated with some directions in the data that have a
low variance.
PCR is a regressor composed of two steps: first,
:class:`~sklearn.decomposition.PCA` is applied to the training data, possibly
performing dimensionality reduction; then, a regressor (e.g. a linear
regressor) is trained on the transformed samples. In
:class:`~sklearn.decomposition.PCA`, the transformation is purely
unsupervised, meaning that no information about the targets is used. As a
result, PCR may perform poorly in some datasets where the target is strongly
correlated with *directions* that have low variance. Indeed, the
dimensionality reduction of PCA projects the data into a lower dimensional
space where the variance of the projected data is greedily maximized along
each axis. Despite them having the most predictive power on the target, the
directions with a lower variance will be dropped, and the final regressor
will not be able to leverage them.
PLS is both a transformer and a regressor, and it is quite similar to PCR: it
also applies a dimensionality reduction to the samples before applying a
linear regressor to the transformed data. The main difference with PCR is
that the PLS transformation is supervised. Therefore, as we will see in this
example, it does not suffer from the issue we just mentioned.
"""
print(__doc__)
# %%
# The data
# --------
#
# We start by creating a simple dataset with two features. Before we even dive
# into PCR and PLS, we fit a PCA estimator to display the two principal
# components of this dataset, i.e. the two directions that explain the most
# variance in the data.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
n_samples = 500
cov = [[3, 3],
[3, 4]]
X = rng.multivariate_normal(mean=[0, 0], cov=cov, size=n_samples)
pca = PCA(n_components=2).fit(X)
plt.scatter(X[:, 0], X[:, 1], alpha=.3, label='samples')
for i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_)):
comp = comp * var # scale component by its variance explanation power
plt.plot([0, comp[0]], [0, comp[1]], label=f"Component {i}", linewidth=5,
color=f"C{i + 2}")
plt.gca().set(aspect='equal',
title="2-dimensional dataset with principal components",
xlabel='first feature', ylabel='second feature')
plt.legend()
plt.show()
# %%
# For the purpose of this example, we now define the target `y` such that it is
# strongly correlated with a direction that has a small variance. To this end,
# we will project `X` onto the second component, and add some noise to it.
y = X.dot(pca.components_[1]) + rng.normal(size=n_samples) / 2
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
axes[0].scatter(X.dot(pca.components_[0]), y, alpha=.3)
axes[0].set(xlabel='Projected data onto first PCA component', ylabel='y')
axes[1].scatter(X.dot(pca.components_[1]), y, alpha=.3)
axes[1].set(xlabel='Projected data onto second PCA component', ylabel='y')
plt.tight_layout()
plt.show()
# %%
# Projection on one component and predictive power
# ------------------------------------------------
#
# We now create two regressors: PCR and PLS, and for our illustration purposes
# we set the number of components to 1. Before feeding the data to the PCA step
# of PCR, we first standardize it, as recommended by good practice. The PLS
# estimator has built-in scaling capabilities.
#
# For both models, we plot the projected data onto the first component against
# the target. In both cases, this projected data is what the regressors will
# use as training data.
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
pcr = make_pipeline(StandardScaler(), PCA(n_components=1), LinearRegression())
pcr.fit(X_train, y_train)
pca = pcr.named_steps['pca'] # retrieve the PCA step of the pipeline
pls = PLSRegression(n_components=1)
pls.fit(X_train, y_train)
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
axes[0].scatter(pca.transform(X_test), y_test, alpha=.3, label='ground truth')
axes[0].scatter(pca.transform(X_test), pcr.predict(X_test), alpha=.3,
label='predictions')
axes[0].set(xlabel='Projected data onto first PCA component',
ylabel='y', title='PCR / PCA')
axes[0].legend()
axes[1].scatter(pls.transform(X_test), y_test, alpha=.3, label='ground truth')
axes[1].scatter(pls.transform(X_test), pls.predict(X_test), alpha=.3,
label='predictions')
axes[1].set(xlabel='Projected data onto first PLS component',
ylabel='y', title='PLS')
axes[1].legend()
plt.tight_layout()
plt.show()
# %%
# As expected, the unsupervised PCA transformation of PCR has dropped the
# second component, i.e. the direction with the lowest variance, despite
# it being the most predictive direction. This is because PCA is a completely
# unsupervised transformation, and results in the projected data having a low
# predictive power on the target.
#
# On the other hand, the PLS regressor manages to capture the effect of the
# direction with the lowest variance, thanks to its use of target information
# during the transformation: it can recognize that this direction is actually
# the most predictive. We note that the first PLS component is negatively
# correlated with the target, which comes from the fact that the signs of
# eigenvectors are arbitrary.
#
# We also print the R-squared scores of both estimators, which further confirms
# that PLS is a better alternative than PCR in this case. A negative R-squared
# indicates that PCR performs worse than a regressor that would simply predict
# the mean of the target.
print(f"PCR r-squared {pcr.score(X_test, y_test):.3f}")
print(f"PLS r-squared {pls.score(X_test, y_test):.3f}")
# %%
# As a final remark, we note that PCR with 2 components performs as well as
# PLS: this is because in this case, PCR was able to leverage the second
# component which has the most predictive power on the target.
pca_2 = make_pipeline(PCA(n_components=2), LinearRegression())
pca_2.fit(X_train, y_train)
print(f"PCR r-squared with 2 components {pca_2.score(X_test, y_test):.3f}")
| bsd-3-clause |
JT5D/scikit-learn | examples/applications/plot_stock_market.py | 5 | 8248 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import pylab as pl
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 01, 01)
d2 = datetime.datetime(2008, 01, 01)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WAG': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(symbol_dict.items()).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
pl.figure(1, facecolor='w', figsize=(10, 8))
pl.clf()
ax = pl.axes([0., 0., 1., 1.])
pl.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
pl.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=pl.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=pl.cm.hot_r,
norm=pl.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
pl.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=pl.cm.spectral(label / float(n_labels)),
alpha=.6))
pl.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
pl.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
pl.show()
| bsd-3-clause |
andaag/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
    eps : float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
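        # Backtracking line search on the step size alpha: try a projected
        # gradient step, then repeatedly shrink (or grow) alpha until the
        # sufficient decrease condition below flips.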
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
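# The projected gradient norm is taken only over the "free" entries
# (where the gradient is negative or the factor entry is positive) and
# compared against tol times the initial gradient norm computed above.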
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
JohannesBuchner/PLUTO | Tools/pyPLUTO/bin/GUI_pyPLUTO.py | 3 | 36620 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi,log10,max,min,cos,isnan, meshgrid,sqrt,abs
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import pyPLUTO as pp
import string
import time
from Tkinter import *
import sys
import os
class App:
def __init__(self,master):
# create toplevel window
frame = Frame(master)
frame.grid(ipadx=10,ipady=10)
try:
sys.argv[1]
except:
self.datatype = None
else:
self.datatype = sys.argv[1].split('--')[1]
if self.datatype == 'hdf5':
print "GUI currently doesnot support pyPLUTO AMR Reader!!"
sys.exit()
self.I = pp.Image()
self.Tool = pp.Tools()
self.lb1=Label(frame, text="Nstep").grid(row=0,column=0)
self.enstep = Entry(frame,width=8)
self.enstep.grid(row=0,column=1)
self.enstep.insert(0, "0")
self.LoadedNstep = StringVar()
self.PresentTime = StringVar()
self.myData = self.loaddata()
self.varkeys = self.myData.vars
self.wdir = self.myData.wdir
if self.myData.n3 != 1:
self.Geom = '3D'
elif self.myData.n3 == 1 and self.myData.n2 != 1:
self.Geom = '2D'
else:
self.Geom = '1D'
self.ldatabutton=Button(frame,text="Load data",command=self.loaddata)
self.ldatabutton.grid(row=0,column=2)
############### MARK THE CUTS #################################
self.ex1 = Entry(frame,width=5)
self.ex1.grid(row=2,column=0)
self.ex1.insert(0, "x1")
self.ex2 = Entry(frame,width=5)
self.ex2.grid(row=2,column=1)
self.ex2.insert(0, "x2")
self.ex3 = Entry(frame,width=5)
self.ex3.grid(row=2,column=2)
self.ex3.insert(0, "x3")
if self.Geom == '2D':
self.ex3.config(state='disabled')
if self.Geom == '1D':
self.ex3.config(state='disabled')
self.ex2.config(state='disabled')
self.ex1.config(state='disabled')
# place a graph somewhere here
self.f = Figure(figsize=(7,7), dpi=100)
self.a = self.f.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.f, master=root)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0,column=3,columnspan=10,rowspan=10,sticky=E)
#self.toolbar = NavigationToolbar2TkAgg(self.canvas,tl)
#self.toolbar.update()
#self.canvas._tkcanvas.grid(row=60,column=15,sticky=E)
self.v = StringVar()
self.v.set("None")
################ VARIABLES TO PLOT #################################
for i in ['bx1s', 'bx2s', 'bx3s']:
try:
self.varkeys.remove(i)
except ValueError:
pass
for j in range(len(self.varkeys)):
self.ldata = Radiobutton(frame,text=self.varkeys[j],variable=self.v,value=self.varkeys[j],command=self.getmyvar)
self.ldata.grid(row=3+j,column=0,sticky=W)
################ SLICES CHOICE #################################
self.slvar = StringVar()
self.slvar.set("Choose Slice")
if self.Geom == '3D' :
SliceList = ("Along x1","Along x2","Along x3","Along x1-x2","Along x2-x3","Along x3-x1")
elif self.Geom == '2D' :
SliceList = ("Along x1", "Along x2", "Along x1-x2")
else:
SliceList = ()
for j in range(len(SliceList)):
self.sldata = Radiobutton(frame,text=SliceList[j],variable=self.slvar,value=SliceList[j],command=self.setslice)
self.sldata.grid(row=3+j,column=1,sticky=W)
############### PLOT PROPERTIES #################################
self.logvar = IntVar()
self.chkb = Checkbutton(frame,text="Log ",variable=self.logvar,onvalue=1,offvalue=0,command=self.logchkcall)
self.chkb.grid(row=3,column=2,sticky=W)#(row=15,column=0,sticky=W)
self.polarvar = IntVar()
self.polchkb = Checkbutton(frame,text="Polar",variable=self.polarvar,onvalue=1,offvalue=0,command=self.polchkcall)
self.polchkb.grid(row=4,column=2,sticky=W)#(row=15,column=1)
if self.Geom == '1D':
self.polchkb.config(state='disabled')
self.polarvar.set(0)
self.preaspect = IntVar()
self.aspectb = Checkbutton(frame,text="Aspect",variable=self.preaspect,onvalue=1,offvalue=0,command=self.aspchkcall)
self.aspectb.grid(row=5,column=2,sticky=W)#(row=15,column=2)
if self.Geom == '1D':
self.aspectb.config(state='disabled')
################ X and Y LABELS #################################
self.lb2=Label(frame,text="Labels").grid(row=22,column=0)
self.xlb = Entry(frame,width=15)
self.xlb.grid(row=22,column=1)
self.xlb.insert(0, "xlabel")
self.ylb = Entry(frame,width=15)
self.ylb.grid(row=22,column=2)
self.ylb.insert(0, "ylabel")
############### X and Y RANGE#######################
self.lb2a=Label(frame,text="XRange").grid(row=24,column=0)
self.lb2b=Label(frame,text="YRange").grid(row=26,column=0)
self.lb2c=Label(frame,text="VarRange").grid(row=28,column=0)
self.xrmin = Entry(frame,width=15)
self.xrmin.grid(row=24,column=1)
self.xrmin.insert(0,'')
self.xrmax = Entry(frame,width=15)
self.xrmax.grid(row=24,column=2)
self.xrmax.insert(0,'')
self.yrmin = Entry(frame,width=15)
self.yrmin.grid(row=26,column=1)
self.yrmin.insert(0,'')
self.yrmax = Entry(frame,width=15)
self.yrmax.grid(row=26,column=2)
self.yrmax.insert(0,'')
self.varmin = Entry(frame,width=15)
self.varmin.grid(row=28,column=1)
self.varmin.insert(0,'')
self.varmax = Entry(frame,width=15)
self.varmax.grid(row=28,column=2)
self.varmax.insert(0,'')
if self.Geom == '1D':
self.yrmin.config(state='disabled')
self.yrmax.config(state='disabled')
################ CONTOURS #################################
self.lb3=Label(frame,text="Contours").grid(row=16,column=0)
self.contvar = IntVar()
self.chkb = Checkbutton(frame,text="Contour",variable=self.contvar,onvalue=1,offvalue=0,command=self.contchkcall)
self.chkb.grid(row=6,column=2,sticky=W)#(row=16,column=0,sticky=W)
self.plcont = StringVar()
self.contkeys = ["None"]
if "bx3" in self.varkeys:
for item in self.varkeys:
self.contkeys.append(item)
self.contkeys.append("x1*bx3")
if "Ax3" in self.varkeys:
self.contkeys.append("x1*Ax3")
else:
for item in self.varkeys:
self.contkeys.append(item)
self.plcont.set("None")
self.contmenu = OptionMenu(frame, self.plcont,*self.contkeys)
self.contmenu.grid(row=16,column=1)
self.xlevb = Entry(frame,width=15)
self.xlevb.grid(row=16,column=2,sticky=W)
self.xlevb.insert(0, "Levels")
self.xlevb.config(state='disabled')
self.contmenu.config(state='disabled')
if self.Geom == '1D':
self.chkb.config(state = 'disabled')
################ ARROWS #################################
self.lb4=Label(frame,text="Arrows").grid(row=19,column=0)
self.arrowvar = IntVar()
self.arrowchkb = Checkbutton(frame,text="Arrows",variable=self.arrowvar,onvalue=1,offvalue=0,command=self.arrchkcall)
self.arrowchkb.grid(row=7,column=2,sticky=W)#(row=16,column=0,sticky=W)
self.arrspb = Entry(frame,width=15)
self.arrspb.grid(row=19,column=2,sticky=W)
self.arrspb.insert(0, "20")
self.plarr = StringVar()
self.arrkeys = ["None"]
self.arrkeys.append("Vp")
self.arrkeys.append("Vp_norm")
if "bx1" in self.varkeys:
self.arrkeys.append("Bp")
self.arrkeys.append("Bp_norm")
self.plarr.set("None")
self.arrmenu = OptionMenu(frame,self.plarr,*self.arrkeys)
self.arrmenu.grid(row=19,column=1)
self.arrmenu.config(state='disabled')
self.arrspb.config(state='disabled')
if self.Geom == '1D':
self.arrowchkb.config(state = 'disabled')
################ VARIOUS PLOTTING BUTTONS #################################
self.pltbutton=Button(frame,text="Plot",command=self.plotfinal)
self.pltbutton.grid(row=36,column=0)
if self.Geom == '1D':
self.pltbutton.config(state='active')
else:
self.pltbutton.config(state='disabled')
self.surfbutton=Button(frame,text="Surface",command=self.plotsurface)
self.surfbutton.grid(row=36,column=1)
self.surfbutton.config(state='disabled')
#if self.Geom == '1D':
# self.surfbutton.config(state='disabled')
self.clrbutton=Button(frame,text="Clear",command=self.plotclear)
self.clrbutton.grid(row=36,column=2)
################ INFORMATION #################################
self.lbinf0 = Label(frame,text="Information",font=("Times",12,"bold"))
self.lbinf0.grid(row=47,column=0,sticky=W,columnspan=3)
self.lbinf1a = Label(frame,text="Dir :",font=("Times",10,"bold")).grid(row=49,column=0,sticky=W,columnspan=3)
self.lbinf1 = Label(frame,text=self.wdir).grid(row=50,column=0,sticky=W,columnspan=3)
self.lbinf2a = Label(frame,text="Domain :",font=("Times",10,"bold")).grid(row=51,column=0,sticky=W,columnspan=3)
self.lbinf2 = Label(frame,text="n1 x n2 x n3 = %d x %d x %d " % (self.myData.n1,self.myData.n2,self.myData.n3)).grid(row=52,column=0,sticky=W,columnspan=3)
self.lbinf3a = Label(frame,text="Time Status",font=("Times",10,"bold")).grid(row=53,column=0,sticky=W,columnspan=3)
self.lbinf4 = Label(frame,text="Nlast = %d"% pp.nlast_info(w_dir=self.wdir,datatype=self.datatype)['nlast']).grid(row=54,column=0,sticky=W,columnspan=3)
self.lbinf5 = Label(frame,textvariable = self.LoadedNstep).grid(row=55,column=0,sticky=W,columnspan=3)
self.lbinf6 = Label(frame,textvariable = self.PresentTime).grid(row=56,column=0,sticky=W,columnspan=3)
################ VARIOUS FUNCTIONS #################################
def loaddata(self):
try:
int(self.enstep.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the proper value of Nstep"
else:
mynstep=int(self.enstep.get())
self.D = pp.pload(mynstep,datatype=self.datatype)
self.LoadedNstep.set("Loaded Nstep = "+self.enstep.get())
self.PresentTime.set("Present Time = "+str(self.D.SimTime) + " [cu]")
return self.D
def getmyvar(self):
try:
self.v.get() != "None"
except KeyError:
print "Specify the variable to plot"
else:
self.myvar=self.v.get()
def logchkcall(self):
self.logchk = self.logvar.get()
def contchkcall(self):
self.contchk = self.contvar.get()
if self.contchk == 1:
self.contmenu.config(state='normal')
self.xlevb.config(state='normal')
else:
self.contmenu.config(state='disabled')
self.xlevb.config(state='disabled')
def arrchkcall(self):
self.arrchk = self.arrowvar.get()
if self.arrchk == 1:
self.arrmenu.config(state='normal')
self.arrspb.config(state='normal')
else:
self.arrmenu.config(state='disabled')
self.arrspb.config(state='disabled')
def aspchkcall(self):
self.aspchk=self.preaspect.get()
def polchkcall(self):
self.polchk = self.polarvar.get()
def setslice(self):
self.slicename=self.slvar.get()
if self.slicename == "Along x1" or self.slicename == "Along x2" or self.slicename == "Along x3":
self.surfbutton.config(state='disabled')
self.arrowchkb.config(state = 'disabled')
self.arrowvar.set(0)
self.chkb.config(state = 'disabled')
self.contvar.set(0)
self.pltbutton.config(state='active')
self.polchkb.config(state='disabled')
self.polarvar.set(0)
else:
self.pltbutton.config(state='disabled')
self.arrowchkb.config(state = 'normal')
self.chkb.config(state = 'normal')
self.surfbutton.config(state='active')
self.polchkb.config(state='normal')
if self.slicename == "Along x2-x3":
self.polchkb.config(state='disabled')
self.polarvar.set(0)
def plotclear(self):
self.f.clf()
self.a = self.f.add_subplot(111)
self.canvas.show()
def plotfinal(self):
if self.getplotvar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getvarrange()[0],self.getvarrange()[1]])
self.a.plot(self.x,self.var)
self.a.set_aspect('auto')
self.a.set_xlabel(self.xlb.get())
self.a.set_ylabel(self.ylb.get())
self.canvas.show()
def plotsurface(self):
tdum = time.time()
self.plotclear()
if self.preaspect.get() == 1:
self.a.set_aspect('equal')
else:
self.a.set_aspect('auto')
if self.polarvar.get() == 1:
if self.drawpolar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getyaxisrange()[0],self.getyaxisrange()[1]])
self.image = self.a.imshow(self.SphData[self.myvar], origin='lower',extent=self.extent, interpolation='nearest',cmap="jet", vmin=self.getvarrange()[0],vmax=self.getvarrange()[1])
self.f.colorbar(self.image)
else:
if self.getsurfvar() == True:
self.a.axis([self.getxaxisrange()[0],self.getxaxisrange()[1],self.getyaxisrange()[0],self.getyaxisrange()[1]])
self.image=self.a.pcolormesh(self.x,self.y,self.var,cmap='jet',vmin=self.getvarrange()[0],vmax=self.getvarrange()[1])
self.f.colorbar(self.image)
if self.contvar.get() == 1:
try:
self.plcont.get() != "None"
except KeyError:
print "Specify the variable for Contour"
else:
self.drawcontour()
self.contlevlist=[]
self.contlevstr = string.split(self.xlevb.get(),',')
try:
if self.contlevstr[0] == 'log':
self.flevel = self.contlevstr[1]
self.varcont = log10(self.varcont)
else:
self.flevel = self.contlevstr[0]
float(self.flevel)
self.contlevlist = [float(self.flevel)]
except:
self.contlevlist = 5
else:
for j in range(1,len(self.contlevstr)):
self.contlevlist.append(float(self.contlevstr[j]))
self.cs1 = self.a.contour(self.xcont,self.ycont,self.varcont,self.contlevlist,colors="w")
self.a.clabel(self.cs1,inline=True)
if self.arrowvar.get() == 1:
try:
self.plarr.get() != "None"
except KeyError:
print "Specify the variable for plotting the arrow"
else:
self.drawarrow()
self.a.quiver(self.xcong, self.ycong, self.xveccong, self.yveccong,color='w')
self.a.set_xlabel(self.xlb.get())
self.a.set_ylabel(self.ylb.get())
self.canvas.show()
def getvarrange(self):
try:
float(self.varmin.get())
except:
if self.polarvar.get() != 1:
self.varminval = min(self.var)
else:
self.varminval = min(self.SphData[self.myvar][self.isnotnan].flat)#self.minPl
else:
self.varminval = float(self.varmin.get())
try:
float(self.varmax.get())
except:
if self.polarvar.get() != 1:
self.varmaxval = max(self.var)
else:
self.varmaxval = max(self.SphData[self.myvar][self.isnotnan].flat)#self.maxPl
else:
self.varmaxval = float(self.varmax.get())
return [self.varminval,self.varmaxval]
def getxaxisrange(self):
try:
float(self.xrmin.get())
except:
if self.polarvar.get() != 1:
self.xminval = min(self.x)
else:
self.xminval = min(self.R.flat)
else:
self.xminval = float(self.xrmin.get())
try:
float(self.xrmax.get())
except:
if self.polarvar.get() != 1:
self.xmaxval = max(self.x)
else:
self.xmaxval = max(self.R.flat)
else:
self.xmaxval = float(self.xrmax.get())
return [self.xminval,self.xmaxval]
def getyaxisrange(self):
try:
float(self.yrmin.get())
except:
if self.polarvar.get() != 1:
self.yminval = min(self.y)
else:
self.yminval = min(self.Z.flat)
else:
self.yminval = float(self.yrmin.get())
try:
float(self.yrmax.get())
except:
if self.polarvar.get() != 1:
self.ymaxval = max(self.y)
else:
self.ymaxval = max(self.Z.flat)
else:
self.ymaxval = float(self.yrmax.get())
return [self.yminval,self.ymaxval]
def getplotvar(self):
self.sucess = False
if self.logvar.get() == 1:
self.var = log10(self.D.__getattribute__(self.myvar))
else:
self.var = self.D.__getattribute__(self.myvar)
if self.Geom == '1D':
self.x = self.D.x1
self.sucess = True
else:
if self.slicename == "Along x1":
self.x = self.D.x1
if self.D.n3 == 1:
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.var = self.var[:,int(self.ex2.get())]
self.sucess = True
else:
try:
int(self.ex2.get().strip().split()[0])
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 or x3 cut"
else:
self.var = self.var[:,int(self.ex2.get()),int(self.ex3.get())]
self.sucess = True
elif self.slicename == "Along x2":
self.x = self.D.x2
if self.D.n3 == 1:
try:
int(self.ex1.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 cut"
else:
self.var = self.var[int(self.ex1.get()),:]
self.sucess = True
else:
try:
int(self.ex1.get().strip().split()[0])
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 or x3 cut"
else:
self.var = self.var[int(self.ex1.get()),:,int(self.ex3.get())]
self.sucess = True
else:
self.x = self.D.x3
try:
int(self.ex1.get().strip().split()[0])
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 or x2 cut"
else:
self.var = self.var[int(self.ex1.get()),int(self.ex2.get()),:]
self.sucess = True
return self.sucess
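# getsurfvar extracts the 2D slice selected in the GUI (x1-x2, x2-x3 or
# x3-x1), crops it to the requested axis ranges and transposes it so it can
# be passed straight to pcolormesh; it returns False (and prints a message)
# if a required cut index is missing.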
def getsurfvar(self):
self.sucess = False
if self.logvar.get() == 1:
self.var = log10(self.D.__getattribute__(self.myvar))
else:
self.var = self.D.__getattribute__(self.myvar)
if self.slicename == "Along x1-x2":
self.x = self.D.x1
self.y = self.D.x2
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
if self.D.n3 == 1:
self.var = self.var[xmineed:xmaneed,ymineed:ymaneed].T
self.sucess = True
else:
try:
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x3 cut"
else:
self.var = self.var[xmineed:xmaneed,ymineed:ymaneed,int(self.ex3.get())].T
self.sucess = True
elif self.slicename == "Along x2-x3":
self.x = self.D.x2
self.y = self.D.x3
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
try:
int(self.ex1.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x1 cut"
else:
self.var = self.var[int(self.ex1.get()),xmineed:xmaneed,ymineed:ymaneed].T
self.sucess = True
else:
self.x = self.D.x1
self.y = self.D.x3
xmineed = (abs(self.x-self.getxaxisrange()[0])).argmin()
xmaneed = (abs(self.x-self.getxaxisrange()[1])).argmin()
ymineed = (abs(self.y-self.getyaxisrange()[0])).argmin()
ymaneed = (abs(self.y-self.getyaxisrange()[1])).argmin()
self.x = self.x[xmineed:xmaneed]
self.y = self.y[ymineed:ymaneed]
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.var = self.var[xmineed:xmaneed,int(self.ex2.get()),ymineed:ymaneed].T
self.sucess = True
return self.sucess
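# drawpolar maps the loaded data onto a cartesian R-Z (or R-phi) grid via
# pyPLUTO's Image.getSphData, stores the plot extent, masks NaN cells and,
# for signed quantities other than rho and prs, pins the colour range to be
# symmetric about zero before an optional log10 is applied.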
def drawpolar(self):
self.sucess = False
if self.slicename == "Along x1-x2":
if self.D.n3 == 1:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=False)
self.sucess = True
else:
try:
int(self.ex3.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x3 cut"
else:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=False,x3cut=int(self.ex3.get()))
self.sucess = True
if self.slicename == "Along x3-x1":
try:
int(self.ex2.get().strip().split()[0])
except (ValueError, IndexError):
print "Specify the value of x2 cut"
else:
self.R,self.Z,self.SphData = self.I.getSphData(self.D,w_dir=self.wdir,datatype=self.datatype, rphi=True, x2cut=int(self.ex2.get()))
self.sucess = True
if self.sucess == True:
self.extent=(min(self.R.flat),max(self.R.flat),min(self.Z.flat),max(self.Z.flat))
self.dRR=max(self.R.flat)-min(self.R.flat)
self.dZZ=max(self.Z.flat)-min(self.Z.flat)
self.isnotnan = ~isnan(self.SphData[self.myvar])  # boolean "not NaN" mask (~ instead of unary minus, which newer numpy rejects)
self.maxPl=max(self.SphData[self.myvar][self.isnotnan].flat)
self.minPl=min(self.SphData[self.myvar][self.isnotnan].flat)
self.normrange=False
if self.minPl<0:
self.normrange=True
if self.maxPl>-self.minPl:
self.minPl=-self.maxPl
else:
self.maxPl=-self.minPl
if (self.normrange and self.myvar !='rho' and self.myvar !='prs'):
self.SphData[self.myvar][-1][-1]=self.maxPl
self.SphData[self.myvar][-1][-2]=self.minPl
if self.logvar.get() == 1:
self.SphData[self.myvar] = log10(self.SphData[self.myvar])
return self.sucess
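# drawcontour selects the variable to contour on the current slice; the
# special entries 'x1*bx3' and 'x1*Ax3' multiply the field by the x1
# coordinate before contouring (contours of x1*Ax3 are commonly used to
# trace poloidal field lines in axisymmetric runs).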
def drawcontour(self):
if self.polarvar.get() != 1:
if self.slicename == "Along x1-x2":
self.xcont = self.D.x1
self.ycont = self.D.x2
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x2.T)
if self.D.n3 == 1:
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.Xmesh*(self.D.Ax3.T)
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.Xmesh*(self.D.bx3.T)
else:
self.varcont = self.D.__getattribute__(self.plcont.get())[:,:].T
else:
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.Xmesh*(self.D.Ax3[:,:,int(self.ex3.get())].T)
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.Xmesh*(self.D.bx3[:,:,int(self.ex3.get())].T)
else:
self.varcont = self.D.__getattribute__(self.plcont.get())[:,:,int(self.ex3.get())].T
elif self.slicename == "Along x2-x3":
self.xcont = self.D.x2
self.ycont = self.D.x3
self.varcont = self.D.__getattribute__(self.plcont.get())[int(self.ex1.get()),:,:].T
else:
self.xcont = self.D.x1
self.ycont = self.D.x3
self.varcont = self.D.__getattribute__(self.plcont.get())[:,int(self.ex2.get()),:].T
else:
self.xcont = self.R
self.ycont = self.Z
if self.plcont.get() == 'x1*Ax3':
self.varcont = self.R*(self.SphData['Ax3'])
elif self.plcont.get() == 'x1*bx3':
self.varcont = self.R*(self.SphData['bx3'])
else:
if self.logvar.get() == 1 and self.plcont.get() == self.myvar:
self.varcont = 10**(self.SphData[self.plcont.get()])
else:
self.varcont = self.SphData[self.plcont.get()]
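# drawarrow regrids the mesh and the selected vector components (velocity
# Vp or magnetic field Bp) onto a coarse grid with Tools.congrid for the
# quiver plot; the '_norm' variants divide each arrow by its magnitude so
# that only the direction is displayed.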
def drawarrow(self):
if self.polarvar.get() != 1:
if self.slicename == "Along x1-x2":
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x2.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
if self.D.n3 == 1:
self.vel1 = self.D.vx1[:,:].T
self.vel2 = self.D.vx2[:,:].T
else:
self.vel1 = self.D.vx1[:,:,int(self.ex3.get())].T
self.vel2 = self.D.vx2[:,:,int(self.ex3.get())].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
if self.D.n3 == 1:
self.mag1 = self.D.bx1[:,:].T
self.mag2 = self.D.bx2[:,:].T
else:
self.mag1 = self.D.bx1[:,:,int(self.ex3.get())].T
self.mag2 = self.D.bx2[:,:,int(self.ex3.get())].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
elif self.slicename == "Along x2-x3":
self.Xmesh, self.Ymesh = meshgrid(self.D.x2.T,self.D.x3.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
self.vel1 = self.D.vx2[int(self.ex1.get()),:,:].T
self.vel2 = self.D.vx3[int(self.ex1.get()),:,:].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
self.mag1 = self.D.bx2[int(self.ex1.get()),:,:].T
self.mag2 = self.D.bx3[int(self.ex1.get()),:,:].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
else:
self.Xmesh, self.Ymesh = meshgrid(self.D.x1.T,self.D.x3.T)
self.xcong = self.Tool.congrid(self.Xmesh,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Ymesh,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
self.vel1 = self.D.vx1[:,int(self.ex2.get()),:].T
self.vel2 = self.D.vx3[:,int(self.ex2.get()),:].T
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
self.mag1 = self.D.bx1[:,int(self.ex2.get()),:].T
self.mag2 = self.D.bx3[:,int(self.ex2.get()),:].T
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
else:
self.xcong = self.Tool.congrid(self.R,2*(int(self.arrspb.get()),),method='linear')
self.ycong = self.Tool.congrid(self.Z,2*(int(self.arrspb.get()),),method='linear')
if self.plarr.get() == 'Vp' or self.plarr.get() =='Vp_norm':
if self.slicename == "Along x1-x2":
self.vel1 = self.SphData['v1c']
self.vel2 = self.SphData['v2c']
else:
self.vel1 = self.SphData['v1c']
self.vel2 = self.SphData['v3c']
self.xveccong = self.Tool.congrid(self.vel1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.vel2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Vp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
if self.plarr.get() == 'Bp' or self.plarr.get() =='Bp_norm':
if self.slicename == "Along x1-x2":
self.mag1 = self.SphData['b1c']
self.mag2 = self.SphData['b2c']
else:
self.mag1 = self.SphData['b1c']
self.mag2 = self.SphData['b3c']
self.xveccong = self.Tool.congrid(self.mag1,2*(int(self.arrspb.get()),),method='linear')
self.yveccong = self.Tool.congrid(self.mag2,2*(int(self.arrspb.get()),),method='linear')
self.normVp = sqrt(self.xveccong**2 + self.yveccong**2)
if self.plarr.get() == 'Bp_norm':
self.xveccong = self.xveccong/self.normVp
self.yveccong = self.yveccong/self.normVp
def epssave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.eps')
def pngsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.png')
def pdfsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.pdf')
def jpgsave(self):
self.f.savefig(self.myvar+'_'+self.enstep.get()+'.jpg')
root=Tk()
app=App(root)
root.title("pyPLUTO")
menubar = Menu(root)
savemenu = Menu(menubar,tearoff=0)
savemenu.add_command(label='EPS',command=app.epssave)
savemenu.add_command(label='PDF',command=app.pdfsave)
savemenu.add_command(label='PNG',command=app.pngsave)
savemenu.add_command(label='JPG',command=app.jpgsave)
menubar.add_cascade(label="Save As", menu=savemenu)
#menubar.add_command(label='Plot',command = app.plotfinal)
#menubar.add_command(label='Surface',command=app.plotsurface)
#menubar.add_command(label='Clear',command=app.plotclear)
menubar.add_command(label='Quit',command=root.quit)
root.config(menu=menubar)
root.mainloop()
| gpl-2.0 |
MartinSavc/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
Moriadry/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mmottahedi/neuralnilm_prototype | scripts/e131.py | 2 | 12037 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying layer-by-layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
"""
def exp_a(name):
# e130a but no pretraining and 3 appliances but max_input_power is 5900
# Results: learns something. Still confuses TV with fridge, but not awful. Appears to train very quickly (at 250 epochs it's doing about as well as it did at 1750 epochs)
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'#,
#'dish washer',
#['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200],#, 2500, 2400],
on_power_thresholds=[5, 5, 5],#, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60],#, 1800, 1800],
min_off_durations=[12, 12, 12],#, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_b(name):
# same input as A but e59a's net (plus gradient steps) and batch size of 10
# hasn't learnt anything useful! Just predicts the mean.
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'#,
#'dish washer',
#['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200],#, 2500, 2400],
on_power_thresholds=[5, 5, 5],#, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60],#, 1800, 1800],
min_off_durations=[12, 12, 12],#, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_c(name):
# same as B but all 5 appliances
# probably the best yet for all 5 appliances ;)
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_d(name):
# same as C but bool targets
# NaN after 372
# Showing some (but little) promise at 250 epochs
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=True,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': BLSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('abcd'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=2000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
e-baumer/sampling | sampling/base_sample.py | 1 | 11815 | from __future__ import division
from itertools import combinations
import numpy as np
import pandas as pd
import scipy.integrate
from statsmodels.tools.tools import ECDF
from sklearn import preprocessing
import seaborn as sns
class BaseSample(object):
def __init__(self, data_frame, number_arms=2):
self.integrate = None
self.data = pd.DataFrame.copy(data_frame)
self.n_arms = number_arms
self.label_encoders = {}
super(BaseSample, self).__init__()
def extract_values_by_arm(self, col_name, arm):
'''
Extract values for participants in a specified arm
col_name -- Name of dataframe column to extract data from
arm -- Data will be extracted for participants in this arm number
'''
extracted_vals = self.data[
self.data['arm_assignment']==arm
][col_name].values
return extracted_vals
def label_encoder(self, column_name, new_column_name=None,
encoder_name=None):
'''
This method integer encodes any categorical data.
column_name -- Column name from the dataframe to encode as integers
new_column_name -- If not None, then assign a new column with
new_column_name to dataframe with encoded data
encoder_name -- If this is not none the encoder is kept in dictionary
label_encoders. It can be accessed by
BaseSample.label_encoder[encoder_name]
'''
label_encoder = preprocessing.LabelEncoder()
encoded_data = label_encoder.fit_transform(
self.data[column_name].values
)
if not (new_column_name is None):
self.data[new_column_name] = encoded_data
# Save label encoder if name given
if not (encoded_data is None):
self.label_encoders[encoder_name] = label_encoder
return encoded_data, label_encoder
def display_covariate_dist(self, covariate_list, save_file=None):
'''
'''
n_covars = len(covariate_list)
for covariate in covariate_list:
g = sns.FacetGrid(self.data, col="arm_assignment")
# Both continuous and binary covariates are drawn as histograms (no KDE)
g.map(sns.distplot, covariate, kde=False)
if save_file:
g.savefig(save_file, dpi=450)
if save_file is None:
sns.plt.show()
def nan_finder(self, column_names, percent_nan = 0.05):
'''
Find all data points that have a NaN value for any of the covariates to
be included (listed in column_names). If the fraction of such rows is at
most percent_nan (default 5%), those rows are dropped from the data.
'''
#Initialize array to store indices of NaN values
nan_inds = np.array([])
#Cycle through all covariates to be included
for colname in column_names:
#Find all NaN values for each column and add them to the array
nan_inds = np.concatenate(
(nan_inds,np.where(np.isnan(self.data[colname]))[0])
)
#Extract all unique indices, this includes all of the data points
#that have NaN values for any of the covariates
all_nans = np.unique(nan_inds)
#If the number of data points with NaN values is within the specified
#total percentage threshold (percent_nan), delete those data points
if len(all_nans)/len(self.data) <= percent_nan:
self.data = self.data.drop(all_nans)
#If more data points have NaN values than the acceptable percentage,
#raise an error reporting the offending fraction
else:
raise ValueError("There are too many data points with NaN values. There \
are {:.3f} NaN data points with at least one NaN value for \
one of the covariates included. The limit is set to {:.3f}."\
.format(len(all_nans)/len(self.data), percent_nan))
return self.data
def set_integration_type(self, int_type='trapz'):
'''
Set the integration method for determining area under the Empirical CDF
'''
try:
self.integrate = getattr(scipy.integrate, int_type)
except AttributeError:
print("{} is not a valid integration method (trapz, cumtrapz, simps, romb)".format(int_type))
return False
def calculate_area_continuous(self, vals, int_type='trapz'):
'''
Determine Empirical CDF (for continuous covariate) and then determine
the area under the ECDF curve
'''
if self.integrate is None:
self.integrate = getattr(scipy.integrate, 'trapz')
print("No integration type specified for calculating the area"+\
" under ECDF. Using trapz")
#try:
#raise ValueError('You must set the integration method first!')
#except ValueError:
#print("No Integration method set for ECDF")
#raise
ecdf = ECDF(vals, side='left')
area = self.integrate(ecdf.y[1:], ecdf.x[1:])
return area
def find_imbalance_continuous(self, covar_arm1, covar_arm2, area1, area2):
'''
Find the normalized imbalance metric for a single continuous variable
(covariate) as defined by Lin and Su (2012). This is the absolute area
difference between the two ECDFs defined for two separate arms of a
study, normalized by the pooled range of the covariate. See docs for the original paper.
covar_arm1 -- Values of continuous covariate for participants enrolled
in one particular arm
covar_arm2 -- Values of continuous covariate for participants enrolled
in particular arm to be compared with covar_arm1
Both covar_arm1 and covar_arm2 correspond to the same covariate
area1 -- Area under the ECDF as given by covar_arm1
area2 -- Area under the ECDF as given by covar_arm2
'''
norm_imbalance = abs(area1 - area2) /\
(np.max(np.concatenate([covar_arm1, covar_arm2])) -\
np.min(np.concatenate([covar_arm1, covar_arm2])))
return norm_imbalance
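# Illustrative sketch (not part of the original module; `bs` and the 'age'
# column are hypothetical): the continuous metric compares the areas under
# the two arms' empirical CDFs and scales by the pooled range, so similar
# distributions give values near 0 and well-separated ones approach 1.
#
#   >>> vals_1 = bs.extract_values_by_arm('age', 1)
#   >>> vals_2 = bs.extract_values_by_arm('age', 2)
#   >>> a1 = bs.calculate_area_continuous(vals_1)
#   >>> a2 = bs.calculate_area_continuous(vals_2)
#   >>> bs.find_imbalance_continuous(vals_1, vals_2, a1, a2)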
def find_imbalance_categorical(self, covar_arm1, covar_arm2, n_categories):
'''
Find the normalized imbalance metric for a single categorical variable
(covariate) as defined by Lin and Su (2012). This is half the sum, over
categories, of the absolute differences in category proportions between
two separate arms of a study. See docs for the original paper.
covar_arm1 -- Integer encoded values of categorical covariate for
participants enrolled in one particular arm
covar_arm2 -- Integer encoded values of categorical covariate for
participants enrolled in particular arm to be compared
with covar_arm1
n_categories -- Number of categories for categorical covariate
Both covar_arm1 and covar_arm2 correspond to the same covariate
'''
norm_imbalance = 0.
n_arm1 = len(covar_arm1)
n_arm2 = len(covar_arm2)
for i in range(n_categories):
n_covar_arm1 = len(np.where(covar_arm1==i)[0])
n_covar_arm2 = len(np.where(covar_arm2==i)[0])
norm_imbalance += abs(n_covar_arm1/n_arm1 - n_covar_arm2/n_arm2) / 2
return norm_imbalance
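# Worked example (toy values, not from the original docs): with integer
# encoded labels arm1 = [0, 0, 1, 2] and arm2 = [0, 1, 1, 2] the category
# proportions are (0.50, 0.25, 0.25) vs (0.25, 0.50, 0.25), so the metric is
# (|0.50-0.25| + |0.25-0.50| + |0.25-0.25|) / 2 = 0.25.
#
#   >>> import numpy as np
#   >>> arm1 = np.array([0, 0, 1, 2])
#   >>> arm2 = np.array([0, 1, 1, 2])
#   >>> sampler.find_imbalance_categorical(arm1, arm2, 3)  # `sampler` is a BaseSample instance
#   0.25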
def calculate_imbalance(self, covariates_con, covariates_cat,
min_type='mean'):
'''
Calculate imbalance coefficient between all arm combinations over all
covariates, continuous and categorical.
Imbalance coefficients for individual covariates within a comparison of
two arms are averaged to find a single imbalance coefficient.
covariates_con -- list of column names in dataframe of continuous
covariates to balance on
covariates_cat -- list of column names in dataframe of categorical
covariates to balance on. These must be integer
encoded. Use label_encoder method.
min_type -- How to combine the imbalance coefficients for each
arm combiniation. Choices include max, mean, sum. For
max the maximum imbalance coefficient is used as the
overall imbalance coefficient. For sum, the sum of the
imbalance coefficients is used. For mean, the mean
value of the imbalance coefficient is used.
'''
imbalance_coeff_arm = []
arm_list = range(1,self.n_arms+1)
# Loop through all possible combinations of study arms
for comb in combinations(arm_list,2):
imb_coeff_comb = []
# Loop through all continuous covariates
for cont_covar in covariates_con:
# Get values of covariate for comb[0]
vals_1 = self.extract_values_by_arm(cont_covar, comb[0])
# Get values of covariate for comb[1]
vals_2 = self.extract_values_by_arm(cont_covar, comb[1])
# Calculate area under ECDF
area1 = self.calculate_area_continuous(vals_1)
area2 = self.calculate_area_continuous(vals_2)
# Calculate imbalance coefficient
imb_coef = self.find_imbalance_continuous(
vals_1, vals_2, area1, area2
)
imb_coeff_comb.append(imb_coef)
# Loop through all categorical covariates
for cat_covar in covariates_cat:
# Get values of covariate for comb[0]
vals_1 = self.extract_values_by_arm(cat_covar, comb[0])
# Get values of covariate for comb[1]
vals_2 = self.extract_values_by_arm(cat_covar, comb[1])
# Calculate imbalance coefficient
# Labels are 0-indexed (see label_encoder), so the number of
# categories is the maximum label plus one
imb_coef = self.find_imbalance_categorical(
vals_1, vals_2,
int(np.nanmax(np.concatenate([vals_1, vals_2]))) + 1
)
imb_coeff_comb.append(imb_coef)
# Find the mean of all the covariate imbalance coefficients
# TODO: Implement weighted mean***
imbalance_coeff_arm.append(np.nanmean(imb_coeff_comb))
# Capture overall imbalance coefficient for all arm combinations
if min_type.lower() == 'max':
imbalance_coeff = np.nanmax(imbalance_coeff_arm)
elif min_type.lower() == 'mean':
imbalance_coeff = np.nanmean(imbalance_coeff_arm)
elif min_type.lower() == 'sum':
imbalance_coeff = np.nansum(imbalance_coeff_arm)
else:
print('{} is an unrecognized minimization option type (max, mean, sum)'.format(min_type))
return False
return imbalance_coeff
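# Illustrative usage sketch (column names are hypothetical, not from the
# original docs): once arms have been assigned (i.e. the data has an
# 'arm_assignment' column), the overall imbalance across continuous and
# label-encoded categorical covariates can be computed as
#
#   >>> bs = BaseSample(df, number_arms=3)
#   >>> bs.label_encoder('site', new_column_name='site_code', encoder_name='site')
#   >>> bs.calculate_imbalance(['age', 'income'], ['site_code'], min_type='mean')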
| apache-2.0 |
rushter/heamy | heamy/pipeline.py | 1 | 8208 | # coding:utf-8
import numpy as np
import pandas as pd
from scipy.stats import gmean
from .dataset import Dataset
from .estimator import Regressor, Classifier
from .utils.main import report_score, generate_columns, group_models, feature_combiner
from .utils.optimizer import Optimizer
class ModelsPipeline(object):
"""Combines sequence of models."""
def __init__(self, *args):
self.models = []
for model in args:
if isinstance(model, (Regressor, Classifier)):
self.add(model)
else:
raise ValueError("Unrecognized estimator.")
def add(self, model):
"""Adds a single model.
Parameters
----------
model : `Estimator`
"""
if isinstance(model, (Regressor, Classifier)):
self.models.append(model)
else:
raise ValueError("Unrecognized estimator.")
def apply(self, func):
"""Applies function along models output.
Parameters
----------
func : function
Arbitrary function with one argument.
Returns
-------
`PipeApply`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.apply(lambda x: np.max(x,axis=0)).execute()
"""
return PipeApply(function=func, models=self.models)
def mean(self):
"""Returns the mean of the models predictions.
Returns
-------
`PipeApply`
Examples
--------
>>> # Execute
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.mean().execute()
>>> # Validate
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.mean().validate()
"""
return self.apply(lambda x: np.mean(x, axis=0))
def gmean(self):
"""Returns the gmean of the models predictions.
Returns
-------
`PipeApply`
"""
return self.apply(lambda x: gmean(x, axis=0))
def max(self):
"""Returns the max of the models predictions.
Returns
-------
`PipeApply`
"""
return self.apply(lambda x: np.max(x, axis=0))
def min(self):
"""Returns the min of the models predictions.
Returns
-------
`PipeApply`
"""
return self.apply(lambda x: np.min(x, axis=0))
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
"""Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111)
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.stack(
k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test
)
train_df = pd.DataFrame(
result.X_train, columns=generate_columns(result.X_train, model.name)
)
test_df = pd.DataFrame(
result.X_test, columns=generate_columns(result.X_test, model.name)
)
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1)
result_test = pd.concat(result_test, axis=1)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
return ds
def blend(self, proportion=0.2, stratify=False, seed=100, indices=None, add_diff=False):
"""Blends sequence of models.
Parameters
----------
proportion : float, default 0.2
stratify : bool, default False
        seed : int, default 100
indices : list(np.ndarray,np.ndarray), default None
Two numpy arrays that contain indices for train/test slicing.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.blend(seed=15)
>>> # Custom indices
>>> train_index = np.array(range(250))
>>> test_index = np.array(range(250,333))
        >>> res = model_rf.blend(indices=(train_index, test_index))
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.blend(
proportion=proportion, stratify=stratify, seed=seed, indices=indices
)
train_df = pd.DataFrame(
result.X_train, columns=generate_columns(result.X_train, model.name)
)
test_df = pd.DataFrame(
result.X_test, columns=generate_columns(result.X_test, model.name)
)
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1, ignore_index=True)
result_test = pd.concat(result_test, axis=1, ignore_index=True)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
return Dataset(X_train=result_train, y_train=y, X_test=result_test)
def find_weights(self, scorer, test_size=0.2, method="SLSQP"):
"""Finds optimal weights for weighted average of models.
Parameters
----------
scorer : function
Scikit-learn like metric.
test_size : float, default 0.2
method : str
Type of solver. Should be one of:
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
Returns
-------
list
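        Examples
        --------
        >>> # Hypothetical usage: assumes `model_rf`, `model_lr` and a
        >>> # scikit-learn style `scorer(y_true, y_pred)` are already defined.
        >>> pipeline = ModelsPipeline(model_rf, model_lr)
        >>> weights = pipeline.find_weights(scorer)
        >>> result = pipeline.weight(weights).execute()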
"""
p = Optimizer(self.models, test_size=test_size, scorer=scorer)
return p.minimize(method)
def weight(self, weights):
"""Applies weighted mean to models.
Parameters
----------
weights : list
Returns
-------
np.ndarray
Examples
        --------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> pipeline.weight([0.8,0.2])
"""
return self.apply(lambda x: np.average(x, axis=0, weights=weights))
class PipeApply(object):
def __init__(self, function, models):
self.models = models
self.function = function
def execute(self):
results = []
for model in self.models:
results.append(model.predict())
return self.function(results)
def validate(
self, scorer=None, k=1, test_size=0.1, stratify=False, shuffle=True, seed=100, indices=None
):
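        # Cross-validates the combined prediction: predictions from every model
        # are grouped per validation round, `self.function` is applied to each
        # group, and `scorer` evaluates the combined result for each round.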
params = dict(
k=k,
test_size=test_size,
stratify=stratify,
scorer=scorer,
shuffle=shuffle,
seed=seed,
indices=indices,
)
scores = []
scorer = params["scorer"]
params["scorer"] = None
y_preds_grouped, y_true_grouped = group_models(self.models, params)
for i in y_preds_grouped.keys():
result = self.function(y_preds_grouped[i])
scores.append(scorer(y_true_grouped[i], result))
report_score(scores, scorer)
return scores
| mit |
cloudera/ibis | ibis/backends/dask/tests/execution/test_structs.py | 1 | 2536 | from collections import OrderedDict
import dask.dataframe as dd
import pandas as pd
import pytest
from dask.dataframe.utils import tm
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value, npartitions):
df = dd.from_pandas(
pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
),
npartitions=npartitions,
)
return connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct['fruit']
result = execute(expr)
assert result == "pear"
expr = struct['weight']
result = execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s['fruit']
result = expr.execute()
expected = dd.from_pandas(
pd.Series(["apple", "pear", "pear"], name="fruit"), npartitions=1,
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s['fruit']).aggregate(total=t.value.sum())
result = expr.execute()
expected = dd.from_pandas(
pd.DataFrame([("apple", 1), ("pear", 5)], columns=["fruit", "total"]),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s['weight'].sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = dd.from_pandas(
pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"]),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
| apache-2.0 |
manashmndl/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
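# Illustrative sanity check: the fraction of samples RANSAC keeps as inliers
# should be close to 1 - n_outliers / n_samples.
print("Inlier fraction: %.2f (expected about %.2f)"
      % (inlier_mask.mean(), 1 - n_outliers / float(n_samples)))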
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
Holisticnature/ArcNumerical-Tools | Scripts/ZStandardizeFields.py | 1 | 3918 | # Name: ZStandardizeFields.py
# Purpose: Adds selected fields as standardized Z-scores by extending a numpy array to the feature class.
# Author: David Wasserman
# Last Modified: 2/7/2018
# Copyright: David Wasserman
# Python Version: 2.7-3.1
# ArcGIS Version: 10.4 (Pro)
# --------------------------------
# Copyright 2016 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import os, arcpy
import pandas as pd
import SharedArcNumericalLib as san
# Function Definitions
def add_Standarized_Fields(in_fc, input_Fields, ignore_nulls=True):
""" This function will take in an feature class, and use pandas/numpy to calculate Z-scores and then
join them back to the feature class using arcpy."""
try:
arcpy.env.overwriteOutput = True
desc = arcpy.Describe(in_fc)
OIDFieldName = desc.OIDFieldName
workspace = os.path.dirname(desc.catalogPath)
input_Fields_List = input_Fields
finalColumnList = []
scored_df = None
for column in input_Fields_List:
try:
field_series = san.arcgis_table_to_dataframe(in_fc, [column], skip_nulls=ignore_nulls, null_values=0)
san.arc_print("Creating standarized column for field {0}.".format(str(column)), True)
col_Standarized = arcpy.ValidateFieldName("Zscore_" + column, workspace)
field_series[col_Standarized] = (field_series[column] - field_series[column].mean()) / field_series[
column].std(ddof=0)
finalColumnList.append(col_Standarized)
if col_Standarized != column:
del field_series[column]
if scored_df is None:
san.arc_print("Test")
scored_df = field_series
else:
scored_df = pd.merge(scored_df, field_series, how="outer", left_index=True, right_index=True)
except Exception as e:
san.arc_print("Could not process field {0}".format(str(column)))
san.arc_print(e.args[0])
pass
JoinField = arcpy.ValidateFieldName("DFIndexJoin", workspace)
scored_df[JoinField] = scored_df.index
finalColumnList.append(JoinField)
san.arc_print("Exporting new standarized dataframe to structured numpy array.", True)
finalStandardArray = scored_df.to_records()
san.arc_print(
"Joining new standarized fields to feature class. The new fields are {0}".format(str(finalColumnList))
, True)
arcpy.da.ExtendTable(in_fc, OIDFieldName, finalStandardArray, JoinField, append_only=False)
san.arc_print("Script Completed Successfully.", True)
except arcpy.ExecuteError:
san.arc_print(arcpy.GetMessages(2))
except Exception as e:
san.arc_print(e.args[0])
# End add_Standarized_Fields function
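# Minimal, illustrative helper (not used by the tool above): the same
# population z-score formula applied in add_Standarized_Fields, shown on a
# bare pandas Series, independent of ArcGIS.
def _zscore_series_example(series):
    """Return (x - mean) / population standard deviation for a pandas Series."""
    return (series - series.mean()) / series.std(ddof=0)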
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Define Inputs
FeatureClass = arcpy.GetParameterAsText(0)
InputFields = arcpy.GetParameterAsText(1).split(";")
IgnoreNulls = bool(arcpy.GetParameter(2))
add_Standarized_Fields(FeatureClass, InputFields, IgnoreNulls)
| apache-2.0 |
LukeC92/iris | lib/iris/tests/__init__.py | 2 | 45559 | # (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import codecs
import collections
import contextlib
import datetime
import difflib
import filecmp
import functools
import gzip
import inspect
import json
import io
import math
import os
import os.path
import re
import shutil
import subprocess
import sys
import unittest
import threading
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import filelock
import numpy as np
import numpy.ma as ma
import requests
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
matplotlib.use('agg')
matplotlib.rcdefaults()
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
from iris_grib.message import GribMessage
GRIB_AVAILABLE = True
except ImportError:
GRIB_AVAILABLE = False
try:
import iris_sample_data
except ImportError:
SAMPLE_DATA_AVAILABLE = False
else:
SAMPLE_DATA_AVAILABLE = True
try:
import nc_time_axis
NC_TIME_AXIS_AVAILABLE = True
except ImportError:
NC_TIME_AXIS_AVAILABLE = False
try:
requests.get('https://github.com/SciTools/iris')
INET_AVAILABLE = True
except requests.exceptions.ConnectionError:
INET_AVAILABLE = False
try:
import stratify
STRATIFY_AVAILABLE = True
except ImportError:
STRATIFY_AVAILABLE = False
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')
#: Default perceptual hash size.
_HASH_SIZE = 16
#: Default maximum perceptual hash hamming distance.
_HAMMING_DISTANCE = 2
if '--data-files-used' in sys.argv:
sys.argv.remove('--data-files-used')
fname = '/var/tmp/all_iris_test_resource_paths.txt'
print('saving list of files used by tests to %s' % fname)
_EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
_EXPORT_DATAPATHS_FILE = None
if '--create-missing' in sys.argv:
sys.argv.remove('--create-missing')
print('Allowing creation of missing test results.')
os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
if (MPL_AVAILABLE and '-d' in sys.argv):
sys.argv.remove('-d')
plt.switch_backend('tkagg')
_DISPLAY_FIGURES = True
# Threading non re-entrant blocking lock to ensure thread-safe plotting.
_lock = threading.Lock()
def main():
"""A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
if '-h' in sys.argv or '--help' in sys.argv:
stdout = sys.stdout
buff = io.StringIO()
# NB. unittest.main() raises an exception after it's shown the help text
try:
sys.stdout = buff
unittest.main()
finally:
sys.stdout = stdout
lines = buff.getvalue().split('\n')
lines.insert(9, 'Iris-specific options:')
lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
lines.insert(11, ' NOTE: To compare results of failing tests, ')
lines.insert(12, ' use idiff.py instead')
lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
lines.insert(
            14, ' --create-missing Create missing test results')
print('\n'.join(lines))
else:
unittest.main()
def get_data_path(relative_path):
"""
Return the absolute path to a data file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
test_data_dir = iris.config.TEST_DATA_DIR
if test_data_dir is None:
test_data_dir = ''
data_path = os.path.join(test_data_dir, relative_path)
if _EXPORT_DATAPATHS_FILE is not None:
_EXPORT_DATAPATHS_FILE.write(data_path + '\n')
if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
# if the file is gzipped, ungzip it and return the path of the ungzipped
# file.
gzipped_fname = data_path + '.gz'
if os.path.exists(gzipped_fname):
with gzip.open(gzipped_fname, 'rb') as gz_fh:
try:
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
except IOError:
# Put ungzipped data file in a temporary path, since we
# can't write to the original path (maybe it is owned by
# the system.)
_, ext = os.path.splitext(data_path)
data_path = iris.util.create_temp_filename(suffix=ext)
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
return data_path
class IrisTest_nometa(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
@classmethod
def setUpClass(cls):
# Ensure that the CF profile if turned-off for testing.
iris.site_configuration['cf_profile'] = None
def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
if reference_str != test_str:
diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
'Reference', 'Test result', '', '', 0))
self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
@staticmethod
def get_result_path(relative_path):
"""
Returns the absolute path to a result file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
def assertStringEqual(self, reference_str, test_str,
type_comparison_name='strings'):
if reference_str != test_str:
diff = '\n'.join(difflib.unified_diff(reference_str.splitlines(),
test_str.splitlines(),
'Reference', 'Test result',
'', '', 0))
self.fail("{} do not match:\n{}".format(type_comparison_name,
diff))
def result_path(self, basename=None, ext=''):
"""
Return the full path to a test result, generated from the \
calling file, class and, optionally, method.
Optional kwargs :
* basename - File basename. If omitted, this is \
generated from the calling method.
* ext - Appended file extension.
"""
if ext and not ext.startswith('.'):
ext = '.' + ext
# Generate the folder name from the calling file name.
path = os.path.abspath(inspect.getfile(self.__class__))
path = os.path.splitext(path)[0]
sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]
# Generate the file name from the calling function name?
if basename is None:
stack = inspect.stack()
for frame in stack[1:]:
if 'test_' in frame[3]:
basename = frame[3].replace('test_', '')
break
filename = basename + ext
result = os.path.join(self.get_result_path(''),
sub_path.replace('test_', ''),
self.__class__.__name__.replace('Test_', ''),
filename)
return result
def assertCMLApproxData(self, cubes, reference_filename=None, **kwargs):
# passes args and kwargs on to approx equal
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
reference_filename = [self.get_result_path(reference_filename)]
for i, cube in enumerate(cubes):
fname = list(reference_filename)
# don't want the ".cml" for the json stats file
if fname[-1].endswith(".cml"):
fname[-1] = fname[-1][:-4]
fname[-1] += '.data.%d.json' % i
self.assertDataAlmostEqual(cube.data, fname, **kwargs)
self.assertCML(cubes, reference_filename, checksum=False)
def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
"""
Test that the CDL for the given netCDF file matches the contents
of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* netcdf_filename:
The path to the netCDF file.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* flags:
Command-line flags for `ncdump`, as either a whitespace
separated string or an iterable. Defaults to '-h'.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'cdl')
else:
reference_path = self.get_result_path(reference_filename)
# Convert the netCDF file to CDL file format.
cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
if flags is None:
flags = []
elif isinstance(flags, six.string_types):
flags = flags.split()
else:
flags = list(map(str, flags))
with open(cdl_filename, 'w') as cdl_file:
subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
stderr=cdl_file, stdout=cdl_file)
# Ingest the CDL for comparison, excluding first line.
with open(cdl_filename, 'r') as cdl_file:
lines = cdl_file.readlines()[1:]
# Sort the dimensions (except for the first, which can be unlimited).
# This gives consistent CDL across different platforms.
sort_key = lambda line: ('UNLIMITED' not in line, line)
dimension_lines = slice(lines.index('dimensions:\n') + 1,
lines.index('variables:\n'))
lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
cdl = ''.join(lines)
os.remove(cdl_filename)
self._check_same(cdl, reference_path, type_comparison_name='CDL')
def assertCML(self, cubes, reference_filename=None, checksum=True):
"""
Test that the CML for the given cubes matches the contents of
the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* cubes:
Either a Cube or a sequence of Cubes.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* checksum:
When True, causes the CML to include a checksum for each
Cube's data. Defaults to True.
"""
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
if isinstance(cubes, (list, tuple)):
xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
byteorder=False)
else:
xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
reference_path = self.get_result_path(reference_filename)
self._check_same(xml, reference_path)
def assertTextFile(self, source_filename, reference_filename, desc="text file"):
"""Check if two text files are the same, printing any diffs."""
with open(source_filename) as source_file:
source_text = source_file.readlines()
with open(reference_filename) as reference_file:
reference_text = reference_file.readlines()
if reference_text != source_text:
diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
def assertDataAlmostEqual(self, data, reference_filename, **kwargs):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
with open(reference_path, 'r') as reference_file:
stats = json.load(reference_file)
self.assertEqual(stats.get('shape', []), list(data.shape))
self.assertEqual(stats.get('masked', False),
ma.is_masked(data))
nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
stats.get('max', 0.), stats.get('min', 0.)),
dtype=np.float_)
if math.isnan(stats.get('mean', 0.)):
self.assertTrue(math.isnan(data.mean()))
else:
data_stats = np.array((data.mean(), data.std(),
data.max(), data.min()),
dtype=np.float_)
self.assertArrayAllClose(nstats, data_stats, **kwargs)
else:
self._ensure_folder(reference_path)
stats = collections.OrderedDict([
('std', np.float_(data.std())),
('min', np.float_(data.min())),
('max', np.float_(data.max())),
('shape', data.shape),
('masked', ma.is_masked(data)),
('mean', np.float_(data.mean()))])
with open(reference_path, 'w') as reference_file:
reference_file.write(json.dumps(stats))
def assertFilesEqual(self, test_filename, reference_filename):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
fmt = 'test file {!r} does not match reference {!r}.'
self.assertTrue(filecmp.cmp(test_filename, reference_path),
fmt.format(test_filename, reference_path))
else:
self._ensure_folder(reference_path)
shutil.copy(test_filename, reference_path)
def assertString(self, string, reference_filename=None):
"""
Test that `string` matches the contents of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* string:
The string to check.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'txt')
else:
reference_path = self.get_result_path(reference_filename)
self._check_same(string, reference_path,
type_comparison_name='Strings')
def assertRepr(self, obj, reference_filename):
self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
if self._check_reference_file(reference_path):
with open(reference_path, 'rb') as reference_fh:
reference = ''.join(part.decode('utf-8')
for part in reference_fh.readlines())
self._assert_str_same(reference, item, reference_path,
type_comparison_name)
else:
self._ensure_folder(reference_path)
with open(reference_path, 'wb') as reference_fh:
reference_fh.writelines(
part.encode('utf-8')
for part in item)
def assertXMLElement(self, obj, reference_filename):
"""
Calls the xml_element method given obj and asserts the result is the same as the test file.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(obj.xml_element(doc))
pretty_xml = doc.toprettyxml(indent=" ")
reference_path = self.get_result_path(reference_filename)
self._check_same(pretty_xml, reference_path,
type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
def assertRaisesRegexp(self, *args, **kwargs):
"""
Emulate the old :meth:`unittest.TestCase.assertRaisesRegexp`.
Because the original function is now deprecated in Python 3.
Now calls :meth:`six.assertRaisesRegex()` (no final "p") instead.
It is the same, except for providing an additional 'msg' argument.
"""
# Note: invoke via parent class to avoid recursion as, in Python 2,
# "six.assertRaisesRegex" calls getattr(self, 'assertRaisesRegexp').
return six.assertRaisesRegex(super(IrisTest_nometa, self),
*args, **kwargs)
@contextlib.contextmanager
def _recordWarningMatches(self, expected_regexp=''):
# Record warnings raised matching a given expression.
matches = []
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
yield matches
messages = [str(warning.message) for warning in w]
expr = re.compile(expected_regexp)
matches.extend(message for message in messages
if expr.search(message))
@contextlib.contextmanager
def assertWarnsRegexp(self, expected_regexp=''):
# Check that a warning is raised matching a given expression.
with self._recordWarningMatches(expected_regexp) as matches:
yield
msg = "Warning matching '{}' not raised."
msg = msg.format(expected_regexp)
self.assertTrue(matches, msg)
@contextlib.contextmanager
def assertNoWarningsRegexp(self, expected_regexp=''):
# Check that no warning matching the given expression is raised.
with self._recordWarningMatches(expected_regexp) as matches:
yield
msg = "Unexpected warning(s) raised, matching '{}' : {!r}."
msg = msg.format(expected_regexp, matches)
self.assertFalse(matches, msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
# Define helper function to extract unmasked values as a 1d
# array.
def unmasked_data_as_1d_array(array):
array = ma.asarray(array)
if array.ndim == 0:
if array.mask:
data = np.array([])
else:
data = np.array([array.data])
else:
data = array.data[~ma.getmaskarray(array)]
return data
# Compare masks. This will also check that the array shapes
# match, which is not tested when comparing unmasked values if
# strict is False.
a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
np.testing.assert_array_equal(a_mask, b_mask)
if strict:
assertion(a.data, b.data, **kwargs)
else:
assertion(unmasked_data_as_1d_array(a),
unmasked_data_as_1d_array(b),
**kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
"""
Check that masked arrays are equal. This requires the
unmasked values and masks to be identical.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
"""
self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
"""
Check that masked arrays are almost equal. This requires the
masks to be identical, and the unmasked values to be almost
equal.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
* decimal (int):
Equality tolerance level for
:meth:`numpy.testing.assert_array_almost_equal`, with the meaning
'abs(desired-actual) < 0.5 * 10**(-decimal)'
"""
self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
"""
Check arrays are equal, within given relative + absolute tolerances.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* rtol, atol (float):
Relative and absolute tolerances to apply.
Any additional kwargs are passed to numpy.testing.assert_allclose.
Performs pointwise toleranced comparison, and raises an assertion if
the two are not equal 'near enough'.
For full details see underlying routine numpy.testing.assert_allclose.
"""
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
@contextlib.contextmanager
def temp_filename(self, suffix=''):
filename = iris.util.create_temp_filename(suffix)
try:
yield filename
finally:
os.remove(filename)
def file_checksum(self, file_path):
"""
Generate checksum from file.
"""
with open(file_path, "rb") as in_file:
return zlib.crc32(in_file.read())
def _unique_id(self):
"""
Returns the unique ID for the current assertion.
The ID is composed of two parts: a unique ID for the current test
(which is itself composed of the module, class, and test names), and
a sequential counter (specific to the current test) that is incremented
on each call.
For example, calls from a "test_tx" routine followed by a "test_ty"
routine might result in::
test_plot.TestContourf.test_tx.0
test_plot.TestContourf.test_tx.1
test_plot.TestContourf.test_tx.2
test_plot.TestContourf.test_ty.0
"""
# Obtain a consistent ID for the current test.
# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')
if bits[0] == '__main__':
floc = sys.modules['__main__'].__file__
path, file_name = os.path.split(os.path.abspath(floc))
bits[0] = os.path.splitext(file_name)[0]
folder, location = os.path.split(path)
bits = [location] + bits
while location not in ['iris', 'example_tests']:
folder, location = os.path.split(folder)
bits = [location] + bits
test_id = '.'.join(bits)
# Derive the sequential assertion ID within the test
assertion_id = self._assertion_counts[test_id]
self._assertion_counts[test_id] += 1
return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def check_graphic(self):
"""
Check the hash of the current matplotlib figure matches the expected
image hash for the current graphic test.
To create missing image test results, set the IRIS_TEST_CREATE_MISSING
environment variable before running the tests. This will result in new
and appropriately "<hash>.png" image files being generated in the image
output directory, and the imagerepo.json file being updated.
"""
import imagehash
from PIL import Image
dev_mode = os.environ.get('IRIS_TEST_CREATE_MISSING')
unique_id = self._unique_id()
repo_fname = os.path.join(_RESULT_PATH, 'imagerepo.json')
with open(repo_fname, 'rb') as fi:
repo = json.load(codecs.getreader('utf-8')(fi))
try:
#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
if not os.access(image_output_directory, os.W_OK):
if not os.access(os.getcwd(), os.W_OK):
raise IOError('Write access to a local disk is required '
'to run image tests. Run the tests from a '
'current working directory you have write '
                                  'access to, to avoid this issue.')
else:
image_output_directory = os.path.join(
os.getcwd(), 'iris_image_test_output')
result_fname = os.path.join(image_output_directory,
'result-' + unique_id + '.png')
if not os.path.isdir(image_output_directory):
# Handle race-condition where the directories are
# created sometime between the check above and the
# creation attempt below.
try:
os.makedirs(image_output_directory)
except OSError as err:
# Don't care about "File exists"
if err.errno != 17:
raise
def _create_missing():
fname = '{}.png'.format(phash)
base_uri = ('https://scitools.github.io/test-iris-imagehash/'
'images/{}')
uri = base_uri.format(fname)
hash_fname = os.path.join(image_output_directory, fname)
uris = repo.setdefault(unique_id, [])
uris.append(uri)
print('Creating image file: {}'.format(hash_fname))
figure.savefig(hash_fname)
msg = 'Creating imagerepo entry: {} -> {}'
print(msg.format(unique_id, uri))
lock = filelock.FileLock(os.path.join(_RESULT_PATH,
'imagerepo.lock'))
# The imagerepo.json file is a critical resource, so ensure
# thread safe read/write behaviour via platform independent
# file locking.
with lock.acquire(timeout=600):
with open(repo_fname, 'wb') as fo:
json.dump(repo, codecs.getwriter('utf-8')(fo),
indent=4, sort_keys=True)
# Calculate the test result perceptual image hash.
buffer = io.BytesIO()
figure = plt.gcf()
figure.savefig(buffer, format='png')
buffer.seek(0)
phash = imagehash.phash(Image.open(buffer), hash_size=_HASH_SIZE)
if unique_id not in repo:
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
emsg = 'Missing image test result: {}.'
raise AssertionError(emsg.format(unique_id))
else:
uris = repo[unique_id]
# Create the expected perceptual image hashes from the uris.
to_hash = imagehash.hex_to_hash
expected = [to_hash(os.path.splitext(os.path.basename(uri))[0],
hash_size=_HASH_SIZE)
for uri in uris]
# Calculate the hamming distance vector for the result hash.
distances = [e - phash for e in expected]
if np.all([hd > _HAMMING_DISTANCE for hd in distances]):
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
msg = ('Bad phash {} with hamming distance {} '
'for test {}.')
msg = msg.format(phash, distances, unique_id)
if _DISPLAY_FIGURES:
                            emsg = 'Image comparison would have failed: {}'
print(emsg.format(msg))
else:
emsg = 'Image comparison failed: {}'
raise AssertionError(emsg.format(msg))
if _DISPLAY_FIGURES:
plt.show()
finally:
plt.close()
def _remove_testcase_patches(self):
"""Helper to remove per-testcase patches installed by :meth:`patch`."""
# Remove all patches made, ignoring errors.
for p in self.testcase_patches:
p.stop()
# Reset per-test patch control variable.
self.testcase_patches.clear()
def patch(self, *args, **kwargs):
"""
Install a mock.patch, to be removed after the current test.
The patch is created with mock.patch(*args, **kwargs).
Returns:
The substitute object returned by patch.start().
For example::
mock_call = self.patch('module.Class.call', return_value=1)
module_Class_instance.call(3, 4)
self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])
"""
# Make the new patch and start it.
patch = mock.patch(*args, **kwargs)
start_result = patch.start()
# Create the per-testcases control variable if it does not exist.
# NOTE: this mimics a setUp method, but continues to work when a
# subclass defines its own setUp.
if not hasattr(self, 'testcase_patches'):
self.testcase_patches = {}
# When installing the first patch, schedule remove-all at cleanup.
if not self.testcase_patches:
self.addCleanup(self._remove_testcase_patches)
# Record the new patch and start object for reference.
self.testcase_patches[patch] = start_result
# Return patch replacement object.
return start_result
def assertArrayShapeStats(self, result, shape, mean, std_dev, rtol=1e-6):
"""
Assert that the result, a cube, has the provided shape and that the
mean and standard deviation of the data array are also as provided.
Thus build confidence that a cube processing operation, such as a
cube.regrid, has maintained its behaviour.
"""
self.assertEqual(result.shape, shape)
self.assertArrayAllClose(result.data.mean(), mean, rtol=rtol)
self.assertArrayAllClose(result.data.std(), std_dev, rtol=rtol)
# An environment variable controls whether test timings are output.
#
# NOTE: to run tests with timing output, nosetests cannot be used.
# At present, that includes not using "python setup.py test"
# The typically best way is like this :
# $ export IRIS_TEST_TIMINGS=1
# $ python -m unittest discover -s iris.tests
# and commonly adding ...
# | grep "TIMING TEST" >iris_test_output.txt
#
_PRINT_TEST_TIMINGS = bool(int(os.environ.get('IRIS_TEST_TIMINGS', 0)))
def _method_path(meth):
cls = meth.im_class
return '.'.join([cls.__module__, cls.__name__, meth.__name__])
def _testfunction_timing_decorator(fn):
# Function decorator for making a testcase print its execution time.
@functools.wraps(fn)
def inner(*args, **kwargs):
start_time = datetime.datetime.now()
try:
result = fn(*args, **kwargs)
finally:
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
msg = '\n TEST TIMING -- "{}" took : {:12.6f} sec.'
name = _method_path(fn)
print(msg.format(name, elapsed_time))
return result
return inner
def iristest_timing_decorator(cls):
# Class decorator to make all "test_.." functions print execution timings.
if _PRINT_TEST_TIMINGS:
# NOTE: 'dir' scans *all* class properties, including inherited ones.
attr_names = dir(cls)
for attr_name in attr_names:
attr = getattr(cls, attr_name)
if callable(attr) and attr_name.startswith('test'):
attr = _testfunction_timing_decorator(attr)
setattr(cls, attr_name, attr)
return cls
class _TestTimingsMetaclass(type):
# An alternative metaclass for IrisTest subclasses, which makes
# them print execution timings for all the testcases.
# This is equivalent to applying the @iristest_timing_decorator to
# every test class that inherits from IrisTest.
# NOTE: however, it means you *cannot* specify a different metaclass for
# your test class inheriting from IrisTest.
# See below for how to solve that where needed.
def __new__(cls, clsname, base_classes, attrs):
result = type.__new__(cls, clsname, base_classes, attrs)
if _PRINT_TEST_TIMINGS:
result = iristest_timing_decorator(result)
return result
class IrisTest(six.with_metaclass(_TestTimingsMetaclass, IrisTest_nometa)):
# Derive the 'ordinary' IrisTest from IrisTest_nometa, but add the
# metaclass that enables test timings output.
# This means that all subclasses also get the timing behaviour.
# However, if a different metaclass is *wanted* for an IrisTest subclass,
# this would cause a metaclass conflict.
# Instead, you can inherit from IrisTest_nometa and apply the
# @iristest_timing_decorator explicitly to your new testclass.
pass
get_result_path = IrisTest.get_result_path
class GraphicsTestMixin(object):
# nose directive: dispatch tests concurrently.
_multiprocess_can_split_ = True
def setUp(self):
# Acquire threading non re-entrant blocking lock to ensure
# thread-safe plotting.
_lock.acquire()
# Make sure we have no unclosed plots from previous tests before
# generating this one.
if MPL_AVAILABLE:
plt.close('all')
def tearDown(self):
# If a plotting test bombs out it can leave the current figure
# in an odd state, so we make sure it's been disposed of.
if MPL_AVAILABLE:
plt.close('all')
# Release the non re-entrant blocking lock.
_lock.release()
class GraphicsTest(GraphicsTestMixin, IrisTest):
pass
class GraphicsTest_nometa(GraphicsTestMixin, IrisTest_nometa):
# Graphicstest without the metaclass providing test timings.
pass
class TestGribMessage(IrisTest):
def assertGribMessageContents(self, filename, contents):
"""
Evaluate whether all messages in a GRIB2 file contain the provided
contents.
* filename (string)
The path on disk of an existing GRIB file
* contents
An iterable of GRIB message keys and expected values.
"""
messages = GribMessage.messages_from_filename(filename)
for message in messages:
for element in contents:
section, key, val = element
self.assertEqual(message.sections[section][key], val)
def assertGribMessageDifference(self, filename1, filename2, diffs,
skip_keys=(), skip_sections=()):
"""
Evaluate that the two messages only differ in the ways specified.
* filename[0|1] (string)
The path on disk of existing GRIB files
* diffs
An dictionary of GRIB message keys and expected diff values:
{key: (m1val, m2val),...} .
* skip_keys
An iterable of key names to ignore during comparison.
* skip_sections
An iterable of section numbers to ignore during comparison.
"""
messages1 = list(GribMessage.messages_from_filename(filename1))
messages2 = list(GribMessage.messages_from_filename(filename2))
self.assertEqual(len(messages1), len(messages2))
for m1, m2 in zip(messages1, messages2):
m1_sect = set(m1.sections.keys())
m2_sect = set(m2.sections.keys())
for missing_section in (m1_sect ^ m2_sect):
what = ('introduced'
if missing_section in m1_sect else 'removed')
# Assert that an introduced section is in the diffs.
self.assertIn(missing_section, skip_sections,
msg='Section {} {}'.format(missing_section,
what))
for section in (m1_sect & m2_sect):
# For each section, check that the differences are
# known diffs.
m1_keys = set(m1.sections[section]._keys)
m2_keys = set(m2.sections[section]._keys)
difference = m1_keys ^ m2_keys
unexpected_differences = difference - set(skip_keys)
if unexpected_differences:
self.fail("There were keys in section {} which \n"
"weren't in both messages and which weren't "
"skipped.\n{}"
"".format(section,
', '.join(unexpected_differences)))
keys_to_compare = m1_keys & m2_keys - set(skip_keys)
for key in keys_to_compare:
m1_value = m1.sections[section][key]
m2_value = m2.sections[section][key]
msg = '{} {} != {}'
if key not in diffs:
# We have a key which we expect to be the same for
# both messages.
if isinstance(m1_value, np.ndarray):
# A large tolerance appears to be required for
# gribapi 1.12, but not for 1.14.
self.assertArrayAlmostEqual(m1_value, m2_value,
decimal=2)
else:
self.assertEqual(m1_value, m2_value,
msg=msg.format(key, m1_value,
m2_value))
else:
# We have a key which we expect to be different
# for each message.
self.assertEqual(m1_value, diffs[key][0],
msg=msg.format(key, m1_value,
diffs[key][0]))
self.assertEqual(m2_value, diffs[key][1],
msg=msg.format(key, m2_value,
diffs[key][1]))
def skip_data(fn):
"""
Decorator to choose whether to run tests, based on the availability of
external data.
Example usage:
@skip_data
class MyDataTests(tests.IrisTest):
...
"""
no_data = (not iris.config.TEST_DATA_DIR
or not os.path.isdir(iris.config.TEST_DATA_DIR)
or os.environ.get('IRIS_TEST_NO_DATA'))
skip = unittest.skipIf(
condition=no_data,
reason='Test(s) require external data.')
return skip(fn)
def skip_gdal(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
GDAL library.
Example usage:
@skip_gdal
class MyGeoTiffTests(test.IrisTest):
...
"""
skip = unittest.skipIf(
condition=not GDAL_AVAILABLE,
reason="Test requires 'gdal'.")
return skip(fn)
def skip_plot(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
matplotlib library.
Example usage:
@skip_plot
class MyPlotTests(test.GraphicsTest):
...
"""
skip = unittest.skipIf(
condition=not MPL_AVAILABLE,
reason='Graphics tests require the matplotlib library.')
return skip(fn)
skip_grib = unittest.skipIf(not GRIB_AVAILABLE,
'Test(s) require "iris-grib" package, '
'which is not available.')
skip_sample_data = unittest.skipIf(not SAMPLE_DATA_AVAILABLE,
('Test(s) require "iris_sample_data", '
'which is not available.'))
skip_nc_time_axis = unittest.skipIf(
not NC_TIME_AXIS_AVAILABLE,
'Test(s) require "nc_time_axis", which is not available.')
skip_inet = unittest.skipIf(not INET_AVAILABLE,
('Test(s) require an "internet connection", '
'which is not available.'))
skip_stratify = unittest.skipIf(
not STRATIFY_AVAILABLE,
'Test(s) require "python-stratify", which is not available.')
def no_warnings(func):
"""
Provides a decorator to ensure that there are no warnings raised
within the test, otherwise the test will fail.
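    Example usage (the class and test names here are purely illustrative):
        class MyQuietTests(tests.IrisTest):
            @no_warnings
            def test_something(self):
                ...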
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
with mock.patch('warnings.warn') as warn:
result = func(self, *args, **kwargs)
self.assertEqual(0, warn.call_count,
('Got unexpected warnings.'
' \n{}'.format(warn.call_args_list)))
return result
return wrapped
| lgpl-3.0 |
ldirer/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number of
states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
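# Illustrative extra check: how many components does the variational model
# effectively use, i.e. how many mixing weights are not driven towards zero?
# (The 1e-2 cutoff is an arbitrary choice for this example.)
print("Effective components used by the Bayesian model: %d"
      % np.sum(dpgmm.weights_ > 1e-2))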
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/tests/test_delaunay.py | 1 | 5887 | import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_testfuncs(allfuncs=allfuncs):
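    # Builds one image-comparison test per analytic test function, exercising
    # the natural-neighbour and linear interpolants against a reference plot,
    # and registers each generated test in the module globals (see the loop
    # at the end of this function).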
def make_test(func):
filenames = [
'%s-%s' % (func.func_name, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'])
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = 'test_%s' % func.func_name
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.func_name] = make_test(func)
make_all_testfuncs()
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.19/_downloads/1355f558a1df99f9a2cec657b05c2b56/plot_sleep.py | 1 | 11464 | # -*- coding: utf-8 -*-
"""
.. _tut-sleep-stage-classif:
Sleep stage classification from polysomnography (PSG) data
==========================================================
.. note:: This code is taken from the analysis code used in [3]_. If you reuse
          this code, please consider citing this work.
This tutorial explains how to perform a toy polysomnography analysis that
answers the following question:
.. important:: Given two subjects from the Sleep Physionet dataset [1]_ [2]_,
namely *Alice* and *Bob*, how well can we predict the sleep
stages of *Bob* from *Alice's* data?
This problem is tackled as a supervised multiclass classification task. The aim
is to predict the sleep stage from 5 possible stages for each chunk of 30
seconds of data.
.. contents:: This tutorial covers:
:local:
:depth: 2
.. _Pipeline: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
.. _FunctionTransformer: https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html
.. _physionet_labels: https://physionet.org/physiobank/database/sleep-edfx/#sleep-cassette-study-and-data
""" # noqa: E501
# Authors: Alexandre Gramfort <[email protected]>
# Stanislas Chambon <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets.sleep_physionet.age import fetch_data
from mne.time_frequency import psd_welch
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
##############################################################################
# Load the data
# -------------
#
# Here we download the data from two subjects and the end goal is to obtain
# :term:`epochs` and its associated ground truth.
#
# MNE-Python provides us with
# :func:`mne.datasets.sleep_physionet.age.fetch_data` to conveniently download
# data from the Sleep Physionet dataset [1]_ [2]_.
# Given a list of subjects and records, the fetcher downloads the data and
# provides us, for each subject, with a pair of files:
#
# * ``-PSG.edf`` containing the polysomnography. The :term:`raw` data from the
# EEG helmet,
# * ``-Hypnogram.edf`` containing the :term:`annotations` recorded by an
# expert.
#
# Combining these two in a :class:`mne.io.Raw` object, we can then extract
# :term:`events` based on the descriptions of the annotations to obtain the
# :term:`epochs`.
#
# Read the PSG data and Hypnograms to create a raw object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALICE, BOB = 0, 1
[alice_files, bob_files] = fetch_data(subjects=[ALICE, BOB], recording=[1])
mapping = {'EOG horizontal': 'eog',
'Resp oro-nasal': 'misc',
'EMG submental': 'misc',
'Temp rectal': 'misc',
'Event marker': 'misc'}
raw_train = mne.io.read_raw_edf(alice_files[0])
annot_train = mne.read_annotations(alice_files[1])
raw_train.set_annotations(annot_train, emit_warning=False)
raw_train.set_channel_types(mapping)
# plot some data
raw_train.plot(duration=60, scalings='auto')
##############################################################################
# Extract 30s events from annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The Sleep Physionet dataset is annotated using `8 labels <physionet_labels>`:
# Wake (W), Stage 1, Stage 2, Stage 3, Stage 4 corresponding to the range from
# light sleep to deep sleep, REM sleep (R) where REM is the abbreviation for
# Rapid Eye Movement sleep, movement (M), and Stage (?) for any non-scored
# segment.
#
# We will work only with 5 stages: Wake (W), Stage 1, Stage 2, Stage 3/4, and
# REM sleep (R). To do so, we use the ``event_id`` parameter in
# :func:`mne.events_from_annotations` to select which events we are
# interested in, and we associate an event identifier to each of them.
annotation_desc_2_event_id = {'Sleep stage W': 1,
'Sleep stage 1': 2,
'Sleep stage 2': 3,
'Sleep stage 3': 4,
'Sleep stage 4': 4,
'Sleep stage R': 5}
events_train, _ = mne.events_from_annotations(
raw_train, event_id=annotation_desc_2_event_id, chunk_duration=30.)
# create a new event_id that unifies stages 3 and 4
event_id = {'Sleep stage W': 1,
'Sleep stage 1': 2,
'Sleep stage 2': 3,
'Sleep stage 3/4': 4,
'Sleep stage R': 5}
# plot events
mne.viz.plot_events(events_train, event_id=event_id,
sfreq=raw_train.info['sfreq'])
# keep the color-code for further plotting
stage_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
##############################################################################
# Create Epochs from the data based on the events found in the annotations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tmax = 30. - 1. / raw_train.info['sfreq']  # tmax is included
epochs_train = mne.Epochs(raw=raw_train, events=events_train,
event_id=event_id, tmin=0., tmax=tmax, baseline=None)
print(epochs_train)
##############################################################################
# Applying the same steps to the test data from Bob
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
raw_test = mne.io.read_raw_edf(bob_files[0])
annot_test = mne.read_annotations(bob_files[1])
raw_test.set_annotations(annot_test, emit_warning=False)
raw_test.set_channel_types(mapping)
events_test, _ = mne.events_from_annotations(
raw_test, event_id=annotation_desc_2_event_id, chunk_duration=30.)
epochs_test = mne.Epochs(raw=raw_test, events=events_test, event_id=event_id,
tmin=0., tmax=tmax, baseline=None)
print(epochs_test)
##############################################################################
# Feature Engineering
# -------------------
#
# Observing the power spectral density (PSD) plot of the :term:`epochs` grouped
# by sleep stage, we can see that different sleep stages have different
# signatures. These signatures remain similar between Alice and Bob's data.
#
# In the rest of this section we will create EEG features based on relative power
# in specific frequency bands to capture this difference between the sleep
# stages in our data.
# visualize Alice vs. Bob PSD by sleep stage.
fig, (ax1, ax2) = plt.subplots(ncols=2)
# iterate over the subjects
stages = sorted(event_id.keys())
for ax, title, epochs in zip([ax1, ax2],
['Alice', 'Bob'],
[epochs_train, epochs_test]):
for stage, color in zip(stages, stage_colors):
epochs[stage].plot_psd(area_mode=None, color=color, ax=ax,
fmin=0.1, fmax=20., show=False,
average=True, spatial_colors=False)
ax.set(title=title, xlabel='Frequency (Hz)')
ax2.set(ylabel='uV^2/hz (dB)')
ax2.legend(ax2.lines[2::3], stages)
plt.show()
##############################################################################
# Design a scikit-learn transformer from a Python function
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We will now create a function to extract EEG features based on relative power
# in specific frequency bands to be able to predict sleep stages from EEG
# signals.
def eeg_power_band(epochs):
"""EEG relative power band feature extraction.
    This function takes an ``mne.Epochs`` object and creates scikit-learn
    compatible EEG features based on relative power in specific frequency
    bands.
Parameters
----------
epochs : Epochs
The data.
Returns
-------
X : numpy array of shape [n_samples, 5]
Transformed data.
"""
# specific frequency bands
FREQ_BANDS = {"delta": [0.5, 4.5],
"theta": [4.5, 8.5],
"alpha": [8.5, 11.5],
"sigma": [11.5, 15.5],
"beta": [15.5, 30]}
psds, freqs = psd_welch(epochs, picks='eeg', fmin=0.5, fmax=30.)
# Normalize the PSDs
psds /= np.sum(psds, axis=-1, keepdims=True)
X = []
for fmin, fmax in FREQ_BANDS.values():
psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].mean(axis=-1)
X.append(psds_band.reshape(len(psds), -1))
return np.concatenate(X, axis=1)
##############################################################################
# Multiclass classification workflow using scikit-learn
# -----------------------------------------------------
#
# To answer the question of how well can we predict the sleep stages of Bob
# from Alice's data and avoid as much boilerplate code as possible, we will
# take advantage of two key features of scikit-learn:
# `Pipeline`_ , and `FunctionTransformer`_.
#
# A scikit-learn pipeline composes an estimator as a sequence of transforms
# and a final estimator, while the FunctionTransformer converts a Python
# function into an estimator-compatible object. In this manner we can create a
# scikit-learn estimator that takes :class:`mne.Epochs` thanks to the
# `eeg_power_band` function we just created.
pipe = make_pipeline(FunctionTransformer(eeg_power_band, validate=False),
RandomForestClassifier(n_estimators=100, random_state=42))
# Train
y_train = epochs_train.events[:, 2]
pipe.fit(epochs_train, y_train)
# Test
y_pred = pipe.predict(epochs_test)
# Assess the results
y_test = epochs_test.events[:, 2]
acc = accuracy_score(y_test, y_pred)
print("Accuracy score: {}".format(acc))
##############################################################################
# In short, yes. We can predict Bob's sleeping stages based on Alice's data.
#
# Further analysis of the data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can check the confusion matrix or the classification report.
print(confusion_matrix(y_test, y_pred))
##############################################################################
#
print(classification_report(y_test, y_pred, target_names=event_id.keys()))
##############################################################################
# Exercise
# --------
#
# Fetch 50 subjects from the Physionet database and run a 5-fold
# cross-validation leaving each time 10 subjects out in the test set.
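# A minimal sketch of one possible approach (shown as a comment only, not
# executed here: it reuses ``mapping``, ``annotation_desc_2_event_id``,
# ``event_id``, ``tmax`` and ``pipe`` from above, relies on scikit-learn's
# ``GroupKFold`` so that each test fold holds out 10 unseen subjects, and
# fetching 50 subjects downloads a large amount of data)::
#
#     from sklearn.model_selection import GroupKFold
#     subjects = list(range(50))
#     all_epochs, groups = [], []
#     for subject, (psg_file, hyp_file) in zip(
#             subjects, fetch_data(subjects=subjects, recording=[1])):
#         raw = mne.io.read_raw_edf(psg_file)
#         raw.set_annotations(mne.read_annotations(hyp_file),
#                             emit_warning=False)
#         raw.set_channel_types(mapping)
#         events, _ = mne.events_from_annotations(
#             raw, event_id=annotation_desc_2_event_id, chunk_duration=30.)
#         epochs = mne.Epochs(raw=raw, events=events, event_id=event_id,
#                             tmin=0., tmax=tmax, baseline=None)
#         all_epochs.append(epochs)
#         groups.extend([subject] * len(epochs.events))
#     epochs_all = mne.concatenate_epochs(all_epochs)
#     y_all = epochs_all.events[:, 2]
#     scores = []
#     for train_idx, test_idx in GroupKFold(n_splits=5).split(
#             y_all, y_all, groups=groups):
#         pipe.fit(epochs_all[train_idx], y_all[train_idx])
#         scores.append(pipe.score(epochs_all[test_idx], y_all[test_idx]))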
#
# References
# ----------
#
# .. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis of
# a sleep-dependent neuronal feedback loop: the slow-wave
# microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000).
#
# .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
# Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
# PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
# Research Resource for Complex Physiologic Signals.
# Circulation 101(23):e215-e220
#
# .. [3] Chambon, S., Galtier, M., Arnal, P., Wainrib, G. and Gramfort, A.
# (2018)A Deep Learning Architecture for Temporal Sleep Stage
# Classification Using Multivariate and Multimodal Time Series.
# IEEE Trans. on Neural Systems and Rehabilitation Engineering 26:
# (758-769).
#
| bsd-3-clause |
Funtimezzhou/TradeBuildTools | SAT eBook/chapter16/train_test_split.py | 2 | 2309 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# train_test_split.py
from __future__ import print_function
import datetime
import sklearn
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
from sklearn.metrics import confusion_matrix
from sklearn.qda import QDA
from sklearn.svm import LinearSVC, SVC
from create_lagged_series import create_lagged_series
if __name__ == "__main__":
# Create a lagged series of the S&P500 US stock market index
snpret = create_lagged_series(
"^GSPC", datetime.datetime(2001,1,10),
datetime.datetime(2005,12,31), lags=5
)
# Use the prior two days of returns as predictor
# values, with direction as the response
X = snpret[["Lag1","Lag2"]]
y = snpret["Direction"]
# Train/test split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.8, random_state=42
)
# Create the (parametrised) models
print("Hit Rates/Confusion Matrices:\n")
models = [("LR", LogisticRegression()),
("LDA", LDA()),
("QDA", QDA()),
("LSVC", LinearSVC()),
("RSVM", SVC(
C=1000000.0, cache_size=200, class_weight=None,
coef0=0.0, degree=3, gamma=0.0001, kernel='rbf',
max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
),
("RF", RandomForestClassifier(
n_estimators=1000, criterion='gini',
max_depth=None, min_samples_split=2,
min_samples_leaf=1, max_features='auto',
bootstrap=True, oob_score=False, n_jobs=1,
random_state=None, verbose=0)
)]
# Iterate through the models
for m in models:
# Train each of the models on the training set
m[1].fit(X_train, y_train)
# Make an array of predictions on the test set
pred = m[1].predict(X_test)
# Output the hit-rate and the confusion matrix for each model
print("%s:\n%0.3f" % (m[0], m[1].score(X_test, y_test)))
print("%s\n" % confusion_matrix(pred, y_test)) | gpl-3.0 |
huobaowangxi/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
RayMick/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
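    Examples
    --------
    A small worked example (label values chosen arbitrarily for illustration):
    >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
    array([[1, 1],
           [0, 2]])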
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
AlexaVillaume/RadiativeTransfer | planck_curve.py | 1 | 3126 | import sys
import numpy as np
import matplotlib.pyplot as plt
def compute_planck_nu(freq, temp):
"""
Input: Frequency [s^-1 numpy array], temp [K float]
Output: Brightness [erg s^-1 cm^-2 Hz^-1 ster^-1 numpy array]
All constants in cgs
Expression for brightness taken from Rybicki &
Lightman eq. 1.51
"""
brightness = ((2*planck_c*freq**3.)/(light_speed**2))*\
1./(np.exp(planck_c*freq/(boltzmann_c*temp))-1)
return brightness
def compute_planck_lamb(lamb, temp):
"""
Expression for brightness taken from Rybicki &
Lightman eq. 1.52
"""
brightness = ((2*planck_c*light_speed**2.)/(lamb**5.))*\
1./(np.exp((planck_c*light_speed)/(lamb*boltzmann_c*temp))-1)
return brightness
def compute_planck_tderive(freq, temp):
"""
Compute the curve of the derivative of the Planck curve
with respect to temperature.
Expression for this taken from Rybicki & Lightman eq. 1.55
"""
exp1 = (2*(planck_c**2)*(freq**4))/((light_speed**2)*(boltzmann_c)*(temp**2))
exp2 = np.exp(planck_c*freq/(boltzmann_c*temp))
return exp1*(exp2/(exp2-1)**2)
def plot_curve(freq, flux, peak, temp, **keywords):
plt.plot(freq, flux, color='k', lw=2, label='Temp = {}'.format(temp))
    if keywords.get('lamb', False):  # tolerate calls that omit the 'lamb' keyword
plt.axvline(peak['peak'], ls='--', label='y = {}'.format(peak['y']))
else:
plt.axvline(peak['peak'], ls='--', label='x = {}'.format(peak['x']))
#plt.gca().invert_xaxis()
plt.xscale('log')
#plt.yscale('log')
plt.legend(frameon=False, loc='upper left')
def total_brightness_is(freq, flux):
"""
Integrate Planck spectrum over frequency to get
total brightness. Because of how the arrays are
defined need to reverse them for the integration
"""
return np.trapz(flux[::-1], freq[::-1])
def peak_ofcurve_is(freq, flux, temp, **keywords):
peak = np.where(flux == max(flux))
peak_space = freq[peak]
    if keywords.get('lamb', False):  # tolerate calls that omit the 'lamb' keyword
const = (planck_c*light_speed)/(boltzmann_c * temp)
return {'peak': peak_space, 'y': (1/peak_space)*const}
else:
const = planck_c/(boltzmann_c * temp)
return {'peak': peak_space, 'x': peak_space*const}
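# Note: the dimensionless peak locations returned above follow Wien's
# displacement law: x = h*nu_max/(k*T) ~ 2.82 in the frequency form and
# y = h*c/(lambda_max*k*T) ~ 4.97 in the wavelength form.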
light_speed = 2.9979e10 # cm s^-1
planck_c = 6.626e-27 # erg s (Planck constant)
boltzmann_c = 1.38e-16 # erg K^-1 (Boltzmann constant)
if __name__ == '__main__':
temp = 10e5
    lamb = np.linspace(10e-2, 10e-8, int(10e6))  # np.linspace needs an integer sample count
    freq = np.linspace(10e20, 10e4, int(10e6))
flux = compute_planck_lamb(lamb, temp)
peak = peak_ofcurve_is(lamb, flux, temp, lamb=True)
plot_curve(lamb, flux, peak, temp, lamb=True)
plt.savefig('planck_w_peak_lamb.pdf')
sys.exit()
#flux = compute_planck_nu(freq, temp)
#print total_brightness_is(freq, flux)
#peak = peak_ofcurve_is(freq, flux, temp)
#plot_curve(freq, flux, peak, temp)
#plt.savefig('planck_w_peak.pdf')
#plt.cla()
#plt.clf()
#sys.exit()
dBdT = compute_planck_tderive(freq, temp)
i = np.where(~np.isnan(dBdT))
peak = peak_ofcurve_is(freq[i], dBdT[i], temp)
plot_curve(freq, dBdT, peak, temp)
plt.savefig('derive_w_peak.pdf')
| apache-2.0 |
356255531/SpikingDeepRLControl | code/EnvBo/Q-Learning/Arm/goals.py | 2 | 1302 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
MARKERSIZE = 15
LINEWIDTH = 3
SCENARIOS = [(0,30),(35,45),(0,150),(0,30)]
class Goal_Arm:
def __init__(self, scene_id, arm_length1, arm_length2):
self.ARM_LENGTH_1 = arm_length1
self.ARM_LENGTH_2 = arm_length2
angle1 = np.pi*SCENARIOS[scene_id][0]/180.0 #float(np.random.randint(0,359))*2.0*np.pi/360.0
angle2 = np.pi*SCENARIOS[scene_id][1]/180.0 #float(np.random.randint(0,359))*2.0*np.pi/360.0
x = arm_length1*np.cos(angle1) + arm_length2*np.cos(angle1+angle2)
y = arm_length1*np.sin(angle1) + arm_length2*np.sin(angle1+angle2)
self.pos = np.array([x, y])
def plot(self, ax):
ax.plot( self.pos[0],
self.pos[1],
'bo', markersize=MARKERSIZE, markeredgewidth=LINEWIDTH)
def get_position(self):
# goal position is normalized 2D [x,y] (positions are between -1 and 1)
normalized_pos = self.pos / (self.ARM_LENGTH_1+self.ARM_LENGTH_2)
return normalized_pos
def get_state(self):
# goal state is only goal position
normalized_pos = self.pos / (self.ARM_LENGTH_1+self.ARM_LENGTH_2)
return normalized_pos | gpl-3.0 |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/tests/test_bbox_tight.py | 1 | 3093 | from matplotlib import rcParams
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
import numpy as np
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'), tol=15)
def test_bbox_inches_tight():
"Test that a figure saved using bbox_inches'tight' is clipped right"
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[ 139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.array([0.0] * len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1,1)
for row in xrange(rows):
plt.bar(ind, data[row], width, bottom=yoff)
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc = (1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(range(10), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, )
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(range(10), range(10))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Spico197/Spico197.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts it for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either one from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
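# A minimal example row (columns separated by tabs; the values below are made
# up purely for illustration):
#
#     pub_date	title	venue	excerpt	citation	site_url	paper_url	url_slug
#     2015-10-01	My Paper	Journal 1	Short summary.	Your Name. (2015). "My Paper." Journal 1.	http://example.com/paper	http://example.com/paper.pdf	my-paper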
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
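# For example, if the data lived in an Excel sheet instead (hypothetical
# filename), one could write: publications = pd.read_excel("publications.xlsx")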
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), you can comment out or delete the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| gpl-3.0 |
CETodd/4501project | main_pm_mrf.py | 1 | 23945 |
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import scipy.interpolate
import scipy.ndimage
import numpy as np
import time
import argparse
import warnings
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('style_image_paths', metavar='ref', nargs='+', type=str,
help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation weight")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
help="Scale the weighing of the style")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
help="Number of iterations")
parser.add_argument("--content_loss_type", default=0, type=int,
help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype ='float32')
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print ("[congrid] dimensions error. "
"This routine currently only support "
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]  # np.indices needs integer dims
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple(cd)]  # index with a tuple of index arrays
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        trorder = [ndims - 1] + list(range(ndims - 1))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = [i for i in range(newcoords.ndim)]
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n",
"Currently only \'neighbour\', \'nearest\',\'linear\',",
"and \'spline\' are supported.")
return None
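# Illustrative usage sketch for congrid (not part of the original script; the
# array and target shape are made up to show the expected contract):
#
#   >>> a = np.arange(4, dtype=float).reshape(2, 2)
#   >>> congrid(a, (4, 4), method='neighbour').shape
#   (4, 4)
#
# 'neighbour' repeats the nearest source cell, while 'linear' and 'spline'
# interpolate between cells; all methods keep the number of dimensions fixed.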
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.style_image_paths
style_image_paths = [path for path in args.style_image_paths]
result_prefix = args.result_prefix
content_weight = args.content_weight
total_variation_weight = args.tv_weight
scale_sizes = []
size = args.img_size
while size > 64:
    scale_sizes.append(size // 2)  # integer division so the scale sizes stay ints
    size //= 2
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
read_mode = "color"
style_weights = []
if len(style_image_paths) != len(args.style_weight):
weight_sum = sum(args.style_weight) * args.style_scale
count = len(style_image_paths)
for i in range(len(style_image_paths)):
style_weights.append(weight_sum / count)
else:
style_weights = [weight*args.style_scale for weight in args.style_weight]
def pooling_func(x):
# return AveragePooling2D((2, 2), strides=(2, 2))(x)
return MaxPooling2D((2, 2), strides=(2, 2))(x)
#start proc_img
def preprocess_image(image_path, sc_size=args.img_size, load_dims=False):
global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
mode = "RGB"
# mode = "RGB" if read_mode == "color" else "L"
img = imread(image_path, mode=mode) # Prevents crashes due to PNG images (ARGB)
if load_dims:
img_WIDTH = img.shape[0]
img_HEIGHT = img.shape[1]
aspect_ratio = float(img_HEIGHT) / img_WIDTH
img_width = sc_size
img_height = int(img_width * aspect_ratio)
img = imresize(img, (img_width, img_height)).astype('float32')
# RGB -> BGR
img = img[:, :, ::-1]
img[:, :, 0] -= 103.939
img[:, :, 1] -= 116.779
img[:, :, 2] -= 123.68
img = np.expand_dims(img, axis=0)
return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
x = x.reshape((img_width, img_height, 3))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# BGR -> RGB
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
combination_prev = ""
for scale_size in scale_sizes:
base_image = K.variable(preprocess_image(base_image_path, scale_size, True))
style_reference_images = [K.variable(preprocess_image(path)) for path in style_image_paths]
# this will contain our generated image
if combination_prev != "":
combination_image = imresize(combination_prev, (img_width, img_height), interp="bilinear").astype('float32')
else:
combination_image = K.placeholder((1, img_width, img_height, 3)) # tensorflow
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2 # Content and Output image not considered
# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)
shape = (nb_tensors, img_width, img_height, 3) #tensorflow
#build the model
model_input = Input(tensor=input_tensor, shape=shape)
# build the VGG16 network with our 3 images as input
x = Convolution2D(64, 3, 3, activation='relu', name='conv1_1', border_mode='same')(model_input)
x = Convolution2D(64, 3, 3, activation='relu', name='conv1_2', border_mode='same')(x)
x = pooling_func(x)
x = Convolution2D(128, 3, 3, activation='relu', name='conv2_1', border_mode='same')(x)
x = Convolution2D(128, 3, 3, activation='relu', name='conv2_2', border_mode='same')(x)
x = pooling_func(x)
x = Convolution2D(256, 3, 3, activation='relu', name='conv3_1', border_mode='same')(x)
x = Convolution2D(256, 3, 3, activation='relu', name='conv3_2', border_mode='same')(x)
x = Convolution2D(256, 3, 3, activation='relu', name='conv3_3', border_mode='same')(x)
x = pooling_func(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv4_1', border_mode='same')(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv4_2', border_mode='same')(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv4_3', border_mode='same')(x)
x = pooling_func(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv5_1', border_mode='same')(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv5_2', border_mode='same')(x)
x = Convolution2D(512, 3, 3, activation='relu', name='conv5_3', border_mode='same')(x)
x = pooling_func(x)
model = Model(model_input, x)
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
print("Weights Path: ", weights)
model.load_weights(weights)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions
# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
gram = K.dot(features, K.transpose(features))
return gram
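    # For reference (standard definition, not extra code from the original
    # script): if F is the (channels x positions) matrix produced by
    # batch_flatten above, the Gram matrix is
    #
    #   G = F . F^T,   G[i, j] = sum_k F[i, k] * F[j, k]
    #
    # i.e. channel-wise inner products that capture feature co-activation
    # statistics independently of spatial position.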
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
assert K.ndim(x) == 4
a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
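    # For reference, the expression above computes
    #
    #   TV(x) = sum_{i,j} ( (x[i,j] - x[i+1,j])^2 + (x[i,j] - x[i,j+1])^2 )^1.25
    #
    # summed over pixels and channels; the 1.25 exponent simply softens the
    # penalty relative to a plain squared difference.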
def mrf_loss(style, combination, patch_size=3, patch_stride=1):
# extract patches from feature maps with PatchMatch algorithm
style_patches = style_pmatcher.get_patches_for(style)
        style_patches_norm = style_pmatcher.normalize_patches(style_patches)
        combination_patches = style_pmatcher.get_patches_for(combination)
# style_patches, style_patches_norm = make_patches(style, patch_size, patch_stride)
style_pmatcher.update(style, True)
        patch_coords = style_pmatcher.coords  # coords is an attribute of PatchMatcher, not a method
best_style_patches = K.reshape(patch_coords, K.shape(style_patches))
loss = K.sum(K.square(best_style_patches - combination_patches)) / patch_size ** 2
return loss
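    # Sketch of the intended flow for the MRF loss above (stated as an
    # assumption, since style_pmatcher is constructed elsewhere in this
    # script): the matcher's target holds the style feature patches, update()
    # is fed the current combination features, and coords then maps each
    # combination patch to its best-matching style patch; the loss is the
    # squared distance between matched patches, scaled by 1 / patch_size**2.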
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
channels = K.shape(base)[-1]
size = img_width * img_height
if args.content_loss_type == 1:
multiplier = 1 / (2. * channels ** 0.5 * size ** 0.5)
elif args.content_loss_type == 2:
multiplier = 1 / (channels * size)
else:
multiplier = 1.
return multiplier * K.sum(K.square(combination - base))
# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
combination_features)
channel_index = -1
#Style Loss calculation
mrf_layers = ['conv3_1', 'conv4_1']
# feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in mrf_layers:
output_features = outputs_dict[layer_name]
shape = shape_dict[layer_name]
combination_features = output_features[nb_tensors - 1, :, :, :]
style_features = output_features[1:nb_tensors - 1, :, :, :]
sl = []
for j in range(nb_style_images):
sl.append(mrf_loss(style_features[j], combination_features))
for j in range(nb_style_images):
loss += (style_weights[j] / len(mrf_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
x = x.reshape((1, img_width, img_height, 3))
outs = f_outputs([x])
loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
# # this Evaluator class makes it possible
# # to compute loss and gradients in one pass
# # while retrieving them via two separate functions,
# # "loss" and "grads". This is done because scipy.optimize
# # requires separate functions for loss and gradients,
# # but computing them separately would be inefficient.
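    # Illustrative call pattern this enables (how fmin_l_bfgs_b will use it
    # further down):
    #
    #   evaluator.loss(x)   # runs f_outputs once, returns the loss, caches grads
    #   evaluator.grads(x)  # returns the cached gradients without re-running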
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
evaluator = Evaluator()
# (L-BFGS)
if "content" in args.init_image or "gray" in args.init_image:
        x = preprocess_image(base_image_path, sc_size=scale_size, load_dims=True)
elif "noise" in args.init_image:
x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
if K.image_dim_ordering() == "th":
x = x.transpose((0, 3, 1, 2))
else:
print("Using initial image : ", args.init_image)
x = preprocess_image(args.init_image)
num_iter = args.num_iter
prev_min_val = -1
for i in range(num_iter):
print("Starting iteration %d of %d" % ((i + 1), num_iter))
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)
combination_prev = x
if prev_min_val == -1:
prev_min_val = min_val
improvement = (prev_min_val - min_val) / prev_min_val * 100
print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
prev_min_val = min_val
# save current generated image
img = deprocess_image(x.copy())
img_ht = int(img_width * aspect_ratio)
print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
img = imresize(img, (img_width, img_ht), interp="bilinear")
fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
imsave(fname, img)
end_time = time.time()
print('Image saved as', fname)
print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))
| mit |
mnori/foldatlas | foldatlas/importers.py | 1 | 44199 | import settings
import models
from models import Strain, Gene, Transcript, Feature, \
GeneLocation, NucleotideMeasurementRun, StructurePredictionRun, NucleotideMeasurementSet, \
Structure, RawReactivities, RawReplicateCounts, values_str_unpack_float, Bppm
from utils import Timeline
from database import engine, db_session
import zlib, base64
import os
import math
import re
from sqlalchemy import and_
from Bio import SeqIO, AlignIO
from Bio.Align.Applications import ClustalwCommandline
# Base and AlignmentEntry are referenced below and assumed to live in models
from models import Base, AlignmentEntry
def import_db(level):
try:
import_scratch()
# print("Rebuilding schema...")
# if level == 1:
# elif level == 2:
# import_l2()
# print("Import Complete.")
except Exception as e: # catch the exception so we can display a nicely formatted error message
print(str(e).replace("\\n", "\n").replace("\\t", "\t"))
raise e
# Import database from scratch using the raw text files
def import_scratch():
    # Delete the whole DB and recreate again, much more reliable than using ORM
db_session.execute("DROP DATABASE "+settings.db_name)
db_session.execute("CREATE DATABASE "+settings.db_name)
db_session.execute("USE "+settings.db_name)
db_session.commit()
    # Create all the tables.
Base.metadata.create_all(bind=engine)
    # Add the annotations
SequenceImporter().execute()
    # Add DMS reactivities. This should be raw reactivities from plus and minus first
    # Includes adding coverage and normalisation
ReactivitiesImporter().execute(settings.dms_reactivities_experiment)
    # Import all available RNA structures
StructureImporter().execute(settings.structures_in_silico)
StructureImporter().execute(settings.structures_in_vivo)
# Do PCA analysis on the structures
PcaImporter().execute(settings.structures_in_silico)
PcaImporter().execute(settings.structures_in_vivo)
# Imports raw technical replicate data into the raw_replicate_counts table
def import_raw_replicate_counts():
print("Importing raw replicate counts...")
db_session.execute("USE "+settings.db_name)
for lane_type in settings.raw_replicate_counts_keys:
entries = settings.raw_replicate_counts_keys
for bio_rep_ind in range(0, len(entries[lane_type])):
for tech_rep_ind in range(0, len(entries[lane_type][bio_rep_ind])):
tech_key = entries[lane_type][bio_rep_ind][tech_rep_ind]
# load the counts from the tech key
input_filepath = settings.data_folder+"/reps/"+tech_key+"/results.dist.txt"
print("Importing "+input_filepath)
import_raw_replicate_counts_file(
db_session, lane_type, bio_rep_ind + 1, tech_rep_ind + 1, input_filepath)
print("Committing...")
db_session.commit()
print("Done.")
# walk through replicates
def import_raw_replicate_counts_file(
db_session, lane_type, bio_rep_id, tech_rep_id, input_filepath):
n = 0
with open(input_filepath, "r") as i:
for line in i:
if n % 1000 == 0:
print(".", end="", flush=True)
n += 1
bits = line.strip().split("\t")
tid = bits[0]
reacts = bits[1:]
reacts_str = "\t".join(reacts)
counts = RawReplicateCounts(
nucleotide_measurement_run_id=1,
transcript_id=tid,
minusplus_id=lane_type,
bio_replicate_id=bio_rep_id,
tech_replicate_id=tech_rep_id,
values=reacts_str
)
db_session.add(counts)
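# For reference, each line of results.dist.txt is assumed to be tab separated,
# one transcript per line:
#
#   AT1G01010.1<TAB>0<TAB>3<TAB>12<TAB>...
#
# i.e. a transcript ID followed by one count per position; the counts are kept
# verbatim as the tab-joined RawReplicateCounts.values string.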
# Parses genome sequence .fa and annotation .gff3 files into the database.
class SequenceImporter():
# how many genes to process before committing rows to the database.
gene_chunk_size = 2500
genes_to_write = []
transcripts_to_write = []
features_to_write = []
genes_seen = {}
transcripts_seen = {}
# for duplicate transcript ID detection
transcript_ids_seen_this_strain = set()
# limit on genes to process - for testing purposes
# None means it imports everything
# gene_limit = 10
gene_limit = None
# Set to true for testing
chr1_only = False
# Only import these genes. Can be None or a list.
# filter_genes = ["AT3G29370", "AT3G48550", "AT2G31360"]
filter_genes = None
# limit on chromosome sequence to add, in bp - for testing
bp_limit = None
gene_location_chunk_size = 1000
# max strains
strain_limit = None
# Use the genome sequence and annotation files to populate the database.
def execute(self):
n_strains = 0
for strain in settings.strains:
self.execute_strain(strain)
n_strains += 1
if self.strain_limit != None and n_strains >= self.strain_limit:
break
db_session.commit()
def execute_strain(self, strain_config):
self.transcript_ids_seen_this_strain = set()
print("Hydrating strain ["+strain_config["name"]+"]")
# add the strain
strain = Strain(id=strain_config["name"], description=strain_config["description"])
db_session.add(strain)
db_session.commit()
        # add the chromosomes
self.execute_chrosomomes(strain_config)
# add genes, transcripts, and feature annotations
self.execute_genes(strain_config)
self.cache_gene_locations(strain_config)
# Adding chromosomes to the DB is a little bit tricky, since the sequences are huge.
# Therefore a LOAD DATA INFILE strategy is used to import the data.
def execute_chrosomomes(self, strain_config):
print("Adding chromosomes...")
filepath = settings.data_folder+"/"+strain_config["sequence_filename"]
for record in SeqIO.parse(filepath, "fasta"): # loop through chromosomes
chr_id = record.id
if (chr_id in settings.ignored_chromosomes):
continue
seq_str = str(record.seq)
temp_filepath = settings.temp_folder+"/tmp.fa"
# Save a row of chromosome data to a text file
temp_file = open(temp_filepath, "w")
temp_file.write(strain_config["name"]+"\t"+chr_id+"\t"+seq_str)
temp_file.close()
# Import file into the DB
sql = """
LOAD DATA LOCAL INFILE '/tmp/tmp.fa'
REPLACE INTO TABLE chromosome
"""
db_session.execute(sql)
db_session.commit()
# Delete the file
os.remove(temp_filepath)
print("Added ["+chr_id+"]")
print("Finished adding chromosomes to ["+strain_config["name"]+"]")
def execute_genes(self, strain_config):
# gotta stratify this by chromosome
n_genes_added = {}
feature_rows = []
# open the annotation file and go through it line by line
with open(settings.data_folder+"/"+strain_config["annotation_filename"]) as gff_file:
for gff_line in gff_file:
if gff_line[0] == "#": # ignore comments
continue
bits = gff_line.split("\t")
feature_type = bits[2]
if feature_type == "gene":
if len(feature_rows) > 0: # this is needed to stop it going wrong at the beginning
# feature_rows contains all the data for a single gene.
self.execute_gene(feature_rows, strain_config["name"])
# reset the data collection
feature_rows = []
# initialise counter if it needs doing
if chr_id not in n_genes_added:
n_genes_added[chr_id] = 0
# make sure we haven't hit the limit
if self.gene_limit != None and n_genes_added[chr_id] >= self.gene_limit:
# if limit is hit, must continue since there might be other
# chromosomes to process
continue
else:
n_genes_added[chr_id] += 1
# show progress
if n_genes_added[chr_id] % 100 == 0:
print (str(n_genes_added[chr_id])+" genes processed")
# commit at regular intervals
if n_genes_added[chr_id] % self.gene_chunk_size == 0:
self.commit_all()
# keep track of the chromosome ID
chr_id = bits[0]
# this is for testing - only do the first chromosome
if self.chr1_only and chr_id != "Chr1":
break
# add feature row
feature_rows.append(bits)
# gotta add that last entry, if needed
if len(feature_rows) > 0 and \
(self.gene_limit == None or n_genes_added[chr_id] < self.gene_limit):
self.execute_gene(feature_rows, strain_config["name"])
n_genes_added[chr_id] += 1
self.commit_all()
print("Genes added total: "+str(n_genes_added))
def execute_gene(self, feature_rows, strain_id):
features = {}
sequence = None
transcript = None
gene_id = None
min_start = None
max_end = None
for feature_row in feature_rows: # Loop through annotation rows in the gff file, all related to the current gene
# keep track of start and end
start = feature_row[3]
end = feature_row[4]
direction = "forward" if feature_row[6] == "+" else "reverse"
chromosome_id = feature_row[0]
feature_type = feature_row[2]
attribs = feature_row[8].strip()
# This causes bugs.
# if feature_type == "gene": # Handle gene entries
# gene_id = attribs.split(";")[0].split(":")[1] # grab the gene ID - we'll want this for later
new_gene_id = self.find_attribs_value("ID=Gene", attribs)
if new_gene_id != None:
# only deal with proper genes. setting gene_id to None means nothing else will be processed.
# so it will essentially skip non-"gene" entries.
if feature_type != "gene":
gene_id = None
continue
# Check against filter list if there is one
if self.filter_genes != None and new_gene_id not in self.filter_genes:
# filter list exists, and gene is not in filter list
# skip this gene
return
gene_id = new_gene_id
# add the Gene entry - if it hasn't been already
if gene_id not in self.genes_seen:
gene = Gene(gene_id)
self.genes_to_write.append(gene)
self.genes_seen[gene_id] = gene
elif gene_id != None : # Handle transcript entries - if the gene is legit
transcript_id = self.find_attribs_value("ID=Transcript", attribs)
if transcript_id != None: # it's a transcript entry
# add the Transcript entry - if it hasn't been already
transcript_id = self.ensure_unique_transcript_id(transcript_id)
if transcript_id not in self.transcripts_seen:
transcript = Transcript(
id=transcript_id, gene_id=gene_id
)
self.transcripts_to_write.append(transcript)
self.transcripts_seen[transcript.id] = transcript
else: # Handle transcript feature entries
# for some reason, features for a given strain/transcript
# combination are not always added
transcript_id = self.find_attribs_value("Parent=Transcript", attribs)
if transcript_id != None: # it's a transcript feature entry
# put a filter here? some elements are not worth storing?
self.features_to_write.append(Feature(
transcript_id=transcript_id,
type_id=feature_row[2],
strain_id=strain_id,
chromosome_id=chromosome_id,
start=start,
end=end,
direction=direction
))
else:
pass # this happens for pseudogenes and TEs - which we aint interested in
# Cache gene locations in a redundant table by looking at the feature locations.
def cache_gene_locations(self, strain_config):
print("Caching gene locations...")
start = 0
while(True):
sql = ( "SELECT "
" transcript.gene_id, "
" feature.chromosome_id, "
" feature.direction, "
" MIN(start) min_start, "
" MAX(end) max_end "
"FROM feature, transcript "
"WHERE feature.transcript_id = transcript.id "
"AND feature.strain_id = 'Col_0' "
"GROUP BY transcript.gene_id "
"LIMIT "+str(start)+", "+str(self.gene_location_chunk_size))
results = engine.execute(sql)
if results.rowcount == 0:
break
for row in results:
db_session.add(GeneLocation(
gene_id=row["gene_id"],
strain_id=strain_config["name"],
chromosome_id=row["chromosome_id"],
start=row["min_start"],
end=row["max_end"],
direction=row["direction"]
))
start += self.gene_location_chunk_size
db_session.commit()
def commit_all(self):
self.commit_entities_list(self.genes_to_write, "Genes")
self.commit_entities_list(self.transcripts_to_write, "Transcripts")
self.commit_entities_list(self.features_to_write, "Features")
self.genes_to_write = []
self.transcripts_to_write = []
self.features_to_write = []
def commit_entities_list(self, entities, label):
print("Committing "+label+"...")
for entity in entities:
db_session.add(entity)
db_session.commit()
print("...done.")
def ensure_unique_transcript_id(self, transcript_id):
version = 1
candidate_transcript_id = transcript_id
while True:
if candidate_transcript_id in self.transcript_ids_seen_this_strain:
version += 1
candidate_transcript_id = transcript_id+"_v"+str(version)
else:
self.transcript_ids_seen_this_strain.add(transcript_id)
if candidate_transcript_id != transcript_id:
print("Transcript ID ["+transcript_id+"] was a duplicate, renamed to ["+candidate_transcript_id+"]")
return candidate_transcript_id
# Parse out the value of a key in the attribs field
# e.g.
# find_attribs_value("Parent=Transcript", "ID=five_prime_UTR:AT5G67630.1.1;Parent=Transcript:AT5G67630.1")
# will return
# AT5G67630.1
#
def find_attribs_value(self, key, attribs_str):
entries = attribs_str.split(";")
for entry in entries:
entry_bits = entry.split(":")
if (entry_bits[0] == key):
return ":".join(entry_bits[1:]) # we need all of the bits in the array
return None
# Class for doing alignments, one run per transcript.
class TranscriptAligner():
def align(self):
transcript_ids = self.fetch_transcript_ids()
for transcript_id in transcript_ids:
self.process_transcript_id(transcript_id)
def process_transcript_id(self, transcript_id):
print("Aligning ["+transcript_id+"]...")
sys.stdout.flush()
seqs_to_align = list(Transcript(transcript_id).get_sequences().values())
if len(seqs_to_align) <= 1:
print("Warning - not enough sequences to proceed with alignment")
return
temp_filepath = settings.temp_folder+"/tmp.fa"
# output to a fasta file for clustalw alignment
output_handle = open(temp_filepath, "w")
SeqIO.write(seqs_to_align, output_handle, "fasta")
output_handle.close()
# run the clustalw alignment
clustalw_cline = ClustalwCommandline("clustalw2", infile=temp_filepath, quicktree=True)
results = clustalw_cline()
# parse the results into the database
entries = AlignIO.read(settings.temp_folder+"/tmp.aln", "clustal")
for entry in entries:
obj = AlignmentEntry(transcript_id, entry.id, str(entry.seq))
db_session.add(obj)
db_session.commit()
print("Aligned")
# Fetch all the transcript IDs from the database. Order them for consistency
def fetch_transcript_ids(self):
transcript_ids = []
sql = "SELECT id FROM transcript ORDER BY id ASC"
rows = engine.execute(sql)
for row in rows:
transcript_ids.append(row["id"])
return transcript_ids
# Loads coverage data from a single file into the database.
# TODO ditch this and put coverages in ReactivitiesRaw instead
class CoverageImporter():
def execute(self, experiment_config):
from sqlalchemy import update
transcript_ids = get_inserted_transcript_ids()
coverage_filepath = experiment_config["coverage_filepath"]
print("coverage_filepath: ["+coverage_filepath+"]")
if not os.path.isfile(coverage_filepath):
print("WARNING: skipped import of missing ["+coverage_filepath+"]")
return
with open(coverage_filepath) as coverage_file:
for coverage_line in coverage_file:
(transcript_id, coverage) = coverage_line.strip().split("\t")
# skip transcripts not already in DB
if transcript_id not in transcript_ids:
continue
update_q = update(NucleotideMeasurementSet) \
.where(and_(
NucleotideMeasurementSet.nucleotide_measurement_run_id==experiment_config["nucleotide_measurement_run_id"],
NucleotideMeasurementSet.transcript_id==transcript_id,
))\
.values(coverage=coverage)
db_session.execute(update_q)
db_session.commit()
# Inserts DMS reactivities into the DB.
# Includes the normalisation step that starts from raw reactivities
class ReactivitiesImporter():
def execute(self, experiment_config):
# Wipe the tables
# Add the run entity
experiment = NucleotideMeasurementRun(
id=experiment_config["nucleotide_measurement_run_id"],
strain_id=experiment_config["strain_id"],
description=experiment_config["description"]
)
db_session.add(experiment)
db_session.commit() # insert the experiment into the DB.
print("Inserting data from ["+experiment_config["nucleotides_filepath"]+"] ...")
transcript_ids = get_inserted_transcript_ids()
transcripts = self.load_transcript_seqs()
n = 0
with open(experiment_config["nucleotides_filepath"], "r") as input_file:
while True:
n += 1
if n % 100 == 0:
print("Imported ["+str(n)+"] transcript reactivity sets")
plus_line = input_file.readline().strip()
if plus_line == "": # reached the end of the file
break
minus_line = input_file.readline().strip()
transcript_id, plus_counts = self.unpack_counts(plus_line)
transcript_id, minus_counts = self.unpack_counts(minus_line)
# skip transcripts not already in DB
if transcript_id not in transcript_ids:
continue
# print("Inserting reactivities for ["+transcript_id+"]")
# add the raw data
measurement_set = RawReactivities(
nucleotide_measurement_run_id=experiment_config["nucleotide_measurement_run_id"],
transcript_id=transcript_id,
minus_values="\t".join(list(map(str, minus_counts))),
plus_values="\t".join(list(map(str, plus_counts)))
)
db_session.add(measurement_set)
# # normalise the data and add that too
normalised_reactivities = self.norm_2_8(
transcript_id, transcripts[transcript_id], plus_counts, minus_counts)
if normalised_reactivities == None:
continue
coverage = self.calc_coverage(plus_counts, minus_counts)
normalised_set = NucleotideMeasurementSet(
nucleotide_measurement_run_id=experiment_config["nucleotide_measurement_run_id"],
transcript_id=transcript_id,
coverage=coverage,
values="\t".join(list(map(str, normalised_reactivities)))
)
db_session.add(normalised_set)
db_session.commit()
# Calc and return average coverage per base, plus and minus lanes summed
def calc_coverage(self, plus_counts, minus_counts):
tot = 0
n = 0
for pos in range(0, len(minus_counts)):
if minus_counts[pos] != None:
n += 1
tot += minus_counts[pos]
tot += plus_counts[pos]
return tot / n
# Carry out 2-8% normalisation using plus and minus values for a given transcript
# Could potentially add other normalisation methods as well
def norm_2_8(self, transcript_id, seq, plus_counts, minus_counts):
if len(seq) != len(plus_counts):
print("Skipped ["+transcript_id+"] due to length mismatch")
return None
plus_counts = self.remove_ignored(plus_counts, seq)
minus_counts = self.remove_ignored(minus_counts, seq)
# Take logs
log_plus_counts = self.log_counts(plus_counts)
log_minus_counts = self.log_counts(minus_counts)
# Get summed logs, excluding None values
sum_log_plus = sum(filter(None, log_plus_counts))
sum_log_minus = sum(filter(None, log_minus_counts))
# Skip if empty
if sum_log_plus == 0 or sum_log_minus == 0:
return None
# Take the length of the non None values only
length = len(list(filter(None, log_plus_counts)))
# Scale log counts by their averages
scaled_log_plus = self.scale_log_counts(log_plus_counts, sum_log_plus, length)
scaled_log_minus = self.scale_log_counts(log_minus_counts, sum_log_minus, length)
# Subtract minus from plus, whilst making sure that there is at least 1 value > 0
has_data = False
minus_subbed = []
# print(seq)
for pos in range(0, len(scaled_log_plus)):
if scaled_log_plus[pos] == None:
minus_subbed.append(None)
else:
subbed = max(0, scaled_log_plus[pos] - scaled_log_minus[pos])
if subbed > 0 and has_data == False:
has_data = True
minus_subbed.append(subbed)
# print(minus_subbed)
# ensure there is actually normalised data after minus subbed
if not has_data:
return None
# do the 2-8% normalisation step
# normalised = minus_subbed
normalised = self.scale_by_2_8(minus_subbed)
# print(normalised)
return normalised
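    # Worked sketch of the normalisation above (illustrative numbers only):
    #   1. positions whose base is U/T/G are set to None (DMS probes A and C)
    #   2. counts are log-transformed:            log(count + 1)
    #   3. each lane is scaled by its mean log:   value / (sum_logs / n_valid)
    #   4. minus is subtracted from plus:         max(0, plus - minus)
    #   5. the values ranked in the top 2-8% are averaged and every position
    #      is divided by that average.
    # e.g. if the minus-subtracted profile started [4.0, 2.0, 1.0, ...] and the
    # 2-8% mean were 2.0, the normalised profile would start [2.0, 1.0, 0.5, ...].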
# Sets ignored bases in the list to None
def remove_ignored(self, values, seq):
out = []
for pos in range(0, len(values)):
letter = seq[pos]
if letter in ["U", "T", "G"]:
out.append(None) # ignored nuc
else:
out.append(values[pos])
return out
# helper for norm_2_8
def log_counts(self, counts):
out = []
for count in counts:
if count == None:
out.append(None)
else:
out.append(math.log(float(count + 1), math.e))
return out
# Divide each log count value by the average log count, helper for norm_2_8
def scale_log_counts(self, log_counts, sum_log_counts, length):
out = []
for log_count in log_counts:
if log_count == None:
out.append(None)
else:
# this is correct, same result as the original version
out.append(float(log_count) / (float(sum_log_counts) / float(length)))
return out
def scale_by_2_8(self, minus_subbed):
norm_values = []
for value in minus_subbed:
if value != None and value > 0: # only consider values > 0
norm_values.append(float(value))
# Sort with highest values at the top
norm_values.sort(reverse = True)
# Generate 2-8% list
v8 = norm_values[
int(round(len(norm_values)*0.02)) : int(round(len(norm_values)*0.08)) + 1]
# Generate average of the 2-8% list
mean_28 = float(sum(v8)) / len(v8)
# Divide everything by the average of the 2-8%
out = []
for i in range(0, len(minus_subbed)):
value = minus_subbed[i]
if value == None:
out.append(None)
else:
out.append(minus_subbed[i] / mean_28)
return out # return the result
# Get all the transcript sequences from transcripts.fasta
def load_transcript_seqs(self):
out = {}
for record in SeqIO.parse(settings.transcripts_fasta_filepath, "fasta"):
out[record.id] = str(record.seq)
return out
def unpack_counts(self, line):
bits = line.split()
transcript_id = bits[0]
counts = list(map(float, bits[3:]))
return transcript_id, counts
# # Open the DMS reactivities file. These are normalised already.
# with open(experiment_config["nucleotides_filepath"], "r") as input_file:
# for line in input_file: # each line = 1 transcript
# bits = line.strip().split("\t")
# transcript_id = bits[0]
# transcript_len = len(bits) - 1
# # skip transcripts not already in DB
# if transcript_id not in transcript_ids:
# continue
# if len(bits) <= 1: # no measurements present
# continue
# count_strs = bits[1:]
# # Add set object. Will add coverage after going through reactivities
# measurement_set = NucleotideMeasurementSet(
# nucleotide_measurement_run_id=experiment_config["nucleotide_measurement_run_id"],
# transcript_id=transcript_id,
# coverage=0
# )
# db_session.add(measurement_set)
# db_session.commit()
# # go through reactivity entries, adding each to the database.
# position = 0
# for count_str in count_strs:
# position += 1
# if (count_str != "NA"): # skip adding "NA" entries.
# obj = NucleotideMeasurement(
# nucleotide_measurement_set_id=measurement_set.id,
# position=position,
# measurement=float(count_str)
# )
# db_session.add(obj)
# db_session.commit() # insert all the reactivity measurement rows into the DB
# # add the coverage
# # ...
# print("Added ["+transcript_id+"] ("+str(position)+" positions)")
input_file.close()
# fetch all of the transcript IDs from the database, store them in a set to check against.
def get_inserted_transcript_ids():
sql = ("SELECT id FROM transcript ORDER BY id ASC")
results = engine.execute(sql)
transcript_ids = set()
for result in results:
transcript_ids.add(result["id"])
return transcript_ids
class StructureImporter():
def execute(self, experiment_config):
print("Adding ["+experiment_config["description"]+"]")
# Add the new experiment row to the DB
experiment = StructurePredictionRun(
id=experiment_config["structure_prediction_run_id"],
strain_id=experiment_config["strain_id"],
description=experiment_config["description"]
)
db_session.add(experiment)
db_session.commit() # insert the experiment into the DB.
print("Importing structures for ["+experiment_config["description"]+"]")
transcript_ids = get_inserted_transcript_ids()
for transcript_id in transcript_ids:
structure_filepath = \
experiment_config["sauce_filepath"] + \
"/"+transcript_id+experiment_config["sauce_ext"]
if not os.path.isfile(structure_filepath):
print("["+structure_filepath+"] skipped")
else:
print("["+structure_filepath+"] found")
self.parse_ct(structure_filepath, transcript_id, experiment_config)
def parse_ct(self, ct_filepath, transcript_id, experiment_config):
structure = None
n_structs = 0
with open(ct_filepath) as ct_file:
for line in ct_file:
# if it's an energy line, we're looking at a brand new structure
# the .ct format is a bit annoying because it's not tab delimited.
# instead it's delimited by variable numbers of spaces.
# calling split() with no parameter makes it split on any length
# of whitespace - i.e. so that each element is 1 word
# from_pos = bits[0]
bits = line.strip().split()
if len(bits) != 6: # brand new structure
# save existing structure to DB
if structure != None:
db_session.add(structure)
# Parse the energy out using regex
search = re.search('ENERGY = (-[0-9\.]+)', line)
if search == None:
# No energy data - for some reason this happens for some structures.
# If this happens, just ignore the entire ct file by returning
return
energy = search.group(1)
# Create the new structure object, we'll commit it later...
structure = Structure(
structure_prediction_run_id=experiment_config["structure_prediction_run_id"],
transcript_id=transcript_id,
energy=energy
)
# insert the experiment into the DB. can now access ID
db_session.commit()
n_structs += 1
else:
to_pos = bits[4]
structure.add_value(to_pos)
db_session.add(structure)
db_session.commit() # insert remaining data into DB
print ("["+str(n_structs)+"] structures added")
# Carries out PCA using structures
class PcaImporter():
def execute(self, experiment_config):
transcript_structures = {}
# Get all transcript IDs for which there are structures
results = db_session \
.query(Structure.transcript_id) \
.filter(Structure.structure_prediction_run_id==experiment_config["structure_prediction_run_id"]) \
.distinct() \
.all()
for result in results:
transcript_id = result[0]
self.process_transcript_id(experiment_config, transcript_id)
def process_transcript_id(self, experiment_config, transcript_id):
# Fetch all of the structures matching the given transcript ID.
# this is an implicit join - no need to use join() here.
results = db_session \
.query(Structure) \
.filter(
Structure.structure_prediction_run_id==experiment_config["structure_prediction_run_id"],
Structure.transcript_id==transcript_id
) \
.order_by(Structure.id) \
.all()
# Map the data into a nice structure, including binary vectors describing what's
# paired and unpaired.
structure_vecs = {}
structures = {}
for structure in results:
# Map structure IDs to structures
structures[structure.id] = structure
# Initialise binary vector
structure_vec = []
# Fill the binary vector
bits = structure.structure.split("\t")
for value_str in bits:
structure_vec.append(1 if value_str != "0" else 0)
# Store the vector
structure_vecs[structure.id] = structure_vec
# Do PCA using structure vectors
pca_results = self.do_pca(structure_vecs)
if pca_results == None:
return
# Add the PC data to the DB
for structure_id in structures:
structure = structures[structure_id]
structure.pc1 = float(pca_results[structure.id][0])
structure.pc2 = float(pca_results[structure.id][1])
db_session.add(structure)
print("Did PCA for ["+transcript_id+"]")
db_session.commit()
def do_pca(self, structure_vecs):
from sklearn import decomposition
data = list(structure_vecs.values())
if len(data) < 2: # Need at least 2 structures to do PCA.
print("Warning - PCA failed, not enough structures.")
return None
# Do PCA.
# Results always listed in the order that they were added.
pca = decomposition.PCA(n_components=2)
pca.fit(data)
results = pca.transform(data)
# Rearrange the data so that it is keyed by structure ID
out = {}
i = 0
for structure_id in structure_vecs:
out[structure_id] = list(results[i])
i += 1
return out
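    # Illustrative sketch of what do_pca consumes and returns (made-up values):
    #
    #   structure_vecs = {101: [1, 0, 1, 1, 0],   # 1 = paired, 0 = unpaired
    #                     102: [1, 1, 1, 0, 0],
    #                     103: [0, 0, 1, 1, 0]}
    #   do_pca(structure_vecs)  # -> {101: [pc1, pc2], 102: [...], 103: [...]}
    #
    # Each structure is reduced to two principal-component coordinates, which
    # are then stored on the Structure rows for plotting.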
# Export FoldAtlas coverages - these are plus + minus lane
# Not currently in use
class CoverageExporter():
def export(self):
measurements_data = db_session.query(NucleotideMeasurementSet).all()
with open(settings.coverage_filepath, "w") as f:
for measurement_set in measurements_data:
tid = measurement_set.transcript_id
coverage = measurement_set.coverage
f.write(tid+"\t"+str(coverage)+"\n")
print("Coverages written to "+settings.coverage_filepath)
# Make list of transcript IDs that have structures in our database
class StructureTidsExporter():
def export(self):
sql = "SELECT DISTINCT transcript_id FROM structure"
results = engine.execute(sql)
n = 0
with open(settings.structure_tids_filepath, "w") as f:
for row in results:
n += 1
f.write(row["transcript_id"]+"\n")
print(str(n)+" structure transcript IDs written to "+settings.structure_tids_filepath)
# adds the raw lane counts into the raw_reactivities table
class MinusPlusCompiler():
def __init__(self):
self.nucleotide_measurement_run_id = 1
self.chunk_size = 100
self.boundary = 1000
def run(self):
print("Compiling counts from raw lanes data...")
engine.execute("TRUNCATE TABLE raw_reactivities") # empty the table
sql = "SELECT DISTINCT id FROM transcript ORDER BY id"
results = engine.execute(sql)
tids = []
for row in results:
tids.append(row["id"])
n_tids = len(tids)
print(str(n_tids)+" transcript IDs fetched")
print("Inserting...")
chunk_start = 0
while(True): # loop through chunks
# gather transcript IDs
tids_chunk = []
for i in range(chunk_start, chunk_start + self.chunk_size):
if i >= n_tids:
break
tids_chunk.append(tids[i])
# grab all the raw lanes for the transcript IDs in the chunk
self.fetch_raw_replicate_counts(tids_chunk)
print(".", end="", flush=True)
chunk_start += self.chunk_size
if chunk_start % 1000 == 0:
print(chunk_start)
if chunk_start >= n_tids:
break
print(str(n_tids)+" transcripts processed")
def fetch_raw_replicate_counts(self, tids):
# fetch raw replicate lanes data
lanes = db_session \
.query(RawReplicateCounts) \
.filter(
RawReplicateCounts.nucleotide_measurement_run_id==self.nucleotide_measurement_run_id,
RawReplicateCounts.transcript_id.in_(tids)
) \
.order_by(
RawReplicateCounts.transcript_id,
RawReplicateCounts.minusplus_id,
RawReplicateCounts.bio_replicate_id,
RawReplicateCounts.tech_replicate_id
) \
.all()
# compile into counts
counts = {} # transcript_id => {minus_counts: ... , plus_counts: ... }
for lane in lanes:
lane_values = values_str_unpack_float(lane.values)
# initialise this transcript
if lane.transcript_id not in counts:
counts[lane.transcript_id] = {}
# set the plus or minus counts
if lane.minusplus_id not in counts[lane.transcript_id]:
counts[lane.transcript_id][lane.minusplus_id] = lane_values
else: # add to existing plus or minus counts
for pos in range(0, len(lane_values)):
counts[lane.transcript_id][lane.minusplus_id][pos] += lane_values[pos]
# insert the counts into the DB
for transcript_id in counts:
transcript_counts = counts[transcript_id]
# gotta handle the missing data gracefully
if "minus" not in transcript_counts:
minus_counts = [0] * len(transcript_counts["plus"])
else:
minus_counts = transcript_counts["minus"]
if "plus" not in transcript_counts:
plus_counts = [0] * len(transcript_counts["minus"])
else:
plus_counts = transcript_counts["plus"]
measurement_set = RawReactivities(
nucleotide_measurement_run_id=self.nucleotide_measurement_run_id,
transcript_id=transcript_id,
minus_values="\t".join(list(map(str, minus_counts))),
plus_values="\t".join(list(map(str, plus_counts)))
)
db_session.add(measurement_set)
db_session.commit()
# Imports base pair probability matrixes generated using RNAstructure
# It took about 3.8 hours for this to import around 11,000 transcript BPPMs for Ath DMS data.
class BppmImporter():
def __init__(self):
self.spr_id = 1
self.chunk_size = 10
self.boundary = 100
def run(self):
import os
print("Gathering transcript IDs...")
# engine.execute("TRUNCATE TABLE bppm") # empty the table
filenames = os.listdir(settings.data_folder+"/bppms")
tids = []
for filename in filenames:
tid = ".".join(filename.split(".")[:-1])
if tid == "":
print("Warning: cannot process "+filename)
tids.append(tid)
n_tids = len(tids)
print(str(n_tids)+" transcript IDs fetched")
print("Inserting...")
chunk_start = 0
tl = Timeline()
tl.log("start")
while(True): # loop through chunks
# gather transcript IDs
tids_chunk = []
for i in range(chunk_start, chunk_start + self.chunk_size):
if i >= n_tids:
break
tids_chunk.append(tids[i])
# grab all the raw lanes for the transcript IDs in the chunk
self.process_tids(tids_chunk)
print(".", end="", flush=True)
chunk_start += self.chunk_size
if chunk_start % self.boundary == 0:
print(chunk_start)
if chunk_start >= n_tids:
break
tl.log("end")
tl.dump()
print("\n"+str(n_tids)+" transcripts processed")
def process_tids(self, tids_chunk):
bppms_folder = settings.data_folder+"/bppms"
for tid in tids_chunk:
# print("Processing "+tid)
bppm_data = {}
# bppm_text = ""
# grab the text from file, trim off the first line
# also parse the bppm text into data structure
with open(bppms_folder+"/"+tid+".bppm", "r") as f:
first = True
for line in f:
if first:
first = False
continue
# add the text for the bppm table
if "Probability" in line: # skip header lines
continue
# extract the data, this will be used for structure BPPMs
bits = line.strip().split("\t")
pos_a = int(bits[0])
pos_b = int(bits[1])
bpp = -float(bits[2])
# bppm_text += str(pos_a)+"\t"+str(pos_b)+"\t"+str(bpp)+"\n"
if pos_a not in bppm_data:
bppm_data[pos_a] = {}
bppm_data[pos_a][pos_b] = bpp
# compress the BPPM string before saving to bppm table
# bppm_text = base64.b64encode(zlib.compress(bppm_text.encode("ascii")))
# measurement_set = Bppm(transcript_id=tid, data=bppm_text)
# db_session.add(measurement_set)
# grab all the structures matching the tid
structures = db_session \
.query(Structure) \
.filter(Structure.transcript_id==tid) \
.all()
# insert the bpps data for each structure
for structure in structures:
bits = structure.structure.split("\t")
bpps = []
for pos_ind in range(0, len(bits)):
# positions always start at 1. zero means not paired
pos_a = pos_ind + 1
pos_b = int(bits[pos_ind])
if pos_b == 0 or pos_a not in bppm_data or pos_b not in bppm_data[pos_a]:
# not base paired, or zero probability
bpps.append("NA")
else:
# base paired
bpps.append(str(bppm_data[pos_a][pos_b]))
bpps_str = "\t".join(bpps)
structure.bpps = bpps_str
db_session.add(structure)
db_session.commit()
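    # For reference, each <transcript>.bppm file is assumed to hold a header
    # line followed by tab-separated triples
    #
    #   <pos_a>  <pos_b>  <-log10(pairing probability)>
    #
    # (the text output of RNAstructure's probability plot), which is why the
    # value is negated above before being cached in bppm_data; pairs that are
    # absent or unpaired end up as "NA" in Structure.bpps.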
| mit |
hpssjellis/forth-tensorflow | skflow-examples/z12_iris_save_restore.py | 2 | 1739 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import skflow
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
# Clean checkpoint folder if exists
try:
    shutil.rmtree('/home/ubuntu/workspace/skflow-examples/iris_custom_model')
except OSError:
pass
# Save model, parameters and learned variables.
classifier.save('/home/ubuntu/workspace/skflow-examples/iris_custom_model')
classifier = None
## Restore everything
new_classifier = skflow.TensorFlowEstimator.restore('/home/ubuntu/workspace/skflow-examples/iris_custom_model')
score = metrics.accuracy_score(y_test, new_classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
| mit |
scienceopen/dascutils | dascutils/plots.py | 1 | 3876 | from pathlib import Path
import xarray
import numpy as np
from datetime import timedelta, datetime
from matplotlib.pyplot import draw, pause, figure
from matplotlib.colors import LogNorm
#
try:
import themisasi.plots as themisplot
except ImportError:
themisplot = None
def histogram_dasc(imgs: xarray.Dataset, odir=None):
"""
creates per wavelength histograms
the entries in list img correspond to wavelength, a 1-D array
"""
if odir is not None:
odir = Path(odir).expanduser()
fg = figure(figsize=(15, 5))
axs = fg.subplots(1, 3)
for a, i in zip(axs, imgs.data_vars):
a.hist(imgs[i].dropna(dim='time', how='all').values.ravel(), bins=128)
a.set_yscale('log')
a.set_title(r'$\lambda=' + f'{i}$ nm')
a.set_xlabel('14-bit data numbers')
if odir:
ofn = odir/'DASChistogram.png'
print('writing', ofn, end='\r')
fg.savefig(ofn, bbox_inches='tight')
def moviedasc(imgs: xarray.Dataset, odir: Path, cadence: float, rows=None, cols=None):
if odir:
print('writing to', odir)
odir = Path(odir).expanduser()
wavelen = list(imgs.data_vars)
fg = figure(figsize=(15, 5))
axs = np.atleast_1d(fg.subplots(1, len(np.unique(wavelen))))
if imgs.time.dtype == 'M8[ns]':
time = [datetime.utcfromtimestamp(t/1e9) for t in imgs.time.values.astype(int)]
else:
time = imgs.time.values.astype(datetime)
# %% setup figures
if 'unknown' not in imgs.data_vars:
Hi = []
Ht = []
for ax, w, mm, c in zip(axs,
np.unique(wavelen),
((350, 800), (350, 9000), (350, 900)),
('b', 'g', 'r')):
# ax.axis('off') #this also removes xlabel,ylabel
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel(f'{w} nm', color=c)
Hi.append(ax.imshow(imgs[w].dropna(dim='time', how='all')[0],
vmin=mm[0], vmax=mm[1],
origin='lower',
norm=LogNorm(), cmap='gray'))
Ht.append(ax.set_title('', color=c))
# fg.colorbar(hi[-1],ax=a).set_label('14-bit data numbers')
if themisplot is not None:
themisplot.overlayrowcol(ax, rows, cols)
fg.tight_layout(h_pad=1.08) # get rid of big white space in between figures
else:
ax = axs[0]
ax.set_xticks([])
ax.set_yticks([])
hi = ax.imshow(imgs['unknown'][0],
                       vmin=350, vmax=10000,
origin='lower',
norm=LogNorm(), cmap='gray')
ht = ax.set_title('')
if themisplot is not None:
themisplot.overlayrowcol(ax, rows, cols)
# %% loop
print('generating video until', time[-1])
t = time[0]
dt = timedelta(seconds=cadence)
while t <= time[-1]:
if 'unknown' not in imgs.data_vars:
for w, hi, ht in zip(np.unique(wavelen), Hi, Ht):
im = imgs[w].dropna(dim='time', how='all').sel(time=t, method='nearest')
hi.set_data(im)
try:
ht.set_text(str(im.time.values))
except OSError: # file had corrupted time
ht.set_text('')
else:
im = imgs['unknown'].sel(time=t, method='nearest')
hi.set_data(im)
try:
ht.set_text(str(im.time.values))
except OSError: # file had corrupted time
ht.set_text('')
draw(), pause(0.05) # the pause avoids random crashes
t += dt
if odir:
ofn = odir / (str(t)+'.png')
print('saving', ofn, end='\r')
fg.savefig(ofn, bbox_inches='tight', facecolor='k')
| gpl-3.0 |
maartenbreddels/vaex | tests/value_counts_test.py | 1 | 4942 | import pytest
import numpy as np
from common import *
def test_value_counts():
ds = create_base_ds()
assert len(ds.x.value_counts()) == 21
assert len(ds.y.value_counts()) == 19
assert len(ds.m.value_counts(dropmissing=True)) == 19
assert len(ds.m.value_counts()) == 20
assert len(ds.n.value_counts(dropna=False)) == 20
assert len(ds.n.value_counts(dropna=True)) == 19
assert len(ds.nm.value_counts(dropnan=True, dropmissing=True)) == 21-4
assert len(ds.nm.value_counts(dropnan=True, dropmissing=False)) == 21-3
assert len(ds.nm.value_counts(dropna=False, dropmissing=True)) == 21-3
assert len(ds.nm.value_counts(dropna=False, dropmissing=False)) == 21-2
assert len(ds.mi.value_counts(dropmissing=True)) == 21-2
assert len(ds.mi.value_counts(dropmissing=False)) == 21-1
v_counts_name = ds['name'].value_counts()
v_counts_name_arrow = ds.name_arrow.value_counts()
assert np.all(v_counts_name == v_counts_name_arrow)
def test_value_counts_object():
ds = create_base_ds()
assert len(ds.obj.value_counts(dropmissing=True)) == 17
assert len(ds.obj.value_counts(dropmissing=False)) == 18
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts_with_pandas(ds_local, dropna):
ds = ds_local
df = ds.to_pandas_df()
assert df.x.value_counts(dropna=dropna).values.tolist() == ds.x.value_counts(dropna=dropna).values.tolist()
def test_value_counts_simple():
x = np.array([0, 1, 1, 2, 2, 2, np.nan])
y = np.ma.array(x, mask=[True, True, False, False, False, False, False])
s = np.array(list(map(str, x)))
# print(s)
ds = vaex.from_arrays(x=x, y=y, s=s)
df = ds.to_pandas_df()
assert ds.x.value_counts(dropna=True, ascending=True).values.tolist() == [1, 2, 3]
assert ds.x.value_counts(dropna=False, ascending=True).values.tolist() == [1, 1, 2, 3]
# print(ds.s.value_counts(dropna=True, ascending=True))
assert set(ds.s.value_counts(dropna=True, ascending=True).index.tolist()) == {'0.0', 'nan', '1.0', '2.0'}
assert set(ds.s.value_counts(dropna=True, ascending=True).values.tolist()) == {1, 1.0, 2, 3}
assert set(ds.y.value_counts(dropna=True, ascending=True).index.tolist()) == {1, 2}
assert set(ds.y.value_counts(dropna=True, ascending=True).values.tolist()) == {1, 3}
# nan comparison with == never works
# assert ds.y.value_counts(dropna=False, ascending=True).index.tolist() == [1, np.nan, None, 2]
assert ds.y.value_counts(dropna=False, dropmissing=True, ascending=True).values.tolist() == [1, 1, 3]
assert ds.y.value_counts(dropna=False, dropmissing=False, ascending=True).values.tolist() == [2, 1, 1, 3]
# assert ds.y.value_counts(dropna=False, ascending=True).index.tolist() == ['2', 'missing', '1']
assert set(df.x.value_counts(dropna=False).values.tolist()) == set(ds.x.value_counts(dropna=False).values.tolist())
assert set(df.x.value_counts(dropna=True).values.tolist()) == set(ds.x.value_counts(dropna=True).values.tolist())
# do we want the index to be the same?
# assert df.y.value_counts(dropna=False).index.tolist() == ds.y.value_counts(dropna=False).index.tolist()
# assert df.y.value_counts(dropna=False).values.tolist() == ds.y.value_counts(dropna=False).values.tolist()
# assert df.y.value_counts(dropna=True).values.tolist() == ds.y.value_counts(dropna=True).values.tolist()
def test_value_counts_object_missing():
# Create sample data
x = np.array([None, 'A', 'B', -1, 0, 2, '', '', None, None, None, np.nan, np.nan])
df = vaex.from_arrays(x=x)
# assert correct number of elements found
assert len(df.x.value_counts(dropnan=False, dropmissing=False)) == 8
assert len(df.x.value_counts(dropnan=True, dropmissing=True)) == 6
def test_value_counts_masked_str():
x = np.ma.MaskedArray(data=['A' , 'A' , 'A' , 'B' , 'B' , 'B' , '' , '' , '' ],
mask=[False, True, False, False, True, True, False, True, False])
df = vaex.from_arrays(x=x)
value_counts = df.x.value_counts()
assert len(value_counts) == 4
assert value_counts['A'] == 2
assert value_counts['B'] == 1
assert value_counts[''] == 2
assert value_counts['missing'] == 4
value_counts = df.x.value_counts(dropmissing=True)
assert len(value_counts) == 3
assert value_counts['A'] == 2
assert value_counts['B'] == 1
assert value_counts[''] == 2
value_counts = df.x.value_counts(dropna=True)
assert len(value_counts) == 3
assert value_counts['A'] == 2
assert value_counts['B'] == 1
assert value_counts[''] == 2
def test_value_counts_add_strings():
x = ['car', 'car', 'boat']
y = ['red', 'red', 'blue']
df = vaex.from_arrays(x=x, y=y)
df['z'] = df.x + '-' + df.y
value_counts = df.z.value_counts()
assert list(value_counts.index) == ['car-red', 'boat-blue']
assert value_counts.values.tolist() == [2, 1]
| mit |
nrnb/mirna-pathway-finder | mirnapathwayfinder/mirna_pathway_finder.py | 2 | 10819 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
# import numpy as np
import os
import pandas as pd
import pystache
import re
import urllib
current_path = os.path.dirname(os.path.abspath(__file__))
def MirnaPathwayFinder(
mappings_path=None,
query_values=None,
query_value_list_column_index=0,
node_type='rna',
output_dir='.',
cache=True,
debug=False):
def print_debug(message):
if debug:
print message
def generate_widget_uri(mapping, highlight_string):
return ''.join([
'http://www.wikipathways.org/wpi/PathwayWidget.php?id=',
mapping['identifier'],
'&',
highlight_string,
])
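    # Example of the kind of URI this builds (hypothetical pathway ID and
    # labels, shown only to illustrate the query-string layout):
    #
    #   http://www.wikipathways.org/wpi/PathwayWidget.php?id=WP1234&label[]=MIR21&label[]=hsa-miR-21-5p&colors=red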
def generate_pathways_table(html_template_input, query_value_list):
# TODO handle the case where the query values are NOT display names
highlight_values = map(
lambda query_value: 'label[]=' + urllib.quote(
query_value
), query_value_list
)
highlight_string = str.join('&', highlight_values) + '&colors=red'
f = open(current_path + '/table-template.html', 'r')
table_template = f.read()
widget_uri = generate_widget_uri(
html_template_input[0], highlight_string)
initial_html_string = pystache.render(
table_template, html_template_input)
html_string_with_widget_url = initial_html_string.replace(
'widget_uri',
widget_uri
)
update_widget_path = os.path.join(current_path, 'update-widget.js')
with open(update_widget_path, 'r') as update_widget:
update_widget_string = ''.join([
'var highlightString = \'',
highlight_string,
'\';\n',
update_widget.read()
])
html_string_with_update_widget = html_string_with_widget_url.replace(
'update_widget_string',
update_widget_string
)
f = open(os.path.join(output_dir, 'pathways.html'), 'w')
f.write(html_string_with_update_widget)
return html_string_with_update_widget
def has_targeter(row):
columns_to_check = (
'stem_loop_name',
'mature_name',
'mirbase',
'mirbase.mature',
'ncbigene',
'hgnc',
'targeter_stem_loop_name',
'targeter_mature_name',
'targeter_mirbase',
'targeter_mirbase.mature',
'targeter_ncbigene',
'targeter_hgnc',
)
possible_targeters = set(filter(lambda y: isinstance(y, str),
map(lambda x: row[x], columns_to_check)))
return len(possible_targeters.intersection(query_value_list)) > 0
def has_target(row):
columns_to_check = (
'targeter_stem_loop_name',
'targeter_mature_name',
'targeter_mirbase',
'targeter_mirbase.mature',
'targeter_ncbigene',
'targeter_hgnc',
)
possible_targeters = set(filter(lambda y: isinstance(y, str),
map(lambda x: row[x], columns_to_check)))
return len(possible_targeters.intersection(query_value_list)) > 0
results_limit = 20
query_value_list = set()
if os.path.isfile(query_values):
with open(query_values, 'rb') as csvfile:
query_value_list_reader = csv.reader(
csvfile, delimiter='\t', quotechar='|')
for row in query_value_list_reader:
query_value_list.add(row[query_value_list_column_index])
else:
if hasattr(query_values, '__iter__'):
query_value_list = set(query_values)
else:
query_value_list.add(query_values)
query_value_list = map(lambda x: re.sub(
'^http:\/\/identifiers.org\/(hgnc|ncbigene|mirbase|mirbase\.mature)\/',
'',
x
), query_value_list)
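    # e.g. (illustrative) 'http://identifiers.org/ncbigene/1234' -> '1234';
    # names without an identifiers.org prefix are left unchanged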
if mappings_path is None:
# TODO remove the date part of the file name
mappings_path = os.path.join(
current_path, '..', 'wp-mir-table-hs-20160715.tsv')
# TODO integrate this old code into the current code. Specifically,
# handle if the mapping data is provided as a Python object.
# # parse wp-mir-table-hs.csv (or other file, if specified)
# # to get mappings between pathways and mirnas,
# # including for each pathway:
# # * genes: all gene products in the pathway, annotated as genes
# # * mirna_hits_as_gene_specified: miRNAs actually existing in pathway,
# # annotated as genes
# # * mirna_hits_as_mirna_specified: miRNAs actually shown on pathway,
# # annotated as miRNAs
# # * mirna_hits_as_mirna_inferred: miRNAs NOT actually specified on the
# # pathway but
# # inferred to exist on the pathway because they target genes or
# # proteins that DO actually exist on the pathway
# pathway_to_mirna_mappings = mappings_path
# pathway_to_mirna_mappings_list = []
# if os.path.isfile(pathway_to_mirna_mappings):
# with open(pathway_to_mirna_mappings, 'rb') as csvfile:
# pathway_to_mirna_mappings_reader = csv.DictReader(csvfile)
# for row in pathway_to_mirna_mappings_reader:
# genes = parse_hits_field(row['genes'])
# mirna_hits_as_gene_specified = parse_hits_field(
# row['ghits'])
# mirna_hits_as_mirna_specified = parse_hits_field(
# row['mhits'])
# mirna_hits_as_mirna_inferred = parse_hits_field(
# row['mthits'])
#
# wp_identifier = re.search('WP\d+', row['link']).group(0)
# parsed_row = {
# 'name': row['name'],
# 'identifier': wp_identifier,
# 'id': row['link'],
# 'genes': genes,
# 'mirna_hits_as_gene_specified':
# mirna_hits_as_gene_specified,
# 'mirna_hits_as_mirna_specified':
# mirna_hits_as_mirna_specified,
# 'mirna_hits_as_mirna_inferred':
# mirna_hits_as_mirna_inferred,
# }
# pathway_to_mirna_mappings_list.append(parsed_row)
# else:
# if hasattr(pathway_to_mirna_mappings, '__iter__'):
# pathway_to_mirna_mappings_list += pathway_to_mirna_mappings
# else:
# pathway_to_mirna_mappings_list.append(pathway_to_mirna_mappings)
wp_mirna = pd.read_csv(mappings_path,
sep='\t',
dtype=str)
# TODO remove this. It's just for dev.
# wp_mirna = wp_mirna.head(1000)
with_targeter = wp_mirna[wp_mirna.apply(
lambda d: has_targeter(d), axis=1)]
with_targeter_by_pwy = with_targeter.groupby(['wikipathways'])
# get targeter count by pathway
targeter_n_by_pwy = with_targeter_by_pwy[
'shown_or_inferred_mature_name'].nunique()
# get shown_targeter count by pathway
shown_targeter_n_by_pwy = with_targeter_by_pwy[
'mature_name'].nunique()
# get target count by pathway
with_target = with_targeter[with_targeter.apply(
lambda d: has_target(d), axis=1)]
with_target_by_pwy = with_target.groupby(['wikipathways'])
target_n_by_pwy = with_target_by_pwy['ncbigene'].nunique()
pathways = with_targeter['wikipathways'].unique()
d = {
'wikipathways': pathways,
'targeter_count': targeter_n_by_pwy,
'shown_targeter_count': shown_targeter_n_by_pwy,
'target_count': target_n_by_pwy
}
wp_counts = pd.DataFrame(
data=d
).fillna(
value=0
).nlargest(
results_limit,
['shown_targeter_count', 'target_count', 'targeter_count'])
results = with_targeter.join(wp_counts,
on='wikipathways',
how='left',
lsuffix='',
rsuffix='_r',
sort=False)
pathway_level_columns = [
'shown_targeter_count',
'target_count',
'targeter_count',
'wikipathways',
'link',
'pathway_name'
]
results_sorted = results.sort_values(by=pathway_level_columns,
axis=0,
ascending=False,
inplace=False,
kind='quicksort',
na_position='last')
results_by_pwy = results_sorted.groupby(pathway_level_columns, sort=False)
html_template_input = []
for name_by_pwy, group_by_pwy in results_by_pwy:
targets_by_targeters = []
shown_targeters = []
mature_names = filter(lambda x: isinstance(x, str),
group_by_pwy['mature_name'])
if len(mature_names) > 0:
shown_targeters = shown_targeters + map(
lambda x: {'name': x}, mature_names)
result = {
'id': name_by_pwy[pathway_level_columns.index('link')],
'identifier': name_by_pwy[
pathway_level_columns.index('wikipathways')],
'name': name_by_pwy[pathway_level_columns.index('pathway_name')],
'targets_by_targeters': targets_by_targeters,
'shown_targeters': shown_targeters,
'shown_targeter_count': name_by_pwy[
pathway_level_columns.index('shown_targeter_count')],
'target_count': name_by_pwy[
pathway_level_columns.index('target_count')],
'targeter_count': name_by_pwy[
pathway_level_columns.index('targeter_count')]
}
by_targeter = group_by_pwy.groupby('targeter_mature_name')
for name_by_targeter, group_by_targeter in by_targeter:
targets_by_targeters.append({
'targeter': name_by_targeter,
'targets': ', '.join(group_by_targeter[
'ncbigene'].unique().tolist())
})
html_template_input.append(result)
generate_pathways_table(html_template_input, query_value_list)
| apache-2.0 |
jgliss/geonum | geonum/geosetup.py | 1 | 25367 | # -*- coding: utf-8 -*-
#
# Geonum is a Python library for geographical calculations in 3D
# Copyright (C) 2017 Jonas Gliss ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License a
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the GeoSetup class, a high level object for managing
muliple geo point and geo vector objects.
"""
from geonum import BASEMAP_AVAILABLE
if BASEMAP_AVAILABLE:
from geonum.mapping import Map
from numpy import asarray, nanmin, nanmax
from os.path import exists
from traceback import print_exc
from warnings import warn
from geonum.geopoint import GeoPoint
from geonum.geovector3d import GeoVector3D
from geonum.topodataaccess import TopoDataAccess
from geonum.topodata import TopoData
class GeoSetup(object):
"""The GeoSetup class represents a collection of GeoPoints and vectors
Attributes
----------
id : str
name of this setup
points : list
list of :class:`GeoPoint` objects assigned to this setup
vectors : list
list of :class:`GeoVector3D` objects assigned to this setup
Parameters
----------
points : list
list of :class:`GeoPoint` objects to be included in this setup
vectors : list
list of :class:`GeoVector3D` objects to be included in this setup
lat_ll : :obj:`float`, optional
lower left latitude of regime
lon_ll : :obj:`float`, optional
lower left longitude of regime
lat_tr : :obj:`float`, optional
top right latitude of regime
lon_tr : :obj:`float`, optional
top right longitude of regime
id : str
identification string of this setup
topo_access_mode : str
topo data mode, default is SRTM
(see :class:`TopoDataAccess` for details)
local_topo_path : str
local path were topography data (e.g. ETOPO1 data) is stored
cmap_vecs : str
String specifying a valid matplotlib colormap supposed to be
used for drawing :class:`GeoVector3D` objects into overview
maps
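    Example
    -------
    A minimal usage sketch; the coordinates and names below are arbitrary
    placeholders chosen in the spirit of :func:`create_test_data`::
        from geonum.geopoint import GeoPoint
        from geonum.geosetup import GeoSetup
        p1 = GeoPoint(37.751, 14.993, name="source")
        p2 = GeoPoint(37.766, 15.017, name="camera")
        stp = GeoSetup(points=[p1, p2], id="example")
        stp.set_borders_from_points(extend_km=2.0)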
"""
def __init__(self, points=[], vectors=[], lat_ll=None, lon_ll=None,
lat_tr=None, lon_tr=None, id="MyGeoSetup",
topo_access_mode="srtm", local_topo_path=None,
cmap_vecs="Greens"):
self.id = id
self.points = {}
self.vectors = {}
from matplotlib.pyplot import get_cmap
try:
cmap = get_cmap(cmap_vecs)
except:
cmap = get_cmap("Greens")
self.cmap = cmap
self.topo_access_mode = topo_access_mode
self.local_topo_path = local_topo_path
self.topo_data = None
try:
iter(points)
except TypeError:
if isinstance(points, GeoPoint):
points = [points]
else:
print("Invalid input points: ", points)
points = []
try:
iter(vectors)
except:
            if isinstance(vectors, GeoVector3D):
                vectors = [vectors]
            else:
                print("Invalid input vectors: ", vectors)
                vectors = []
for pt in points:
if isinstance(pt, GeoPoint):
self.add_geo_point(pt)
for vec in vectors:
if isinstance(vec, GeoVector3D):
self.add_geo_vector(vec)
self.set_borders_from_points()
#temporary creation of points ll and tr in case there is some valid input
try:
self.new_geo_point(lat_ll, lon_ll, name="ll")
self.new_geo_point(lat_tr, lon_tr, name="tr")
except (TypeError, ValueError):
pass
@property
def topo_access(self):
"""Topograph data access class"""
return TopoDataAccess(self.topo_access_mode,
self.local_topo_path)
def has_points(self):
"""Returns True, if this setup includes GeoPoints, False if not"""
if not bool(self.points):
return False
return True
def create_test_data(self):
"""Create exemplary test data set"""
source = GeoPoint(37.751005, 14.993435, name="Etna")
instrument = GeoPoint(37.765755, 15.016696, name="Observatory")
self.add_geo_points(source, instrument)
self.set_borders_from_points()
plume = GeoVector3D(azimuth=83, dist_hor = self.magnitude,
elevation=0, anchor=source, name="plume")
view_dir = GeoVector3D(azimuth=160, dist_hor=self.magnitude,
elevation=8, anchor=instrument, name="cfov")
self.add_geo_vectors(plume, view_dir)
def set_local_topo_path(self, p):
"""Sets local path for Etopo1 data files can be found
Note
----
The default topomode is "srtm" which provides online access, so
it is not mandatory to provide topography data locally. However,
the SRTM model has no global coverage, so there might be need to
use another of the provided topomodes and provide the respective
files locally.
Parameters
----------
p : str
new search path for topography data
"""
if not exists(p):
raise IOError("Input path does not exist")
self.topo_access.local_path = p
def change_topo_mode(self, new_mode="srtm", local_path=None):
"""Change the current mode for topography data access
Parameters
----------
new_mode : str
new topo access mode
local_path : :obj:`str`, optional
if not None and valid, update local topo access
"""
if local_path is not None and exists(local_path):
self.load_topo_path = local_path
self.topo_access_mode = new_mode
def get_topo(self):
"""Get current topo data"""
if not isinstance(self.topo_data, TopoData):
self.load_topo_data()
return self.topo_data
def load_topo_data(self):
"""Load topography data
.. note::
The loaded :class:`TopoData` object will also be set in all
:class:`GeoPoint` objects belonging to this setup
"""
if "ll" not in self.points:
self.set_borders_from_points()
self.topo_data = self.topo_access.get_data(self.ll.latitude,
self.ll.longitude,
self.tr.latitude,
self.tr.longitude)
for p in list(self.points.values()):
p.set_topo_data(self.topo_data)
@property
def ll(self):
"""Return lower left point of topo data range """
try:
return self.points["ll"]
except AttributeError:
print("Lower left corner (GeoPoint) not yet defined in GeoSetup")
@ll.setter
def ll(self, value):
if not isinstance(value, GeoPoint):
raise TypeError("Could not set lower left coordinate in "
"GeoSetup: need GeoPoint object")
self.points["ll"] = value
@property
def tr(self):
"""Return lower left point of topo data range"""
try:
return self.points["tr"]
except AttributeError:
print("Top right corner (GeoPoint) not yet defined in GeoSetup")
pass
@tr.setter
def tr(self, value):
if not isinstance(value, GeoPoint):
raise TypeError("Could not set top right coordinate in "
"GeoSetup: need GeoPoint object")
self.points["tr"] = value
@property
def lon_ll(self):
"""Lower left corner of object regime"""
return self.ll.longitude
@property
def lat_ll(self):
"""Lower left corner of object regime"""
return self.ll.latitude
@property
def lon_tr(self):
"""Lower left corner of object regime"""
return self.tr.longitude
@property
def lat_tr(self):
"""Lower left corner of object regime"""
return self.tr.latitude
@property
def delta_lon(self):
"""Returns longitude range"""
return abs(self.lon_tr - self.lon_ll)
@property
def delta_lat(self):
"""Returns latitude range"""
return abs(self.lat_tr - self.lat_ll)
@property
def center_coordinates(self):
"""Lat / Lon coordinates of center of data"""
return (self.lat_ll + self.delta_lat / 2.,
self.lon_ll + self.delta_lon / 2.)
def add_geo_points(self, *args):
"""Add multiple GeoPoints to the collection
:param *args: arbitrary amount of new geo points
"""
for arg in args:
self.add_geo_point(arg)
def has_point(self, name):
"""Checks if point with input name exists
:param str key: name of point
:return: bool
"""
if name in self.points:
return True
return False
def has_vector(self, name):
"""Checks if vector with input name exists"""
if name in self.vectors:
return True
return False
def add_geo_point(self, pt):
"""Add :class:`GeoPoint` to this collection
:param GeoPoint pt: the new point
"""
try:
if pt.name in self.points:
print(("Point ID %s already exists in GeoSetup" %(pt.name)))
pt2 = self.points[pt.name]
if pt.almost_equal(pt2) and pt.altitude == pt2.altitude:
print("Point is unchanged")
return
print("Updating name of existing GeoPoint to: %s_old" %pt.name)
pt2.name = pt2.name + "_old"
self.points[pt2.name] = pt2
self.points[pt.name] = pt
if not isinstance(pt.topo_data, TopoData):
pt.set_topo_data(self.topo_data)
except Exception as e:
print("Geopoint could not be added: " + repr(e))
def set_geo_point(self, p_id, pt):
"""Update an existing GeoPoint in the collection
        :param str p_id: id of existing point in ``self.points``
:param GeoPoint pt: a new geo_point
"""
if not isinstance(pt, GeoPoint):
raise TypeError("Wrong input: " + type(pt))
self.points[p_id] = pt
def add_geo_vectors(self, *args):
"""Add multiple GeoPoints to the collection"""
for arg in args:
self.add_geo_vector(arg)
def add_geo_vector(self, vec):
"""Add :class:`GeoVector3D` to this collection
        :param GeoVector3D vec: the new vector to add
"""
if not isinstance(vec, GeoVector3D):
print(("Error adding GeoVector3D, wrong input type, need "
":class:`GeoVector3D` object, input type: %s" %type(vec)))
return
if vec.name in self.vectors:
print(("Vector ID %s already exists in %s" %(vec.name, self)))
vec2 = self.vectors[vec.name]
if (vec2.almost_equals(vec) and vec2.dz == vec.dz
and vec.anchor == vec2.anchor):
print("Vector is unchanged")
return
print("Updating name of existing vector to: %s_old" %vec.name)
vec2.name = vec2.name + "_old"
self.vectors[vec2.name] = vec2
self.vectors[vec.name] = vec
def delete_geo_point(self, name):
"""Remove one of the geo_points from the collection
:param str name: name of geo point
"""
del self.points[name]
def delete_geo_vector(self, name):
"""Remove one of the vectors from the collection
:param str name: name of geo vector
"""
del self.vectors[name]
def new_geo_point(self, *args, **kwargs):
"""Create new geo_point and add to collection
:param **kwargs: see :class:`GeoPoint` for initiation info
"""
try:
self.add_geo_point(GeoPoint(*args, **kwargs))
except (TypeError, ValueError):
return
except:
raise Exception(print_exc())
def _all_lats_lons(self):
"""Get 2 arrays including all latitudes and all longitudes of all
points included in this collection
.. note::
Existing points specifying the regime (i.e. lower left / top
right corner) are not considered here
"""
lats, lons = [], []
for id, p in self.points.items():
if not any([id == x for x in ["ll","tr"]]):
lats.append(p.latitude)
lons.append(p.longitude)
return asarray(lats), asarray(lons)
@property
def magnitude(self):
"""Returns dimension (in km) of area covered by this setup"""
return (self.tr - self.ll).norm
def set_borders_from_points(self, extend_km=1, to_square=True):
"""Set range of setup (lower left and upper right coordinates)
considering all points in this collection
:param float extend_km: extend range from the outermost points by
this number in km
        :param bool to_square (True): extend the shorter base side to the
            size of the longer one (square range)
"""
lats, lons= self._all_lats_lons()
if not len(lats) > 0:
#print "Borders could not be initiated, no objects found..."
return False
lat_ll, lon_ll, lat_tr , lon_tr = (nanmin(lats), nanmin(lons),
nanmax(lats), nanmax(lons))
pll, ptr = GeoPoint(lat_ll, lon_ll, 0.0), GeoPoint(lat_tr, lon_tr, 0.0)
if to_square:
v = ptr - pll
add = (abs(v.dx) - abs(v.dy)) / 2
            if add > 0: # E/W extent (dx) is larger than N/S extent (dy): grow N/S
pll = pll.offset(azimuth = 180, dist_hor = add)
ptr = ptr.offset(azimuth = 0, dist_hor = add)
else:
pll = pll.offset(azimuth = 270, dist_hor = -add)
ptr = ptr.offset(azimuth = 90, dist_hor = -add)
self.set_geo_point("ll", pll.offset(azimuth=-135,
dist_hor=float(extend_km),
name="ll"))
self.set_geo_point("tr", ptr.offset(azimuth=45,
dist_hor=float(extend_km),
name="tr"))
return True
def create_map(self, *args, **kwargs):
"""Create a Basemap object for this regime"""
if not BASEMAP_AVAILABLE:
raise ImportError("Cannot create map: "
"Basemap library is not available")
if not isinstance(self.topo_data, TopoData):
self.load_topo_data()
if not "projection" in kwargs and self.magnitude < 150:
kwargs["projection"] = "lcc"
if not "llcrnrlon" in kwargs:
kwargs["llcrnrlat"] = self.lat_ll
kwargs["llcrnrlon"] = self.lon_ll
kwargs["urcrnrlat"] = self.lat_tr
kwargs["urcrnrlon"] = self.lon_tr
kwargs["lat_0"], kwargs["lon_0"] = self.center_coordinates
m = Map(*args, **kwargs)
m.set_topo_data(self.topo_data)
return m
def points_close(self, p, radius=None):
"""Finds all GeoPoints which are within a certain radius around another
point
:param GeoPoint p: the actual point for which the search is performed
:param float radius (None): radius (in km) specifying considered range
around point (is set to 10% of the magnitude of this setup if
unspecified)
:returns:
- list of point string IDs which are within the specified radius
around input point
"""
if radius == None:
radius = self.magnitude * .1
ids = []
for pt in list(self.points.values()):
if not pt is p and (pt - p).magnitude < radius:
ids.append(pt.name)
print(("Found %d points within radius of %.1f km of point %s"
%(len(ids), radius, p.name)))
return ids
def plot_2d(self, draw_all_points=True, draw_all_vectors=True,
draw_topo=True, draw_coastline=True, draw_mapscale=True,
draw_legend=True, *args, **kwargs):
"""Draw overview map of the current setup
:param bool draw_all_points (True): if true, all points are included
:param bool draw_all_vectors (True): if true, all vectors (with anchor)
are included
:param bool draw_topo (True): include topography into map
:param bool draw_coastline (True): include coastline into map
:param bool draw_mapscale (True): insert map scale
:param bool draw_legend (True): insert a (draggable) legend
:param *args: additional non-keyword parameters (passed to `basemap
<http://matplotlib.org/basemap/api/basemap_api.html#mpl
_toolkits.basemap.Basemap>`_)
:param **kwargs: additional keyword parameters (passed to `basemap
<http://matplotlib.org/basemap/api/basemap_api.html#mpl
_toolkits.basemap.Basemap>`_)
:return:
- :class:`geonum.mapping.Map` object
"""
if not BASEMAP_AVAILABLE:
raise ImportError("Cannot create overview map: Basemap module "
"is not available")
if not "ax" in kwargs:
#fig, ax = subplots(1,1)
from matplotlib.pyplot import figure
fig = figure(figsize=(10,8))
ax = fig.add_axes([0.12,0.15,0.8,0.8])
kwargs["ax"] = ax
m = self.create_map(*args, **kwargs)
if draw_coastline:
m.drawcoastlines()
if draw_topo:
m.draw_topo(insert_colorbar=True)
m.draw_topo_contour()
m.draw_coordinates()
if draw_mapscale:
m.draw_mapscale_auto()
p_close_count = 0
if draw_all_points:
dist = self.magnitude * .05
for pt in list(self.points.values()):
if not any([pt.name == x for x in ["ll", "tr"]]):
m.draw_geo_point_2d(pt)
ang = -45
num_close = len(self.points_close(pt))
if num_close > 0:
step = 360. / (4 * num_close)
ang = ang - step * p_close_count
p_close_count += 1
m.write_point_name_2d(pt, dist, ang)
#create some color indices for colormap
nums = [int(255.0 / k) for k in range(1, len(self.vectors)+3)]
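        # e.g. for two vectors this yields colormap indices [255, 127, 85, 63];
        # only the first len(self.vectors) entries are used in the loop below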
if draw_all_vectors:
for i, vec in enumerate(self.vectors.values()):
m.draw_geo_vector_2d(vec,
ls="-",
c=self.cmap(nums[i]),
label=vec.name)
if draw_legend:
try:
m.legend()
except:
warn("Failed to draw legend in GeoSetup...")
return m
def plot_3d(self, draw_all_points=True, draw_all_vectors=True,
cmap_topo="Oranges", contour_color="#708090",
contour_lw=0.2, contour_antialiased=True, *args, **kwargs):
"""Create a 3D overview map of the current setup
Parameters
----------
draw_all_points : bool
if True, all current GeoPoint objects are plotted, defaults to
True
draw_all_vectors : bool
if True, all current GeoVector3D objects are plotted, defaults to
True
        cmap_topo : str
            string ID of the colormap used to plot the topographic data,
            defaults to "Oranges"
        contour_color : str
            string specifying the color of the contour lines
            (default: "#708090")
        contour_lw : float
            width of drawn contour lines, defaults to 0.2, use 0 if you do
            not want contour lines inserted
        contour_antialiased : bool
            apply antialiasing to the surface plot of topography, defaults
            to True
*args :
additional non-keyword parameters (passed to `basemap
<http://matplotlib.org/basemap/api/basemap_api.html#mpl
_toolkits.basemap.Basemap>`_)
**kwargs :
additional keyword parameters (passed to `basemap
<http://matplotlib.org/basemap/api/basemap_api.html#mpl
_toolkits.basemap.Basemap>`_)
Returns
-------
Map
plotted 3D basemap
"""
if not BASEMAP_AVAILABLE:
raise ImportError("Cannot create overview map: Basemap module "
"is not available.")
m = self.create_map(*args, **kwargs)
m.draw_topo_3d(cmap=cmap_topo, contour_color=contour_color,
contour_lw=contour_lw,
contour_antialiased=contour_antialiased)
if draw_all_points:
zr = self.topo_data.alt_range * 0.05
alts = []
for name, pt in self.points.items():
if not any([name == x for x in ["ll", "tr"]]):
try:
add_alt = 0 #in m
for alt in alts:
if abs(alt - pt.altitude) < zr:
add_alt = 3 * zr
print("Add " + str(add_alt))
pt.plot_3d(m, add_name = True, dz_text = zr + add_alt)
alts.append(pt.altitude)
except Exception as e:
warn("Point %s could not be drawn: %s"
%(pt.name, repr(e)))
pass
if draw_all_vectors:
nums = [int(255.0 / k) for k in range(1, len(self.vectors)+3)]
for i, vec in enumerate(self.vectors.values()):
try:
m.draw_geo_vector_3d(vec, label=vec.name,
c=self.cmap(nums[i]),
ls="-",
**kwargs)
except Exception as e:
warn("Vector %s could not be drawn: %s"
%(vec.name, repr(e)))
pass
return m
def show_coordinate(geo_point=None, lat_pt=None, lon_pt=None, extend_km=10.0,
*args, **kwargs):
"""Draw overview map for a given point
Parameters
----------
geo_point : GeoPoint
Geographical location around which overview map is drawn
lat_pt : float
Latitude of geographical location around which overview map is
drawn (only considered if :attr:`geo_point` is invalid)
lon_pt : float
Longitude of geographical location around which overview map is
drawn (only considered if :attr:`geo_point` is invalid)
extend_km : float
map extend in km around considered geolocation
*args :
non-keyword arguments passed to :func:`plot_2d` of the
:class:`GeoSetup` instance that is created in order to draw the map
Returns
-------
Map
instance of :class:`geonum.Map`
"""
if not isinstance(geo_point, GeoPoint):
try:
geo_point = GeoPoint(lat=lat_pt, lon=lon_pt)
except:
raise TypeError("Invalid input, please provide information "
"about location of GeoPoint")
stp = GeoSetup(points=[geo_point])
stp.set_borders_from_points(extend_km=extend_km)
m = stp.plot_2d(*args, **kwargs)
return m | gpl-3.0 |
rahul-c1/scikit-learn | sklearn/datasets/mlcomp.py | 41 | 3803 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (taken from the metadata
        file) of the MLComp dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
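    Examples
    --------
    An illustrative sketch only: the dataset name and root path are
    placeholders that must match data downloaded from mlcomp.org, and
    `load_mlcomp` is assumed to be re-exported by ``sklearn.datasets``
    (otherwise import it from ``sklearn.datasets.mlcomp``)::
        from sklearn.datasets import load_mlcomp
        news = load_mlcomp('20news-18828', set_='train',
                           mlcomp_root='~/data/mlcomp')
        print(news.target_names)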
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
najmacherrad/master_thesis | DynaMine/plotcomp1KG_dynamine.py | 1 | 8835 | #DynaMine
#Compare results between wild type and mutant
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
import pylab
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
def getColumn(filename, column,deli):
results = csv.reader(open(filename), delimiter=deli)
return [result[column] for result in results]
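# Note: e.g. getColumn(filename, 3, '\t') returns the 4th tab-separated column
# as a list of strings, header row included (hence the .pop(0) calls below).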
#Importer les fichiers
file_wt = 'dynamineresultsNEW_wt.csv'
file_mut = 'dynamineresultsNEW_1kg.csv'
#-----------------------------------------------------------------------------
# FLEXIBILITY S2
#-----------------------------------------------------------------------------
#--------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)):
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)):
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.2]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,1.2)
plt.ylim(0,1.2)
plt.xlabel('Wild types')
plt.ylabel('Neutral 1KGP mutants')
fig.savefig('pred_wtVS1kg.jpg')
#----------------------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 1.2, 100)
x2 = np.linspace(xmin2, 1.2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'b',label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Flexibility S2 predicted values')
plt.ylabel('Frequency')
plt.xlim(0,1.2)
plt.ylim(0,5)
plt.legend(loc='upper right')
fig.savefig('histo_missense_wtVS1kg.png')
# STATS
miss=[]
[miss.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.45873047720322435, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) #-> (T, pvalue) = (5982726.0, 6.1610257571015192e-82)
#So we reject H0 -> There is a significant difference between wt and mut
#-----------------------------------------------------------------------------
# FLEXIBILITY S2 ENVIRONMENT
#-----------------------------------------------------------------------------
#--------------
# SCATTER PLOT
predenvt_wt = getColumn(file_wt,4,'\t')
predenvt_mut = getColumn(file_mut,4,'\t')
predenvt_wt.pop(0)
predenvt_mut.pop(0)
x,y=[],[]
for i in range(0,len(predenvt_wt)):
if predenvt_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(predenvt_wt[i]))
for i in range(0,len(predenvt_mut)):
if predenvt_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(predenvt_mut[i]))
fig = plt.figure()
a=b=[0,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.2]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,1.2)
plt.ylim(0,1.2)
plt.xlabel('Wild types')
plt.ylabel('Neutral 1KGP mutants')
fig.savefig('predenvt_wtVS1kg.jpg')
#----------------------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 1.2, 100)
x2 = np.linspace(xmin2, 1.2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'b',label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Flexibility S2 predicted values')
plt.ylabel('Frequency')
plt.xlim(0,1.2)
plt.ylim(0,5)
plt.legend(loc='upper right')
fig.savefig('histo_missense_wtVS1kg_envt.png')
# STATS
miss2=[]
[miss2.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMINORV:
stats.kstest(miss2,'norm') # (D,pvalue) = (0.46756747979693603, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss2) # (T, pvalue) = (6232277.0, 2.6686982056384321e-70)
#So we reject H0 -> There is a significant difference between wt and mut
#-----------------------------------------------------------------------------
# OUTLIERS FOR FLEXIBILITY (24)
#-----------------------------------------------------------------------------
dyn_wt = getColumn(file_wt,3,'\t')
dyn_mut = getColumn(file_mut,3,'\t')
dyn_wt.pop(0)
dyn_mut.pop(0)
dyne_wt = getColumn(file_wt,4,'\t')
dyne_mut = getColumn(file_mut,4,'\t')
dyne_wt.pop(0)
dyne_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('dynamine_outliers_1kg.csv','w')
output.write('ID,pred_wt,pred_mut,difference,pred_envt_wt,pred_envt_mut,difference_envt\n')
for i in range(0,len(dyn_wt)):
for j in range(0,len(dyn_mut)):
if i==j:
if dyn_wt[i]!='NA'and dyn_mut[j]!='NA':
if (abs(float(dyn_wt[i])-float(dyn_mut[j]))) > 0.25:
output.write(variant_liste[i+1] + ',' + dyn_wt[i] + ',' + dyn_mut[j] + ',' + str(abs(float(dyn_wt[i])-float(dyn_mut[j]))) + ',' + dyne_wt[i] + ',' + dyne_mut[i] + ',' + str(abs(float(dyne_wt[i])-float(dyne_mut[j]))) + '\n')
output.close()
#-----------------------------------------------------------------------------
# FLEXIBILITY : COMPARISON deleterious DIDA mutants VS neutral 1KGP mutants
#-----------------------------------------------------------------------------
file_DIDAmut = 'dynamineresults_mut.csv'
pred_DIDA = getColumn(file_DIDAmut,3,'\t')
pred_1kg = getColumn(file_mut,3,'\t')
pred_DIDA.pop(0)
pred_1kg.pop(0)
xpred,ypred=[],[]
for i in range(0,len(pred_DIDA)): #241
if pred_DIDA[i]=='NA':
xpred.append(np.nan)
else:
xpred.append(float(pred_DIDA[i]))
for i in range(0,len(pred_1kg)): #5846
if pred_1kg[i]=='NA':
ypred.append(np.nan)
else:
ypred.append(float(pred_1kg[i]))
fig = figure()
mu1, std1 = stats.norm.fit(xpred)
mu2, std2 = stats.norm.fit(ypred)
bins = np.linspace(0, 1.2, 35)
plt.hist(xpred,bins,normed=True,alpha=0.3, color='red',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.hist(ypred,bins,normed=True,alpha=0.3, label='neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2),color='blue')
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'r', linewidth=2)
plt.plot(x2, p2, 'b', linewidth=2)
plt.xlabel('Flexibility S2 predicted values')
plt.ylabel('Frequency')
plt.xlim(0,1.2)
plt.ylim(0,5.5)
plt.legend(loc='upper left')
fig.savefig('histo_dynamine_DIDAVS1kg.png')
#MANN-WHITNEY:
stats.ranksums(xpred,ypred) # (U,p-value) = (2.9860551466029612, 0.0028260167537576602)
# Reject H0
# The distributions of two sets of variables have a difference
#-----------------------------------------------------------------------------
# FLEXIBILITY ENVT: COMPARISON deleterious DIDA mutants VS neutral 1KGP mutants
#-----------------------------------------------------------------------------
pred_DIDA = getColumn(file_DIDAmut,4,'\t')
pred_1kg = getColumn(file_mut,4,'\t')
pred_DIDA.pop(0)
pred_1kg.pop(0)
xpred,ypred=[],[]
for i in range(0,len(pred_DIDA)): #241
if pred_DIDA[i]=='NA':
xpred.append(np.nan)
else:
xpred.append(float(pred_DIDA[i]))
for i in range(0,len(pred_1kg)): #5846
if pred_1kg[i]=='NA':
ypred.append(np.nan)
else:
ypred.append(float(pred_1kg[i]))
fig = figure()
mu1, std1 = stats.norm.fit(xpred)
mu2, std2 = stats.norm.fit(ypred)
bins = np.linspace(0, 1.2, 35)
plt.hist(xpred, bins, alpha=0.3, label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu1, std1),normed=True,color='red')
plt.hist(ypred, bins, alpha=0.3, label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2),normed=True,color='blue')
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'r', linewidth=2)
plt.plot(x2, p2, 'b', linewidth=2)
plt.xlabel('Flexibility S2 predicted values')
plt.ylabel('Frequency')
plt.legend(loc='upper left')
plt.xlim(0,1.2)
plt.ylim(0,5.5)
fig.savefig('histo_dynamineenvt_DIDA1kg.png')
#MANN-WHITNEY:
stats.ranksums(xpred,ypred) # (U,p-value) = (2.6912760375831581, 0.0071179272598568873)
# Reject H0
# The distributions of two sets of variables have a difference
| mit |
jjhelmus/scipy | scipy/optimize/nonlin.py | 11 | 46689 | r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
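        # (Eisenstat--Walker style forcing term: eta_A = gamma*||F_k||^2/||F_{k-1}||^2;
        #  the else-branch below keeps eta from dropping too sharply while the
        #  previous forcing term is still large)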
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
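    For example (an illustrative sketch), with ``alpha = 2`` and a single
    stored pair ``c = [1, 0]``, ``d = [0, 1]``, the represented matrix is
    ``2*I + c d^H = [[2, 1], [0, 2]]``.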
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
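        # Mapping onto the code below (with B = alpha*I): the small M x M
        # system matrix is A = alpha*I + D^H C, the right-hand side is
        # q = D^H v / alpha, and the result is w = v/alpha - C (A^-1 q).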
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
    .. [1] B.A. van der Rotten, PhD thesis,
       "A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
    def solve(self, f, tol=0):
        r = self.Gm.matvec(f)
        if not np.isfinite(r).all():
            # singular; reset the Jacobian approximation
            self.setup(self.last_x, self.last_f, self.func)
            return self.Gm.matvec(f)
        return r
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
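# --- Illustrative sketch (not part of the original module) -------------------
# A minimal usage sketch for the ``broyden1`` wrapper generated at the bottom
# of this file (Broyden's "good" method). The toy system F below is a
# hypothetical demo problem, not part of the scipy API.
def _demo_broyden_first():
    import numpy as np
    from scipy.optimize import broyden1
    def F(x):
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]
    # Start from a rough guess; iteration stops once max|F(x)| <= f_tol.
    x = broyden1(F, [1, 1, 1, 1], f_tol=1e-12)
    assert np.max(np.abs(F(x))) < 1e-10
    return x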
class BroydenSecond(BroydenFirst):
"""
    Find a root of a function, using Broyden's second Jacobian approximation.
    This method is also known as "Broyden's bad method".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
corresponding to Broyden's second method.
References
----------
    .. [1] B.A. van der Rotten, PhD thesis,
       "A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by finding a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only an MxM matrix
    inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
    M : int, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
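# --- Illustrative sketch (not part of the original module) -------------------
# A minimal usage sketch for the ``anderson`` wrapper generated at the bottom
# of this file. The keyword arguments M and w0 are forwarded to
# Anderson.__init__ by _nonlin_wrapper. The residual is a hypothetical,
# well-behaved fixed-point problem x = 0.5*cos(x), chosen only for
# demonstration.
def _demo_anderson_mixing():
    import numpy as np
    from scipy.optimize import anderson
    def F(x):
        return x - 0.5 * np.cos(x)
    x = anderson(F, np.zeros(3), M=5, w0=0.01, f_tol=1e-10)
    assert np.max(np.abs(F(x))) < 1e-8
    return x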
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
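# --- Illustrative sketch (not part of the original module) -------------------
# A numpy-only check of one way to read the _update above: it coincides with
# the diagonal part of Broyden's first ("good") update
#     J+ = J + (df - J dx) dx^T / (dx^T dx)
# applied to the diagonal Jacobian J = diag(-d). All vectors are hypothetical
# demo values.
def _demo_diag_broyden_update():
    import numpy as np
    rng = np.random.RandomState(1)
    d = rng.rand(4) + 0.5
    dx = rng.rand(4)
    df = rng.rand(4)
    J = np.diag(-d)
    J_full = J + np.outer(df - J.dot(dx), dx) / np.dot(dx, dx)
    d_new = d - (df + d * dx) * dx / np.linalg.norm(dx) ** 2
    assert np.allclose(np.diag(J_full), -d_new)
    return d_new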
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
        Note that you can also use inverse Jacobians as (adaptive)
        preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
        Parameters to pass on to the "inner" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
:doi:`10.1016/j.jcp.2003.08.010`
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
:doi:`10.1137/S0895479803422014`
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
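# --- Illustrative sketch (not part of the original module) -------------------
# A minimal usage sketch for the ``newton_krylov`` wrapper generated at the
# bottom of this file: only Jacobian-vector products are approximated (by the
# finite difference in matvec above), never the full Jacobian. The residual is
# a hypothetical toy system with known solution [1, 2, 3, 4].
def _demo_newton_krylov():
    import numpy as np
    from scipy.optimize import newton_krylov
    def residual(x):
        return x ** 2 - np.array([1.0, 4.0, 9.0, 16.0])
    x = newton_krylov(residual, 3.0 * np.ones(4), method='lgmres',
                      f_tol=1e-12)
    assert np.allclose(x, [1.0, 2.0, 3.0, 4.0])
    return x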
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__`` and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
args, varargs, varkw, defaults = _getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
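# --- Illustrative sketch (not part of the original module) -------------------
# The wrappers above forward unknown keyword arguments to the Jacobian class,
# so the options documented on BroydenFirst (reduction_method, max_rank, ...)
# can be passed straight to broyden1 next to the nonlin_solve options such as
# f_tol. The toy system below is a hypothetical demo problem.
def _demo_wrapper_keyword_forwarding():
    import numpy as np
    from scipy.optimize import broyden1
    def F(x):
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]
    return broyden1(F, [1, 1, 1, 1], f_tol=1e-10,
                    reduction_method='simple', max_rank=5)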
| bsd-3-clause |
thientu/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
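# --- Illustrative sketch (not part of the original test module) --------------
# A minimal, hedged example of the estimator exercised by the tests above,
# reusing the module-level diabetes data X, y and the F-test scores F.
# Stability-selection scores near 1 mean a feature was picked in most
# resampled lasso fits; as the tests assert, the highest-scoring features
# roughly match the top F-test features. Parameter values are arbitrary demo
# choices.
def _demo_randomized_lasso_scores():
    clf = RandomizedLasso(alpha=1, scaling=0.3, n_resampling=30,
                          random_state=42)
    clf.fit(X, y)
    top_by_score = np.argsort(clf.scores_)[-3:]
    top_by_ftest = np.argsort(F)[-3:]
    return top_by_score, top_by_ftest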
| bsd-3-clause |
jedufour/NURBS-Python | ex_curve01.py | 1 | 1026 | # -*- coding: utf-8 -*-
from nurbs import Curve as ns
from nurbs import utilities as utils
from matplotlib import pyplot as plt
# Create a NURBS curve instance
curve = ns.Curve()
# Set up the NURBS curve
curve.read_ctrlpts("data/CP_Curve1.txt")
curve.degree = 4
# Auto-generate the knot vector
curve.knotvector = utils.autogen_knotvector(curve.degree, len(curve.ctrlpts))
# Calculate curve points
curve.evaluate_rational()
# Arrange control points for plotting
ctrlpts_x = []
ctrlpts_y = []
for pt in curve.ctrlpts:
ctrlpts_x.append(pt[0])
ctrlpts_y.append(pt[1])
# Arrange curve points for plotting
curvepts_x = []
curvepts_y = []
for pt in curve.curvepts:
curvepts_x.append(pt[0])
curvepts_y.append(pt[1])
# Plot using Matplotlib
plt.figure(figsize=(10.67, 8), dpi=96)
cppolygon, = plt.plot(ctrlpts_x, ctrlpts_y, "k-.")
curveplt, = plt.plot(curvepts_x, curvepts_y, "r-")
plt.legend([cppolygon, curveplt], ["Control Points Polygon", "Evaluated Curve"])
plt.show()
print("End of NURBS-Python Example")
| mit |
hainm/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
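# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of the heuristic above: the returned bandwidth is
# the average distance to the k-th nearest neighbour, with k set by
# ``quantile``. The blobs are hypothetical demo data.
def _demo_estimate_bandwidth():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6,
                           random_state=0)
    return estimate_bandwidth(X_demo, quantile=0.2, n_samples=200,
                              random_state=0)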
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
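# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of the functional interface above on hypothetical
# blob data: bin seeding keeps the number of seeds (and hence the run time)
# small, and each point is labelled with its nearest surviving kernel.
def _demo_mean_shift_function():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.5,
                           random_state=1)
    centers, labels = mean_shift(X_demo, bandwidth=1.5, bin_seeding=True)
    return centers, labels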
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
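# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of the binning heuristic above: points are snapped
# to a grid of spacing ``bin_size`` and only sufficiently populated bins
# survive as seeds. The coordinates are hypothetical demo values.
def _demo_get_bin_seeds():
    data = np.array([[1.0, 1.0], [1.1, 1.1], [1.2, 1.2], [2.5, 2.5]])
    # With min_bin_freq=2 only the bin around (1, 1) has enough members,
    # so a single seed at [1., 1.] is returned.
    return get_bin_seeds(data, bin_size=1.0, min_bin_freq=2)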
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
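# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged end-to-end example of the estimator above on hypothetical
# blob data; with bin_seeding=True the fit is fast and cluster_centers_ should
# end up close to the three blob centers.
def _demo_mean_shift_estimator():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=600, centers=[[1, 1], [-1, -1], [1, -1]],
                           cluster_std=0.3, random_state=0)
    ms = MeanShift(bandwidth=0.8, bin_seeding=True)
    ms.fit(X_demo)
    return ms.cluster_centers_, ms.labels_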
| bsd-3-clause |
xzh86/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
Clyde-fare/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
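# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of the probability outputs above: each row of
# predict_proba sums to one and predict picks the class with the largest
# posterior. The two-blob data set is a hypothetical toy example.
def _demo_qda_probabilities():
    X_demo = np.array([[-2., -1.], [-1., -1.], [-1., -2.],
                       [1., 1.], [1., 2.], [2., 1.]])
    y_demo = np.array([0, 0, 0, 1, 1, 1])
    clf = QDA().fit(X_demo, y_demo)
    proba = clf.predict_proba(X_demo)
    assert np.allclose(proba.sum(axis=1), 1.0)
    return clf.predict([[-0.8, -1.]]), proba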
| bsd-3-clause |
drufat/sympy | doc/ext/docscrape_sphinx.py | 51 | 9709 | from __future__ import division, absolute_import, print_function
import sys
import re
import inspect
import textwrap
import pydoc
import sphinx
import collections
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
# Lines that are commented out are used to make the
# autosummary:: table. Since SymPy does not use the
# autosummary:: functionality, it is easiest to just comment it
# out.
# autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
# if param_obj and (pydoc.getdoc(param_obj) or not desc):
# # Referenced object has a docstring
# autosum += [" %s%s" % (prefix, param)]
# else:
others.append((param, param_type, desc))
# if autosum:
# out += ['.. autosummary::']
# if self.class_members_toctree:
# out += [' :toctree:']
# out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/stats/plm.py | 14 | 24672 | """
Linear regression objects for panel data
"""
# pylint: disable-msg=W0231
# pylint: disable-msg=E1101,E1103
from __future__ import division
from pandas.compat import range
from pandas import compat
import warnings
import numpy as np
from pandas.core.panel import Panel
from pandas.core.frame import DataFrame
from pandas.core.reshape import get_dummies
from pandas.core.series import Series
from pandas.core.sparse import SparsePanel
from pandas.stats.ols import OLS, MovingOLS
import pandas.stats.common as com
import pandas.stats.math as math
from pandas.util.decorators import cache_readonly
class PanelOLS(OLS):
"""Implements panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None, intercept=True, nw_lags=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False,
nw_overlap=False):
self._x_orig = x
self._y_orig = y
self._weights = weights
self._intercept = intercept
self._nw_lags = nw_lags
self._nw_overlap = nw_overlap
self._entity_effects = entity_effects
self._time_effects = time_effects
self._x_effects = x_effects
self._dropped_dummies = dropped_dummies or {}
self._cluster = com._get_cluster_type(cluster)
self._verbose = verbose
(self._x, self._x_trans,
self._x_filtered, self._y,
self._y_trans) = self._prepare_data()
self._index = self._x.index.levels[0]
self._T = len(self._index)
def log(self, msg):
if self._verbose: # pragma: no cover
print(msg)
def _prepare_data(self):
"""Cleans and stacks input data into DataFrame objects
If time effects is True, then we turn off intercepts and omit an item
from every (entity and x) fixed effect.
Otherwise:
- If we have an intercept, we omit an item from every fixed effect.
- Else, we omit an item from every fixed effect except one of them.
The categorical variables will get dropped from x.
"""
(x, x_filtered, y, weights, cat_mapping) = self._filter_data()
self.log('Adding dummies to X variables')
x = self._add_dummies(x, cat_mapping)
self.log('Adding dummies to filtered X variables')
x_filtered = self._add_dummies(x_filtered, cat_mapping)
if self._x_effects:
x = x.drop(self._x_effects, axis=1)
x_filtered = x_filtered.drop(self._x_effects, axis=1)
if self._time_effects:
x_regressor = x.sub(x.mean(level=0), level=0)
unstacked_y = y.unstack()
y_regressor = unstacked_y.sub(unstacked_y.mean(1), axis=0).stack()
y_regressor.index = y.index
elif self._intercept:
# only add intercept when no time effects
self.log('Adding intercept')
x = x_regressor = add_intercept(x)
x_filtered = add_intercept(x_filtered)
y_regressor = y
else:
self.log('No intercept added')
x_regressor = x
y_regressor = y
if weights is not None:
if not y_regressor.index.equals(weights.index):
raise AssertionError("y_regressor and weights must have the "
"same index")
if not x_regressor.index.equals(weights.index):
raise AssertionError("x_regressor and weights must have the "
"same index")
rt_weights = np.sqrt(weights)
y_regressor = y_regressor * rt_weights
x_regressor = x_regressor.mul(rt_weights, axis=0)
return x, x_regressor, x_filtered, y, y_regressor
def _filter_data(self):
"""
"""
data = self._x_orig
cat_mapping = {}
if isinstance(data, DataFrame):
data = data.to_panel()
else:
if isinstance(data, Panel):
data = data.copy()
if not isinstance(data, SparsePanel):
data, cat_mapping = self._convert_x(data)
if not isinstance(data, Panel):
data = Panel.from_dict(data, intersect=True)
x_names = data.items
if self._weights is not None:
data['__weights__'] = self._weights
# Filter x's without y (so we can make a prediction)
filtered = data.to_frame()
# Filter all data together using to_frame
# convert to DataFrame
y = self._y_orig
if isinstance(y, Series):
y = y.unstack()
data['__y__'] = y
data_long = data.to_frame()
x_filt = filtered.filter(x_names)
x = data_long.filter(x_names)
y = data_long['__y__']
if self._weights is not None and not self._weights.empty:
weights = data_long['__weights__']
else:
weights = None
return x, x_filt, y, weights, cat_mapping
def _convert_x(self, x):
# Converts non-numeric data in x to floats. x_converted is the
# DataFrame with converted values, and x_conversion is a dict that
# provides the reverse mapping. For example, if 'A' was converted to 0
# for x named 'variety', then x_conversion['variety'][0] is 'A'.
x_converted = {}
cat_mapping = {}
# x can be either a dict or a Panel, but in Python 3, dicts don't have
# .iteritems
iteritems = getattr(x, 'iteritems', x.items)
for key, df in iteritems():
if not isinstance(df, DataFrame):
raise AssertionError("all input items must be DataFrames, "
"at least one is of "
"type {0}".format(type(df)))
if _is_numeric(df):
x_converted[key] = df
else:
try:
df = df.astype(float)
except (TypeError, ValueError):
values = df.values
distinct_values = sorted(set(values.flat))
cat_mapping[key] = dict(enumerate(distinct_values))
new_values = np.searchsorted(distinct_values, values)
x_converted[key] = DataFrame(new_values, index=df.index,
columns=df.columns)
if len(cat_mapping) == 0:
x_converted = x
return x_converted, cat_mapping
def _add_dummies(self, panel, mapping):
"""
Add entity and / or categorical dummies to input X DataFrame
Returns
-------
DataFrame
"""
panel = self._add_entity_effects(panel)
panel = self._add_categorical_dummies(panel, mapping)
return panel
def _add_entity_effects(self, panel):
"""
Add entity dummies to panel
Returns
-------
DataFrame
"""
from pandas.core.reshape import make_axis_dummies
if not self._entity_effects:
return panel
self.log('-- Adding entity fixed effect dummies')
dummies = make_axis_dummies(panel, 'minor')
if not self._use_all_dummies:
if 'entity' in self._dropped_dummies:
to_exclude = str(self._dropped_dummies.get('entity'))
else:
to_exclude = dummies.columns[0]
if to_exclude not in dummies.columns:
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log('-- Excluding dummy for entity: %s' % to_exclude)
dummies = dummies.filter(dummies.columns.difference([to_exclude]))
dummies = dummies.add_prefix('FE_')
panel = panel.join(dummies)
return panel
def _add_categorical_dummies(self, panel, cat_mappings):
"""
Add categorical dummies to panel
Returns
-------
DataFrame
"""
if not self._x_effects:
return panel
dropped_dummy = (self._entity_effects and not self._use_all_dummies)
for effect in self._x_effects:
self.log('-- Adding fixed effect dummies for %s' % effect)
dummies = get_dummies(panel[effect])
val_map = cat_mappings.get(effect)
if val_map:
val_map = dict((v, k) for k, v in compat.iteritems(val_map))
if dropped_dummy or not self._use_all_dummies:
if effect in self._dropped_dummies:
to_exclude = mapped_name = self._dropped_dummies.get(
effect)
if val_map:
mapped_name = val_map[to_exclude]
else:
to_exclude = mapped_name = dummies.columns[0]
if mapped_name not in dummies.columns: # pragma: no cover
raise Exception('%s not in %s' % (to_exclude,
dummies.columns))
self.log(
'-- Excluding dummy for %s: %s' % (effect, to_exclude))
dummies = dummies.filter(dummies.columns.difference([mapped_name]))
dropped_dummy = True
dummies = _convertDummies(dummies, cat_mappings.get(effect))
dummies = dummies.add_prefix('%s_' % effect)
panel = panel.join(dummies)
return panel
@property
def _use_all_dummies(self):
"""
In the case of using an intercept or including time fixed
effects, completely partitioning the sample would make the X
not full rank.
"""
return (not self._intercept and not self._time_effects)
@cache_readonly
def _beta_raw(self):
"""Runs the regression and returns the beta."""
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
beta, _, _, _ = np.linalg.lstsq(X, Y)
return beta
@cache_readonly
def beta(self):
return Series(self._beta_raw, index=self._x.columns)
@cache_readonly
def _df_model_raw(self):
"""Returns the raw model degrees of freedom."""
return self._df_raw - 1
@cache_readonly
def _df_resid_raw(self):
"""Returns the raw residual degrees of freedom."""
return self._nobs - self._df_raw
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = math.rank(self._x_trans.values)
if self._time_effects:
df += self._total_times
return df
@cache_readonly
def _r2_raw(self):
Y = self._y_trans.values.squeeze()
X = self._x_trans.values
resid = Y - np.dot(X, self._beta_raw)
SSE = (resid ** 2).sum()
if self._use_centered_tss:
SST = ((Y - np.mean(Y)) ** 2).sum()
else:
SST = (Y ** 2).sum()
return 1 - SSE / SST
@property
def _use_centered_tss(self):
# has_intercept = np.abs(self._resid_raw.sum()) < _FP_ERR
return self._intercept or self._entity_effects or self._time_effects
@cache_readonly
def _r2_adj_raw(self):
"""Returns the raw r-squared adjusted values."""
nobs = self._nobs
factors = (nobs - 1) / (nobs - self._df_raw)
return 1 - (1 - self._r2_raw) * factors
@cache_readonly
def _resid_raw(self):
Y = self._y.values.squeeze()
X = self._x.values
return Y - np.dot(X, self._beta_raw)
@cache_readonly
def resid(self):
return self._unstack_vector(self._resid_raw)
@cache_readonly
def _rmse_raw(self):
"""Returns the raw rmse values."""
# X = self._x.values
# Y = self._y.values.squeeze()
X = self._x_trans.values
Y = self._y_trans.values.squeeze()
resid = Y - np.dot(X, self._beta_raw)
ss = (resid ** 2).sum()
return np.sqrt(ss / (self._nobs - self._df_raw))
@cache_readonly
def _var_beta_raw(self):
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
x = self._x
y = self._y
if self._time_effects:
xx = _xx_time_effects(x, y)
else:
xx = np.dot(x.values.T, x.values)
return _var_beta_panel(y, x, self._beta_raw, xx,
self._rmse_raw, cluster_axis, self._nw_lags,
self._nobs, self._df_raw, self._nw_overlap)
@cache_readonly
def _y_fitted_raw(self):
"""Returns the raw fitted y values."""
return np.dot(self._x.values, self._beta_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_vector(self._y_fitted_raw, index=self._x.index)
def _unstack_vector(self, vec, index=None):
if index is None:
index = self._y_trans.index
panel = DataFrame(vec, index=index, columns=['dummy'])
return panel.to_panel()['dummy']
def _unstack_y(self, vec):
unstacked = self._unstack_vector(vec)
return unstacked.reindex(self.beta.index)
@cache_readonly
def _time_obs_count(self):
return self._y_trans.count(level=0).values
@cache_readonly
def _time_has_obs(self):
return self._time_obs_count > 0
@property
def _nobs(self):
return len(self._y)
def _convertDummies(dummies, mapping):
# cleans up the names of the generated dummies
new_items = []
for item in dummies.columns:
if not mapping:
var = str(item)
if isinstance(item, float):
var = '%g' % item
new_items.append(var)
else:
# renames the dummies if a conversion dict is provided
new_items.append(mapping[int(item)])
dummies = DataFrame(dummies.values, index=dummies.index,
columns=new_items)
return dummies
def _is_numeric(df):
for col in df:
if df[col].dtype.name == 'object':
return False
return True
def add_intercept(panel, name='intercept'):
"""
Add column of ones to input panel
Parameters
----------
panel: Panel / DataFrame
    name: string, default 'intercept'
Returns
-------
New object (same type as input)
"""
panel = panel.copy()
panel[name] = 1.
return panel.consolidate()
class MovingPanelOLS(MovingOLS, PanelOLS):
"""Implements rolling/expanding panel OLS.
See ols function docs
"""
_panel_model = True
def __init__(self, y, x, weights=None,
window_type='expanding', window=None,
min_periods=None,
min_obs=None,
intercept=True,
nw_lags=None, nw_overlap=False,
entity_effects=False,
time_effects=False,
x_effects=None,
cluster=None,
dropped_dummies=None,
verbose=False):
self._args = dict(intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap,
entity_effects=entity_effects,
time_effects=time_effects,
x_effects=x_effects,
cluster=cluster,
dropped_dummies=dropped_dummies,
verbose=verbose)
PanelOLS.__init__(self, y=y, x=x, weights=weights,
**self._args)
self._set_window(window_type, window, min_periods)
if min_obs is None:
min_obs = len(self._x.columns) + 1
self._min_obs = min_obs
@cache_readonly
def resid(self):
return self._unstack_y(self._resid_raw)
@cache_readonly
def y_fitted(self):
return self._unstack_y(self._y_fitted_raw)
@cache_readonly
def y_predict(self):
"""Returns the predicted y values."""
return self._unstack_y(self._y_predict_raw)
def lagged_y_predict(self, lag=1):
"""
Compute forecast Y value lagging coefficient by input number
of time periods
Parameters
----------
lag : int
Returns
-------
DataFrame
"""
x = self._x.values
betas = self._beta_matrix(lag=lag)
return self._unstack_y((betas * x).sum(1))
@cache_readonly
def _rolling_ols_call(self):
return self._calc_betas(self._x_trans, self._y_trans)
@cache_readonly
def _df_raw(self):
"""Returns the degrees of freedom."""
df = self._rolling_rank()
if self._time_effects:
df += self._window_time_obs
return df[self._valid_indices]
@cache_readonly
def _var_beta_raw(self):
"""Returns the raw covariance of beta."""
x = self._x
y = self._y
dates = x.index.levels[0]
cluster_axis = None
if self._cluster == 'time':
cluster_axis = 0
elif self._cluster == 'entity':
cluster_axis = 1
nobs = self._nobs
rmse = self._rmse_raw
beta = self._beta_raw
df = self._df_raw
window = self._window
if not self._time_effects:
# Non-transformed X
cum_xx = self._cum_xx(x)
results = []
for n, i in enumerate(self._valid_indices):
if self._is_rolling and i >= window:
prior_date = dates[i - window + 1]
else:
prior_date = dates[0]
date = dates[i]
x_slice = x.truncate(prior_date, date)
y_slice = y.truncate(prior_date, date)
if self._time_effects:
xx = _xx_time_effects(x_slice, y_slice)
else:
xx = cum_xx[i]
if self._is_rolling and i >= window:
xx = xx - cum_xx[i - window]
result = _var_beta_panel(y_slice, x_slice, beta[n], xx, rmse[n],
cluster_axis, self._nw_lags,
nobs[n], df[n], self._nw_overlap)
results.append(result)
return np.array(results)
@cache_readonly
def _resid_raw(self):
beta_matrix = self._beta_matrix(lag=0)
Y = self._y.values.squeeze()
X = self._x.values
resid = Y - (X * beta_matrix).sum(1)
return resid
@cache_readonly
def _y_fitted_raw(self):
x = self._x.values
betas = self._beta_matrix(lag=0)
return (betas * x).sum(1)
@cache_readonly
def _y_predict_raw(self):
"""Returns the raw predicted y values."""
x = self._x.values
betas = self._beta_matrix(lag=1)
return (betas * x).sum(1)
def _beta_matrix(self, lag=0):
if lag < 0:
raise AssertionError("'lag' must be greater than or equal to 0, "
"input was {0}".format(lag))
index = self._y_trans.index
major_labels = index.labels[0]
labels = major_labels - lag
indexer = self._valid_indices.searchsorted(labels, side='left')
beta_matrix = self._beta_raw[indexer]
beta_matrix[labels < self._valid_indices[0]] = np.NaN
return beta_matrix
@cache_readonly
def _enough_obs(self):
# XXX: what's the best way to determine where to start?
# TODO: write unit tests for this
rank_threshold = len(self._x.columns) + 1
if self._min_obs < rank_threshold: # pragma: no cover
warnings.warn('min_obs is smaller than rank of X matrix')
enough_observations = self._nobs_raw >= self._min_obs
enough_time_periods = self._window_time_obs >= self._min_periods
return enough_time_periods & enough_observations
def create_ols_dict(attr):
def attr_getter(self):
d = {}
for k, v in compat.iteritems(self.results):
result = getattr(v, attr)
d[k] = result
return d
return attr_getter
def create_ols_attr(attr):
return property(create_ols_dict(attr))
class NonPooledPanelOLS(object):
"""Implements non-pooled panel OLS.
Parameters
----------
y : DataFrame
x : Series, DataFrame, or dict of Series
intercept : bool
True if you want an intercept.
nw_lags : None or int
Number of Newey-West lags.
window_type : {'full_sample', 'rolling', 'expanding'}
'full_sample' by default
window : int
size of window (for rolling/expanding OLS)
"""
ATTRIBUTES = [
'beta',
'df',
'df_model',
'df_resid',
'f_stat',
'p_value',
'r2',
'r2_adj',
'resid',
'rmse',
'std_err',
'summary_as_matrix',
't_stat',
'var_beta',
'x',
'y',
'y_fitted',
'y_predict'
]
def __init__(self, y, x, window_type='full_sample', window=None,
min_periods=None, intercept=True, nw_lags=None,
nw_overlap=False):
for attr in self.ATTRIBUTES:
setattr(self.__class__, attr, create_ols_attr(attr))
results = {}
for entity in y:
entity_y = y[entity]
entity_x = {}
for x_var in x:
entity_x[x_var] = x[x_var][entity]
from pandas.stats.interface import ols
results[entity] = ols(y=entity_y,
x=entity_x,
window_type=window_type,
window=window,
min_periods=min_periods,
intercept=intercept,
nw_lags=nw_lags,
nw_overlap=nw_overlap)
self.results = results
def _var_beta_panel(y, x, beta, xx, rmse, cluster_axis,
nw_lags, nobs, df, nw_overlap):
xx_inv = math.inv(xx)
yv = y.values
if cluster_axis is None:
if nw_lags is None:
return xx_inv * (rmse ** 2)
else:
resid = yv - np.dot(x.values, beta)
m = (x.values.T * resid).T
xeps = math.newey_west(m, nw_lags, nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xeps, xx_inv))
else:
Xb = np.dot(x.values, beta).reshape((len(x.values), 1))
resid = DataFrame(yv[:, None] - Xb, index=y.index, columns=['resid'])
if cluster_axis == 1:
x = x.swaplevel(0, 1).sortlevel(0)
resid = resid.swaplevel(0, 1).sortlevel(0)
m = _group_agg(x.values * resid.values, x.index._bounds,
lambda x: np.sum(x, axis=0))
if nw_lags is None:
nw_lags = 0
xox = 0
for i in range(len(x.index.levels[0])):
xox += math.newey_west(m[i: i + 1], nw_lags,
nobs, df, nw_overlap)
return np.dot(xx_inv, np.dot(xox, xx_inv))
def _group_agg(values, bounds, f):
"""
R-style aggregator
Parameters
----------
values : N-length or N x K ndarray
bounds : B-length ndarray
f : ndarray aggregation function
Returns
-------
ndarray with same length as bounds array
"""
if values.ndim == 1:
N = len(values)
result = np.empty(len(bounds), dtype=float)
elif values.ndim == 2:
N, K = values.shape
result = np.empty((len(bounds), K), dtype=float)
testagg = f(values[:min(1, len(values))])
if isinstance(testagg, np.ndarray) and testagg.ndim == 2:
raise AssertionError('Function must reduce')
for i, left_bound in enumerate(bounds):
if i == len(bounds) - 1:
right_bound = N
else:
right_bound = bounds[i + 1]
result[i] = f(values[left_bound:right_bound])
return result
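# Illustrative sketch of how _group_agg slices `values` by `bounds`: each
# entry of `bounds` is the starting row of a group, so the toy call below
# aggregates rows [0:2), [2:5) and [5:6). The helper name and the numbers are
# hypothetical, added only to clarify the docstring above.
def _example_group_agg():
    vals = np.arange(12.0).reshape(6, 2)
    bounds = np.array([0, 2, 5])
    # expected result: [[2, 4], [18, 21], [10, 11]]
    return _group_agg(vals, bounds, lambda v: np.sum(v, axis=0))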
def _xx_time_effects(x, y):
"""
Returns X'X - (X'T) (T'T)^-1 (T'X)
"""
# X'X
xx = np.dot(x.values.T, x.values)
xt = x.sum(level=0).values
count = y.unstack().count(1).values
selector = count > 0
# X'X - (T'T)^-1 (T'X)
xt = xt[selector]
count = count[selector]
return xx - np.dot(xt.T / count, xt)
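# Illustrative check of the identity computed above: _xx_time_effects(x, y)
# should equal X'X - (X'T)(T'T)^-1 (T'X), with T the matrix of time dummies.
# The helper name and the 3-date / 2-entity toy panel are hypothetical and
# serve only as a sketch of the formula.
def _example_xx_time_effects():
    from pandas import DataFrame, MultiIndex
    idx = MultiIndex.from_tuples([(t, e) for t in range(3) for e in ('a', 'b')])
    x = DataFrame(np.random.randn(6, 2), index=idx, columns=['x1', 'x2'])
    y = DataFrame(np.random.randn(6, 1), index=idx, columns=['y'])
    T = np.array([[1. if t == d else 0. for d in range(3)] for t, _ in idx])
    X = x.values
    explicit = X.T.dot(X) - X.T.dot(T).dot(np.linalg.inv(T.T.dot(T))).dot(T.T.dot(X))
    return np.allclose(_xx_time_effects(x, y), explicit)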
| mit |
cpsnowden/ComputationalNeurodynamics | Exercise_1/Q4_RK_HH.py | 1 | 1048 | """
Computational Neurodynamics
Exercise 1 Q4
(C) Murray Shanahan et al, 2015
"""
import numpy as np
import matplotlib.pyplot as plt
def xdot(x, I, fP):
v, m, n, h = x
gNa, gK, gL, ENa, EK, EL, C = fP
alpha_m = (2.5 - 0.1 * v) / (np.exp(2.5 - 0.1 * v) - 1.0)
alpha_n = (0.1 - 0.01 * v) / (np.exp(1.0 - 0.1 * v) - 1.0)
alpha_h = 0.07 * np.exp(-v / 20.0)
beta_m = 4.0 * np.exp(-v / 18.0)
beta_n = 0.125 * np.exp(-v / 80.0)
beta_h = 1.0 / (np.exp(3.0 - 0.1 * v) + 1.0)
mdot = (alpha_m * (1 - m) - beta_m * m)
  ndot = (alpha_n * (1 - n) - beta_n * n)
hdot = (alpha_h * (1 - h) - beta_h * h)
sigmaIk = gNa * (m ** 3) * h * (v - ENa) + gK * (n ** 4) * (v - EK) + gL * (v - EL)
vdot = (-sigmaIk + I) / C
return np.array([vdot, mdot, ndot, hdot])
def rk4(x_prev, I, dt, fP):
k1 = xdot(x_prev, I, fP)
k2 = xdot(x_prev + 0.5 * dt * k1, I, fP)
k3 = xdot(x_prev + 0.5 * dt * k2, I, fP)
k4 = xdot(x_prev + dt * k3, I, fP)
return x_prev + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
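# Illustrative driver for xdot/rk4 above. The conductances and reversal
# potentials are the standard Hodgkin-Huxley constants in the shifted-voltage
# convention (rest at 0 mV) that matches the rate functions above; the initial
# state, the injected current and the time step are assumptions chosen only
# for illustration.
def example_run(Tmax=100.0, dt=0.01, I=10.0):
  fP = (120.0, 36.0, 0.3, 115.0, -12.0, 10.6, 1.0)  # gNa, gK, gL, ENa, EK, EL, C
  x = np.array([0.0, 0.05, 0.32, 0.6])              # v, m, n, h near rest
  ts = np.arange(0.0, Tmax, dt)
  vs = np.zeros(len(ts))
  for k, _ in enumerate(ts):
    vs[k] = x[0]
    x = rk4(x, I, dt, fP)
  # plt.plot(ts, vs) would show the resulting spike train
  return ts, vs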
| gpl-3.0 |
QJonny/CyNest | testsuite/manualtests/stdp_dopa_check.py | 2 | 10096 | #! /usr/bin/env python
#
# stdp_dopa_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
import numpy as n
# Test script to reproduce changes in weight of a dopamine modulated STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from spikes-6-0.gdf
# (output of test_stdp_dopa.py).
# output: pre/post/dopa \t spike time \t weight
#
# Synaptic dynamics for dopamine modulated STDP synapses as used in [1], based on [2]
#
# References:
# [1] Potjans W, Morrison A and Diesmann M (2010). Enabling functional neural circuit simulations with distributed computing of neuromodulated plasticity. Front. Comput. Neurosci. 4:141. doi:10.3389/fncom.2010.00141
# [2] Izhikevich, E. M. (2007). Solving the distal reward problem through linkage of STDP and dopamine signaling. Cereb. Cortex 17(10), 2443-2452.
#
# author: Wiebke Potjans, October 2010
def stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d):
w = w_init # initial weight
w_min = 0. # minimal weight
w_max = 200. #maximal weight
i=0 # index of presynaptic spike
j=0 # index of postsynaptic spike
k=0 # index of dopamine spike
last_post_spike = dendritic_delay
Etrace = 0.
Dtrace = 0.
last_e_update = 0.
last_w_update = 0.
last_pre_spike = 0.
last_dopa_spike = 0.
advance = True
while advance:
advance = False
# next spike is presynaptic
if ((pre_spikes[i] < post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
dt = pre_spikes[i] - last_post_spike
# weight update
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
# next spike is postsynaptic
if( (post_spikes[j] < pre_spikes[i]) and (post_spikes[j] < dopa_spikes[k])):
dt = post_spikes[j] - last_pre_spike
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is dopamine spike
if ((dopa_spikes[k] < pre_spikes[i]) and (dopa_spikes[k] < post_spikes[j])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# pre and postsynaptic spikes are at the same time
# Etrace is not updated for this case; therefore no weight update is required
if ((pre_spikes[i]==post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) -1:
j +=1
advance = True
# presynaptic spike and dopamine spike are at the same time
if ((pre_spikes[i]==dopa_spikes[k]) and (pre_spikes[i] < post_spikes[j])):
dt = pre_spikes[i] - last_post_spike
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# postsynaptic spike and dopamine spike are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j] < pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# all three spikes are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j]==pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
return w
# stdp dopa parameters
w_init = 35.
tau_plus = 20.
tau_minus = 15.
tau_e = 1000.
tau_d = 200.
A_minus = 1.5
A_plus = 1.0
dendritic_delay = 1.0
delay_d = 1.
# load spikes from simulation with test_stdp_dopa.py
spikes = n.loadtxt("spikes-3-0.gdf")
pre_spikes = spikes[find(spikes[:,0]==4),1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + dendritic_delay at the synapse
post_spikes =spikes[find(spikes[:,0]==5),1] + dendritic_delay
# dopa spike arrives at sp_j + delay_d at the synapse
dopa_spikes = spikes[find(spikes[:,0]==6),1] + delay_d
# calculate development of stdp weight
w = stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d)
print w
| gpl-2.0 |
jllanfranchi/phys597_computational2 | ch07_problem7.30/p7x30bak02.py | 1 | 28673 | #!/usr/bin/env python
from __future__ import division
from __future__ import with_statement
import numpy as np
#from pylab import ion
import matplotlib as mpl
#from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
#import numexpr as ne
from numba import autojit
import sys
import time
import cPickle as pickle
import collections
from collections import deque
from multiprocessing import Process, Queue
from smartFormat import smartFormat
from genericUtils import wstdout
__author__ = "J.L. Lanfranchi"
__email__ = "[email protected]"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
#ion()
#-- Adjust the font used on the plots
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
#@autojit
def step(previousDirection, currentCoord, relMoveDir):
newDirection = (previousDirection + relMoveDir) % 4
if newDirection == 0:
return (currentCoord[0]+1, currentCoord[1])
elif newDirection == 1:
return (currentCoord[0], currentCoord[1]+1)
elif newDirection == 2:
return (currentCoord[0]-1, currentCoord[1])
else:
return (currentCoord[0], currentCoord[1]-1)
#@autojit
def measureChain(chain):
"""Measures the Euclidean distance from the startpoint to endpoint of
a chain"""
return ((chain[-1][0] - chain[0][0])**2 + (chain[-1][1] - chain[0][1])**2)
#@autojit
def simpleAttemptToCreateChain(nSteps, changesInDir=(0,-1,+1)):
"""State is taken to be direction of travel; there are 4 directions, so
four states: right (0), up (1), left (2), and down (3). The only allowed
transitions are
state -> state + 1 (modulo 4)
state -> state - 1 (modulo 4)
state -> state
Then, it must be checked that the coordinate isn't already in the chain;
if it is, then None is returned; otherwise, the algo repeats until a
chain of nSteps is reached.
"""
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
#chainCoords = collections.deque([(0,0)])
chainCoords = [(0,0), (0,1)]
chainX = [0,0]
chainY = [0,1]
#chainCoords = {(0,0):True}
coord = (0,1)
#chainCoords.append(coord)
#chainCoords.update({coord:True})
previousDirection = 1
length = 1
nChangesInDir = len(changesInDir)
while True:
relMoveDir = changesInDir[ np.int(np.random.random()*nChangesInDir) ]
coord = step(previousDirection, coord, relMoveDir)
if coord in chainCoords:
return None
chainCoords.append(coord)
#chainCoords.update({coord:True})
length += 1
if length == nSteps:
return chainCoords
def npCreateChain(nSteps, changesInDir=(0,-1,+1)):
nChangesInDir = len(changesInDir)
np.random.random(nSteps)*nChangesInDir
class CreateChainWorkerClass(Process):
"""Direction is direction of travel; there are 4 directions, so
right=0, up=1, left=2, and down=3. By default the only allowed transitions
are
direction -> direction + 1 (modulo 4)
direction -> direction - 1 (modulo 4)
direction -> direction
but the changesInDir allows the user to specify what changes are allowed.
Note that each is chosen with equal probability.
Next, it is checked that the coordinate isn't already in the chain;
if it is in the chain, then None is returned; otherwise, the algo repeats
until a chain of nSteps is reached.
"""
# TODO: shared "tries" object contains single int, gets incremented
# for each try
def __init__(self, chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
Process.__init__(self)
wstdout("0")
self.chainsQueue = chainsQueue
self.nSteps = nSteps
self.changesInDir = changesInDir
self.nChains = nChains
self.nChangesInDir = len(changesInDir)
wstdout("1\n")
def step(self, previousDirection, currentCoord, relMoveDir):
newDirection = (previousDirection + relMoveDir) % 4
if newDirection is 0:
return (currentCoord[0]+1, currentCoord[1])
elif newDirection is 1:
return (currentCoord[0], currentCoord[1]+1)
elif newDirection is 2:
return (currentCoord[0]-1, currentCoord[1])
else:
return (currentCoord[0], currentCoord[1]-1)
def run(self):
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
while True:
chainCoords = collections.deque([(0,0)], maxlen=self.nSteps+1)
coord = (0,1)
chainCoords.append(coord)
previousDirection = 1
thisChainLen = 1
while thisChainLen < self.nSteps:
if self.chainsQueue.qsize() >= self.nChains:
return
relMoveDir = self.changesInDir[
np.random.randint(0,self.nChangesInDir)]
coord = self.step(previousDirection, coord, relMoveDir)
if coord in chainCoords:
break
chainCoords.append(coord)
thisChainLen += 1
if thisChainLen == self.nSteps:
self.chainsQueue.put(chainCoords)
break
def createChainWorker(chainsQueue, nSteps, changesInDir=(0,-1,+1), nChains=1):
"""Direction is direction of travel; there are 4 directions, so
right=0, up=1, left=2, and down=3. By default the only allowed transitions
are
direction -> direction + 1 (modulo 4)
direction -> direction - 1 (modulo 4)
direction -> direction
but the changesInDir allows the user to specify what changes are allowed.
Note that each is chosen with equal probability.
Next, it is checked that the coordinate isn't already in the chain;
if it is in the chain, then None is returned; otherwise, the algo repeats
until a chain of nSteps is reached.
"""
# TODO: shared "tries" object contains single int, gets incremented
# for each try
#wstdout("0")
chainsQueue = chainsQueue
nSteps = nSteps
nChains = nChains
nChangesInDir = len(changesInDir)
#wstdout("1")
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
while True:
#chainCoords = collections.deque([(nSteps,nSteps),(nSteps,nSteps+1)],
# maxlen=nSteps+1)
chainCoords = collections.deque([(0,0), (0,1)])
coord = (0,1)
previousDirection = 1
thisChainLen = 1
while thisChainLen < nSteps:
relMoveDir = changesInDir[
np.random.randint(0,nChangesInDir)]
coord = step(previousDirection, coord, relMoveDir)
if coord in chainCoords:
break
chainCoords.append(coord)
thisChainLen += 1
if thisChainLen == nSteps:
chainsQueue.put(chainCoords)
break
def reptateChainWorker(chainsToReptate, lenSq, nChainsToCompute):
chain = chainsToReptate.get()
    childChains = chain.reptate()
    for (childChain, operation) in childChains:
        if operation == 'move':
            lenSq.put(measureChain(childChain))
        if lenSq.qsize() < nChainsToCompute:
chainsToReptate.put(childChain)
def simpleCreateChain(nSteps=5, changesInDir=(-1,0,1)):
while True:
chain = simpleAttemptToCreateChain(nSteps, changesInDir=changesInDir)
if chain != None:
break
return collections.deque(chain, maxlen=nSteps+1)
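# Illustrative sketch: estimate the mean squared end-to-end distance <R_N^2>
# by repeatedly regenerating self-avoiding walks with simpleCreateChain.
# The helper name and the sample sizes are arbitrary illustration values.
def exampleMeanSquareLength(nSteps=15, nSamples=50):
    r2 = [measureChain(simpleCreateChain(nSteps)) for _ in range(nSamples)]
    return np.mean(r2)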
def createChainParallel(nSteps=60, nChains=1, changesInDir=(-1,0,1), nProcs=4):
chainsQueue = Queue()
args = (chainsQueue, nSteps, changesInDir, nChains)
##pool = Pool(processes=nProcs)
#kwargs = {'nSteps': nSteps,
# 'changesInDir': changesInDir,
# 'nChains': nChains,
# 'chainsQueue': chainsQueue
# }
#for procN in range(nProcs):
# #pool.apply_async(CreateChainWorkerClass, kwds=kwargs)
# CreateChainWorkerClass, kwds=kwargs)
#while chainsQueue.qsize() < nChains:
# time.sleep(0.2)
#chains = []
#while not chainsQueue.empty():
# chains.append(chainsQueue.get())
procs = []
for n in range(nProcs):
procs.append(Process(target=createChainWorker,args=args))
[proc.start() for proc in procs]
#while chainsQueue.qsize() < nChains:
# time.sleep(0.1)
chains = []
#while not chainsQueue.empty():
    while len(chains) < nChains:
#time.sleep(0.5)
chains.append(chainsQueue.get())
[proc.terminate() for proc in procs]
return chains
#class chain:
# def __init__(self, nSteps, initialChain=None):
# self.moveForward = True
# if initialChain == None:
# self.nSteps = nSteps
# self.
#
# self.coordinates = collections.deque(coords, maxlen=nSteps)
class reptationChain90:
"""
90-degree-only reptation chain of length nSteps
"""
def __init__(self, nSteps, initialChain):
self.nSteps = nSteps
def reptate(self):
pass
def createChainReptation(nSteps):
"""State is taken to be direction of travel; there are 4 directions, so
four states: right (0), up (1), left (2), and down (3). The only allowed
transitions are
state -> state + 1 (modulo 4)
state -> state - 1 (modulo 4)
state -> state
Then, it must be checked that the coordinate isn't already in the chain;
if it is, then None is returned; otherwise, the algo repeats until a
chain of nSteps is reached.
"""
#-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
chainCoords = [(0,0)]
chainCoords = [(0,0)]
coord = (0,1)
chainCoords.append(coord)
state = 1
length = 1
#np.random.seed(int(time.time()*1000)%120)
#np.random.seed(2)
while True:
randVal = np.random.randint(low=-1, high=2)
state = (state + randVal) % 4
if state is 0:
coord = (coord[0]+1, coord[1])
elif state is 1:
coord = (coord[0], coord[1]+1)
elif state is 2:
coord = (coord[0]-1, coord[1])
elif state is 3:
coord = (coord[0], coord[1]-1)
if coord in chainCoords:
return None
chainCoords.append(coord)
length += 1
if length == nSteps:
return chainCoords
def coordsFromAbsDir(absdir):
nsteps = len(absdir)
offset = 1
    xincr = np.zeros(nsteps+1, dtype=int)
    yincr = np.ones(nsteps+1, dtype=int)
    xincr[np.argwhere(absdir==0)+1] = 1
    xincr[np.argwhere(absdir==2)+1] = -1
    yincr[np.argwhere(absdir==1)+1] = 1
    yincr[np.argwhere(absdir==3)+1] = -1
    x = np.cumsum(xincr)
    y = np.cumsum(yincr)
return x, y
def plotSnakeAbsDir(absdir):
    plotSnakeXY(*coordsFromAbsDir(absdir))
def plotSnakeXY(x, y):
    fig, ax = plt.subplots()
    plt.plot(x,y,'r-o',linewidth=3,markersize=6)
    plt.plot(x[0],y[0],'ko',markersize=10)
    #ax.set_xlim(min(x)-2, max(x)+2)
    #ax.set_ylim(min(y)-2, max(y)+2)
    plt.axis('image')
for spine in ax.spines.itervalues():
spine.set_visible(False)
ax.set_xlim(min(x)-2, max(x)+2)
ax.set_ylim(min(y)-2, max(y)+2)
def plotSnakeCoord(coords):
x = []
y = []
for c in coords:
x.append(c[0])
y.append(c[1])
plotSnakeXY(x, y)
def newSnake1(nSteps=10):
#reldir = (random.random(nSteps)*2).astype(int)-1
    reldir = np.random.randint(-1,2,nSteps)
    absdir = np.mod(1+np.cumsum(reldir), 4)
    x, y = coordsFromAbsDir(absdir)
def newSnake2(nSteps=10):
pass
class snake:
"""Self-avoiding random walk."""
def __init__(self, nsteps, validDirs=(-1,1)):
#-- Use a deque as a circular buffer to store the coords
self.coords = deque(maxlen=nsteps+1)
[ self.coords.append((0,y)) for y in range(nsteps+1) ]
self.R2 = [nsteps**2]
#-- This is either -1 (points at most-recently-added element)
# or 0 (points at oldest element)
self.forward = True
self.c1 = -1
self.c2 = -2
self.c_end = 0
self.validDirs = validDirs
self.nValidDirs = len(validDirs)
def plot(self):
if self.forward:
plotSnakeCoord(self.coords)
else:
rc = self.coords
rc.reverse()
plotSnakeCoord(rc)
def stats(self):
self.meanR2 = np.mean(self.R2)
return self.meanR2
def reptate(self):
dx = self.coords[self.c1][0]-self.coords[self.c2][0]
if dx == 1:
previousDir = 0
elif dx == -1:
previousDir = 2
elif self.coords[self.c1][1]-self.coords[self.c2][1] == 1:
previousDir = 1
else:
previousDir = 3
proposedDir = (previousDir + \
self.validDirs[np.random.randint(0,self.nValidDirs)]) % 4
if proposedDir == 0:
proposedCoord = (self.coords[self.c1][0]+1,self.coords[self.c1][1])
elif proposedDir == 1:
proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]+1)
elif proposedDir == 2:
proposedCoord = (self.coords[self.c1][0]-1,self.coords[self.c1][1])
else:
proposedCoord = (self.coords[self.c1][0],self.coords[self.c1][1]-1)
#-- Exchange head and tail of snake
if proposedCoord in self.coords:
self.forward = not self.forward
if self.forward:
self.c1 = -1
self.c2 = -2
self.c_end = 0
else:
self.c1 = 0
self.c2 = 1
self.c_end = -1
self.R2.append(self.R2[-1])
#-- Or prepand / append new coord
else:
if self.forward:
self.coords.append(proposedCoord)
else:
self.coords.appendleft(proposedCoord)
#print self.coords[self.c1], self.coords[self.c2]
self.R2.append((self.coords[self.c1][0]
-self.coords[self.c_end][0])**2+
(self.coords[self.c1][1]
-self.coords[self.c_end][1])**2)
#def measureChain(chain):
# """Measures the Euclidean distance from the startpoint to endpoint of
# a chain"""
# return (chain[-1][0] - chain[0][0])**2 + (chain[-1][1] - chain[0][1])**2
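# Illustrative sketch of the reptation ("slithering snake") estimate of
# <R_N^2>: start from a straight chain, reptate repeatedly and average the
# recorded squared end-to-end distances. The helper name and the step/move
# counts are arbitrary illustration values.
def exampleReptation(nsteps=20, nreptations=5000):
    s = snake(nsteps)
    for _ in range(nreptations):
        s.reptate()
    return s.stats()  # running mean of R^2 over all reptation moves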
formatDic = {'sigFigs': 4, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
return multFact*(x**power) + offset
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
offsetStr = smartFormat(offset, alwaysShowSign=True, **formatDic)
if not (offsetStr[0] == "+" or offsetStr[0] == "-"):
offsetStr = "+" + offsetStr
latex = r"$" + smartFormat(multFact, **formatDic) + \
r" \cdot N^{" + smartFormat(power, **formatDic) + r"} " + \
offsetStr + \
r"$"
return latex
def exponential(x, expExponent, multFact=1):
return multFact * np.exp(np.array(x)*expExponent)
def exponentialLatex(expExponent, multFact=1, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}$"
return latex
def expPower(x, expExponent, powerLawExp, multFact):
x = np.array(x)
return multFact * np.exp(x*expExponent) * x**powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
latex = r"$" + smartFormat(multFact, **formatDic) + \
r"\cdot e^{" + smartFormat(expExponent, **formatDic) + \
r"\cdot N}\cdot N^{" + smartFormat(powerLawExp, **formatDic) + \
r"}$"
return latex
class SimulationData:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Simulation:
def __init__(self):
self.sd = SimulationData()
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
self.stateFilename = "p7x28_state.pk"
def saveState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'wb') as stateFile:
pickle.dump(self.sd, stateFile, -1)
def loadState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'rb') as stateFile:
self.sd = pickle.load(stateFile)
def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
plotting=False):
#-- Reset state variables for a new simulation run
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
timeLastSaved = time.time()
self.sd.targetSuccesses = targetSuccesses
self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
self.sd.allChainFinalCoords = []
self.sd.allMeanChainFinalCoords = []
self.sd.meanChainFinalCoords = []
self.sd.chainSquareLengthAvg = []
self.sd.successRatio = []
self.sd.timingAvg = []
if plotting:
self.fig1 = plt.figure(1)
self.fig1.clf()
            self.ax1 = self.fig1.add_subplot(111)
            line, = self.ax1.plot([], [], 'ko-', lw=2)
            self.ax1.set_xlim(-20,20)
            self.ax1.set_ylim(-20,20)
            self.ax1.axis('image')
plt.draw()
for stepsThisChain in self.sd.stepsInChains:
startTime = time.time()
successfulChains = []
chainSquareLengths = []
chainFinalCoords = []
meanChainFinalCoord = []
nSuccesses = 0
trialN = 0
while nSuccesses < self.sd.targetSuccesses:
trialN += 1
chain = simpleAttemptToCreateChain(stepsThisChain,(-1,0,1))
if chain == None:
continue
successfulChains.append(chain)
chain = np.array(chain)
                chainSquareLengths.append(measureChain(chain))  # measureChain already returns R^2
chainFinalCoords.append(chain[-1,:])
nSuccesses += 1
if plotting:
line.set_data(chain[:,0],chain[:,1])
self.ax1.set_xlim(-20,20)
self.ax1.set_ylim(-20,20)
plt.draw()
time.sleep(0.005)
chainFinalCoords = np.array(chainFinalCoords)
self.sd.allChainFinalCoords.append(chainFinalCoords)
self.sd.allMeanChainFinalCoords.append(meanChainFinalCoord)
self.sd.meanChainFinalCoord = np.mean(chainFinalCoords, 0)
self.sd.chainSquareLengthAvg.append(np.mean(chainSquareLengths))
self.sd.successRatio.append(nSuccesses / trialN)
self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
sys.stdout.write("\nstepsThisChain = " + str(stepsThisChain) + "\n")
sys.stdout.write(" nSuccesses/nTrials = " + str(nSuccesses) + "/"
+ str(trialN) + " = "
+ str(self.sd.successRatio[-1]) + "\n")
sys.stdout.write(" time/success = " +
str(self.sd.timingAvg[-1]) + "\n")
sys.stdout.flush()
if (time.time() - timeLastSaved) > 60*5:
self.saveState()
timeLastSaved = time.time()
self.sd.allMeanChainFinalCoords = \
np.array(self.sd.allMeanChainFinalCoords)
#-- TODO: mean of final-position vector (r_N vector)
#np.sqrt(allMeanChainFinalCoords[:,0]**2+
# allMeanChainFinalCoords[:,1]**2)
self.sd.simulationCompleted = True
self.saveState()
def postproc(self):
"""Perform curve fitting to the data"""
#-- Update state
self.sd.postprocCompleted = False
#-- Check that simulation data is present
if not self.sd.simulationCompleted:
raise Exception("No simulation run; cannot perform curve fit!")
#-- Same x data is used for *all* the below curve fits
x = self.sd.stepsInChains
#============================================================
# Fit success fraction with const * exponential * power law
#============================================================
y = self.sd.successRatio
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
p0 = (-0.117, 0.1, 2)
popt1, pcov1 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0)
self.sd.fit1 = expPower(x, *popt1)
self.sd.fit1eqn = expPowerLatex(*popt1)
print popt1, pcov1, "\n"
#============================================================
# TODO: Fit the final position data
#============================================================
#y = (self.sd.chainLengthAvg)
#sigma = list(np.array(y))
#popt2, pcov2 = curve_fit(powerLaw, x, y, sigma=sigma)
#self.sd.fit2 = powerLaw(x, *popt2)
#self.sd.fit2eqn = powerLawLatex(*popt2)
#print popt2, pcov2, "\n"
#============================================================
# Fit R_N^2 with const * power-law + const
#============================================================
y = self.sd.chainSquareLengthAvg
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
self.sd.fit3 = powerLaw(x, *popt3)
self.sd.fit3eqn = powerLawLatex(*popt3)
print popt3, pcov3, "\n"
#============================================================
# Exponential fit to wall-clock time (not as good a fit as
# exp*power, so this is commented out)
#============================================================
#y = (self.sd.timingAvg)
##p0 = (0.0985, 0.1, 1.65e-5)
#p0 = (0.0985, 1)
#sigma = list(np.array(y))
#popt4, pcov4 = curve_fit(f=exponential, xdata=x, ydata=y, sigma=sigma,
# p0=p0, )
#self.sd.fit4 = exponential(x, *popt4)
#self.sd.fit4eqn = exponentialLatex(*popt4)
#print popt4, pcov4, "\n"
#============================================================
# Exponential * power-law fit to wall-clock time
#============================================================
y = self.sd.timingAvg
#-- Initial guess
p0 = (0.129, 0, 2.981e-3)
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0, )
self.sd.fit4 = expPower(x, *popt4)
self.sd.fit4eqn = expPowerLatex(*popt4)
print popt4, pcov4, "\n"
#-- Update state
self.sd.postprocCompleted = True
def plotResults(self, savePlot=True):
"""Plot the data and the fit curves"""
if not self.sd.simulationCompleted:
raise Exception("No simulation has been run; cannot plot results!")
if not self.sd.postprocCompleted:
self.postproc()
self.fig2 = plt.figure(2, figsize=(7,12), dpi=80)
self.fig2.clf()
self.ax21 = self.fig2.add_subplot(311)
self.ax21.plot(self.sd.stepsInChains, self.sd.successRatio,
'bo', label="data", markersize=4)
self.ax21.plot(self.sd.stepsInChains, self.sd.fit1,
'r-', label=self.sd.fit1eqn, linewidth=2, alpha=0.75)
self.ax21.set_title(
"Non-intersecting 2D random-walk chains;" +
" stop condition: " + str(self.sd.targetSuccesses) +
" successfully-built chains")
self.ax21.set_ylabel(r"Success fraction $f(N)$")
self.ax21.set_yscale('log')
self.ax21.grid(which='major', b=True)
self.ax21.legend(loc="best", fancybox=True, shadow=True)
#-- TODO: average of final position plot
#self.ax22 = fig2.add_subplot(412)
#self.ax22.plot(self.sd.stepsInChains, self.sd.chainLengthAvg,
# 'bo', label="data", markersize=4)
#self.ax22.plot(self.sd.stepsInChains, self.sd.fit2,
# 'r-', label=self.sd.fit2eqn, linewidth=2, alpha=0.75)
#self.ax22.set_ylabel(r"$\langle R_N \rangle$")
##self.ax22.set_yscale('log')
#ax22.grid(which='major', b=True)
#ax22.legend(loc="best", fancybox=True, shadow=True)
self.ax23 = self.fig2.add_subplot(312)
self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
'bo', label="data", markersize=4)
self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
self.ax23.grid(which='major', b=True)
self.ax23.legend(loc="upper left", fancybox=True, shadow=True)
self.ax24 = self.fig2.add_subplot(313)
self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
'bo', label="data", markersize=4)
self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
        self.ax24.set_xlabel(r"Number of steps in walk, $N$")
self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
self.ax24.set_yscale('log')
self.ax24.grid(which='major', b=True)
self.ax24.legend(loc="upper left", fancybox=True, shadow=True)
self.fig2.tight_layout()
if savePlot:
self.fig2.savefig("2014-01-14_problem7x28_plots.pdf")
self.fig2.savefig("2014-01-14_problem7x28_plots.png", dpi=120)
plt.show()
if __name__ == "__main__":
startTime = time.time()
#-- Instantiate the Simulation object
#sim = Simulation()
##-- Try to load the sim data from any previous run; if no data saved
## to disk in the default location, run a new simulation
##try:
## sim.loadState()
##except Exception as e:
## print "Error({0}: {1}".format(e.errno, e.strerror)
## #sim.runSimulation(targetSuccesses=10, stepsRange=(4,101))
#sim.runSimulation(targetSuccesses=10, stepsRange=(5,30))
##-- *Always* perform post-processing and plotting (allows easy modification
## of the postprocessing (curve fitting) and plotting routines
## without needing to re-run the simulation, which can take hours)
#sim.postproc()
#sim.plotResults()
##print simpleCreateChain(nSteps=20)
chains = createChainParallel(nSteps=60, nProcs=1, nChains=1)
print time.time()-startTime
[wstdout(str(len(chain)) + " ") for chain in chains]
wstdout("\n")
| mit |
vnleonenko/Influenza | FluGraphs_v2/epid_peak_functions.py | 1 | 9001 | import operator
import numpy as np
import fit_functions as ff
import matplotlib.pyplot as plt
ILI_level_delta = 1.4 #1.17
#MAGIC NUMBER - suitable for SPb only!
def extractEpidStages ( incid_list, epid_markers ):
#Finds out ILI levels and extracts epidemic peaks
#incid_list = data_list[...,3]
#epid_markers contains expert opinion on epid start
high_ILI_level_start = 75
high_ILI_level_finish = 340
low_ILI_level_start = 0
low_ILI_level_finish = high_ILI_level_start
#TODO: add curve analysis to find exactly the intervals for every given season
ydata = incid_list
ydata_up_init = ydata[low_ILI_level_start:low_ILI_level_finish] #a part of the curve with upper horizontal ILI graph
a1 = ff.find_regr_coefficient(ydata_up_init)
ydata_up_init = ydata[high_ILI_level_start:high_ILI_level_finish]
a2 = ff.find_regr_coefficient(ydata_up_init)
epid_peaks, epid_curves = find_epid_peaks(ydata, a2, epid_markers)#a2*ILI_level_delta)
return a1, a2, epid_peaks, epid_curves
def suitsEpidPeakCriteria (value, level):
if int(value) > int(level):
return True
else:
return False
def suitsEpidPeakCriteriaFormal (epid_peak_index, epid_markers_list):
return (epid_markers_list[epid_peak_index] == 1)
def stop_condition_epi_left(value, level, eps_level, prev_el):
#stop occurs when value is below high ILI level or the value function derivative is zero near the level
return value < level or ( value < eps_level*level and value-prev_el >0 )
def stop_condition_epi_right(value, level, eps_level, prev_el, edge_left):
#stop occurs when value is below high ILI level or the value function derivative is zero near the level or right side is lower than left one
return value < level or value < edge_left or ( value < eps_level*level and value < eps_level*edge_left and value-prev_el > 0 )
def extractPeakElements (arr, index, ground):
"Removing peaks, both epidemic and non-epidemic, and returning modified array along with peak incid nums and their indices"
#!!! Assuming there's one or two peaks, - in the latter case the second is higher, - otherwise the indices will be corrupted
#new_version: checking left border by the ILI_high level and derivative sign change, right border is set equal for the same level
epid_curve_right = []
epid_curve_indices_right = []
epid_curve_left = []
epid_curve_indices_left = []
#scanning points in backward direction
prev_element = arr[index] +100;
i=index-1
while not ( stop_condition_epi_left( arr[i], ground, ILI_level_delta, prev_element ) ) and i > 0:
epid_curve_left.append(arr[i])
epid_curve_indices_left.append(i)
prev_element = arr[i]
i=i-1
lowest_el = arr[i+1] #epid_curve_left[-1]
arr = list(arr)
prev_element = arr[index] +100;
prev_slope = 0
#scanning points in forward direction
while not( stop_condition_epi_right( arr[index], ground, ILI_level_delta, prev_element, lowest_el) ) and index < len(arr)-1:
#print(arr[index])
prev_element = arr[index]
epid_curve_right.append(arr[index])
epid_curve_indices_right.append(index)
index+=1
epid_curve_left = epid_curve_left[::-1]
epid_curve_indices_left = epid_curve_indices_left[::-1]
epid_curve_left.extend(epid_curve_right)
epid_curve_indices_left.extend(epid_curve_indices_right)
if len(epid_curve_indices_left)>0:
arr = [ arr[i] for i in range(len(arr)) if i < epid_curve_indices_left[0] ] #removing all the data to the right of epid peak (the biggest is the most right)
return arr, np.column_stack((epid_curve_indices_left, epid_curve_left))
def find_epid_peaks ( arr, level_delta, epid_markers):
"returns epid peaks from the high ILI period incidence data"
#we distinguish "just peaks" (epi criteria doesn't hold) and "epidemic peaks" (epi criteria holds)
#level_high_bound = int(level_delta) #a2 level of high ILI
level_ILI = int(level_delta)
max_value = level_ILI+10 #start condition
epid_peaks = []
epid_curves_matr = []
while max_value>level_ILI:
max_value_index, max_value = max(enumerate(arr), key=operator.itemgetter(1))
arr, epid_curve_matr = extractPeakElements( arr, max_value_index, level_ILI )
#if suitsEpidPeakCriteria(max_value, level_ILI): #Using our proper criteria
if suitsEpidPeakCriteriaFormal(max_value_index, epid_markers): #Using official healthcare criteria
epid_peaks.append(max_value)
epid_curves_matr.append(epid_curve_matr)
return epid_peaks, epid_curves_matr
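# Illustrative sketch of find_epid_peaks on synthetic data: a triangular
# outbreak rising above a flat below-threshold baseline, with the official
# epidemic marker set on the day of the maximum. The helper name, the
# threshold of 300 and all incidence numbers are hypothetical.
def example_find_epid_peaks():
    baseline = [200] * 30
    outbreak = list(range(300, 900, 60)) + list(range(900, 300, -60))
    incidence = baseline + outbreak + baseline
    markers = [0] * len(incidence)
    markers[len(baseline) + outbreak.index(max(outbreak))] = 1
    # returns ([900], [index/incidence matrix of the extracted peak curve])
    return find_epid_peaks(incidence, 300, markers)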
def plotEpidemicOutbreak ( fig, col_epid ):
for i in range(0, len(col_epid), 1):
if col_epid[i] == 1.0:
fig.axvline(i, color='red', linestyle='dashed', linewidth=0.5, label='Outbreak marker')
def plotEpidCurves ( epid_curves, days_list, x_thursday ):
for epid_curve in epid_curves:
plt.plot(epid_curve[...,0], epid_curve[...,1], "r", linewidth=4)
days_list_peak = days_list[[int(i) for i in epid_curve[...,0]]]
epid_days = epid_curve[...,0]
x_thursday_peak = [int(i) for i in epid_days if i in x_thursday]
x_thursday_peak_indices = [i for i in range(0, len(epid_days)) if epid_days[i] in x_thursday]
plt.plot(x_thursday_peak, epid_curve[x_thursday_peak_indices,1], "ro", linewidth=4)
fname_curve = 'epi_' + str(int(days_list[epid_curve[0,0]])) + '.txt'
np.savetxt(fname_curve, np.column_stack((days_list_peak, epid_curve[...,1])), fmt="%d %d")
plt.plot([], [], "r", label='Epidemic outbreak')
    #plt.plot([], [], "r", label='Flu epidemic')
plt.legend(loc='upper left')
#section for functions connected with transitions between ILI levels
def stop_condition_hilo(value, level, eps_level, prev_el, prev_slope):
#Right curve is stopped by the change of the curve form for the incidence decline
return value >=level or (value>=eps_level*level and value-prev_el <=prev_slope)
def stop_condition_lohi(value, level, eps_level, prev_el):
#Left curve is stopped by the stop of the incidence growth
return value >=level or (value>=eps_level*level and value-prev_el <=0)
def find_lohi_transit( ydata, a1, a2 ):
index = 0
excess_a1 = 1.1
lack_a2 = 0.9
prev_element = 2*ydata[index]
#seeking the beginning
while not(ydata[index]>a1*excess_a1 and ydata[index]>prev_element or index>len(ydata)-1):
prev_element=ydata[index]
index+=1
begin_index = index
#iterating over transition curve
trans_curve = []
trans_curve_index = []
prev_element = 0.5*ydata[index]
while not( stop_condition_lohi(ydata[index], a2, lack_a2, prev_element) or index>len(ydata)-1):
#trans_curve.append(ydata[index])
trans_curve_index.append(int(index))
prev_element = ydata[index]
index+=1
#saving the end of a1->a2
end_index = index
return np.array(trans_curve_index)
def find_hilo_transit( ydata, a1, a2 ):
index = len(ydata)-1
excess_a1 = 1.1
lack_a2 = 0.9
prev_element = ydata[index]+100
prev_slope = 0
#seeking the beginning
while not(ydata[index]>a1*excess_a1 and ydata[index]>=prev_element or index<0):
prev_element=ydata[index]
index-=1
end_index = index
#iterating over transition curve
trans_curve = []
trans_curve_index = []
prev_element = ydata[index]-100
while not( stop_condition_hilo(ydata[index], a2, lack_a2, prev_element, prev_slope) or index<0):
#trans_curve.append(ydata[index])
trans_curve_index.append(int(index))
prev_element = ydata[index]
prev_slope = ydata[index]-prev_element
index-=1
#saving the end of a1->a2
begin_index = index
#return begin_index, end_index
return np.array(trans_curve_index[::-1])
def find_transit_curves ( ydata, a1, a2 ):
trans_curve1_index = find_lohi_transit(ydata, a1, a2)
trans_curve2_index = find_hilo_transit(ydata, a1, a2)
return trans_curve1_index, trans_curve2_index
def plotLevelTransitions (trans_curve1_index, trans_curve2_index, ydata, x_thursday):
ydata = np.array(ydata)
if (trans_curve1_index!=[]):
x_thursday_curve1 = [int(i) for i in trans_curve1_index if i in x_thursday]
plt.plot(trans_curve1_index, ydata[trans_curve1_index], "g", linewidth=1, label='Level transition')
plt.plot(x_thursday_curve1, ydata[x_thursday_curve1], "go", linewidth=4)
#plt.plot(trans_curve1_index, ydata[trans_curve1_index], "g", linewidth=1, label='Level transition')
if (trans_curve2_index!=[]):
x_thursday_curve2 = [int(i) for i in trans_curve2_index if i in x_thursday]
plt.plot(trans_curve2_index, ydata[trans_curve2_index], "g", linewidth=1)
plt.plot(x_thursday_curve2, ydata[x_thursday_curve2], "go", linewidth=4)
| gpl-3.0 |
hadim/spindle_tracker | spindle_tracker/detector/peak_detector.py | 1 | 14979 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import logging
import subprocess
import multiprocessing
import itertools
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from scipy.optimize import leastsq
from skimage import feature
import numpy as np
import pandas as pd
from ..utils import print_progress
log = logging.getLogger(__name__)
__all__ = []
DEFAULT_PARAMETERS = {'w_s': 0.7,
'peak_radius': 0.2,
'threshold': 27.,
'max_peaks': 1e4
}
def peak_detector(im,
metadata,
parallel=True,
show_progress=False,
parameters={}):
"""Gaussian peak detection described in Segré et al. Nature Methods, (2008).
Parameters
----------
im : numpy array
To iterate over data.
metadata : dict
Metadata to scale detected peaks and parameters.
parallel : bool
Used several processes at once.
show_progress : bool (default: False)
Print progress bar during detection.
parameters : dict
Contains gaussian detection algorithm parameters:
- w_s: int, optional
Width (in um) of the sliding window over which the hypothesis ratio
is computed :math:`w_s` in the article. It should be wide enough
to contain some background to allow a proper noise evaluation.
- peak_radius: float, optional
Typical radius (in um) of the peaks to detect. It must be higher than one
            (as peaks less than a pixel wide would yield bogus results)
- threshold: float, optional
            Criterion for a positive detection (i.e. the null hypothesis is false).
Corresponds to the :math:`\chi^2` parameter in the Constant False
Alarm Rate section of the article supplementary text (p. 12).
A higher `threshold` corresponds to a more stringent test.
            According to the authors, this parameter needs to be adjusted
once for a given data set.
- max_peaks : int, optional
            The deflation loop will stop if the number of detected peaks exceeds max_peaks.
Returns
-------
trajs : :class:`pd.DataFrame`
"""
log.info('Initializing peak detection')
_parameters = DEFAULT_PARAMETERS.copy()
_parameters.update(parameters.copy())
parameters = _parameters
# Scale parameters in pixels
parameters['w_s'] /= metadata['PhysicalSizeX']
parameters['w_s'] = np.round(parameters['w_s'])
parameters['peak_radius'] /= metadata['PhysicalSizeX']
# Find number of stacks to process
# Only iteration over T and Z are assumed
n_stack = int(metadata['SizeT'] * metadata['SizeZ'])
if parallel:
# Snippet to allow multiprocessing while importing
# module such as numpy (only needed on linux)
if os.name == 'posix':
subprocess.call("taskset -p 0xff %d" % os.getpid(),
shell=True,
stdout=DEVNULL,
stderr=DEVNULL)
def init_worker():
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
ncore = multiprocessing.cpu_count() + 1
pool = multiprocessing.Pool(processes=ncore, initializer=init_worker)
# Build arguments list
arguments = zip(im,
itertools.repeat(parameters),
range(n_stack))
try:
# Launch peak_detection
if parallel:
results = pool.imap_unordered(find_gaussian_peaks, arguments)
else:
results = map(find_gaussian_peaks, arguments)
all_peaks = []
# Get unordered results and log progress
for i, (pos, peaks) in enumerate(results):
n_peaks = len(peaks)
percent_progression = (i + 1) / n_stack * 100
if show_progress:
message = ("%i/%i - %i peaks detected on stack n°%i" %
((i + 1), n_stack, n_peaks, pos))
print_progress(percent_progression, message)
all_peaks.append((pos, peaks))
if show_progress:
print_progress(-1)
except KeyboardInterrupt:
if parallel:
pool.terminate()
pool.join()
raise Exception('Detection has been canceled by user')
if parallel:
pool.close()
pool.terminate()
# Sort peaks and remove index used to sort
all_peaks.sort(key=lambda x: x[0])
all_peaks = [x[1] for x in all_peaks]
# Store peaks in pd.DataFrame
index = []
peaks_df = []
for n, peaks in enumerate(all_peaks):
if peaks.any():
for peak in peaks:
peaks_df.append(peak)
index.append((n,))
if not peaks_df:
return pd.DataFrame([])
log.info('Terminating peak detection')
peaks_df = pd.DataFrame(peaks_df, columns=['y', 'x', 'w', 'I'], dtype='float')
peaks_df.index = pd.MultiIndex.from_tuples(index, names=['t_stamp'])
t_stamp = peaks_df.index.get_level_values('t_stamp').values
peaks_df['t'] = t_stamp // metadata['SizeZ']
peaks_df['z'] = t_stamp % metadata['SizeZ']
peaks_df['label'] = np.arange(len(peaks_df))
peaks_df = peaks_df.reset_index(level=['t_stamp', 'label'])
peaks_df['t_stamp'] = peaks_df['t'].copy()
peaks_df.set_index(['t_stamp', 'label'], inplace=True)
if "PhysicalSizeX" in metadata.keys():
peaks_df['x'] *= metadata["PhysicalSizeX"]
if "PhysicalSizeY" in metadata.keys():
peaks_df['y'] *= metadata["PhysicalSizeY"]
if "PhysicalSizeZ" in metadata.keys():
peaks_df['z'] *= metadata["PhysicalSizeZ"]
if "PhysicalSizeX" in metadata.keys():
peaks_df['w'] *= metadata["PhysicalSizeX"]
if "TimeIncrement" in metadata.keys():
peaks_df['t'] *= metadata["TimeIncrement"]
return peaks_df
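# Illustrative usage sketch for peak_detector. The metadata keys mirror the
# ones read above (SizeT, SizeZ, PhysicalSize*, TimeIncrement); the stack
# shape, pixel sizes and the use of pure noise are assumptions chosen only so
# that the example runs quickly (it will typically return an empty DataFrame).
def example_peak_detector():
    metadata = {'SizeT': 2, 'SizeZ': 3,
                'PhysicalSizeX': 0.1, 'PhysicalSizeY': 0.1,
                'PhysicalSizeZ': 0.3, 'TimeIncrement': 1.0}
    # one 2D frame per (t, z) pair, iterated in t-major order
    im = np.random.random((metadata['SizeT'] * metadata['SizeZ'], 64, 64))
    return peak_detector(im, metadata, parallel=False, show_progress=False)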
def find_gaussian_peaks(args): # pragma: no cover
"""
Buffer function for _find_gaussian_peaks
"""
frame, detection_parameters, i = args
print(detection_parameters)
return (i, _find_gaussian_peaks(frame, **detection_parameters))
def _find_gaussian_peaks(image, w_s=15, peak_radius=1.5,
threshold=27., max_peaks=1e4): # pragma: no cover
"""
This function implements the Gaussian peak detection described
in Segré et al. Nature Methods **5**, 8 (2008). It is based on a
    likelihood ratio test on the presence or absence of a Gaussian
peak on a patch moving over the input 2D image and a successive
sub-pixel localization of the peaks by a least square fit. This
detection is followed recursively by a _deflation_ of the image
from the detected peaks and a new detection, until no more peaks
are found
Parameters
----------
image: a 2D array
the input image
w_s: int, optional
the width of the sliding window over which the hypothesis ratio
is computed :math:`w_s` in the article. It should be wide enough
to contain some background to allow a proper noise evaluation.
peak_radius: float, optional
typical radius of the peaks to detect. It must be higher than one
        (as peaks less than a pixel wide would yield bogus results)
    threshold: float, optional
        Criterion for a positive detection (i.e. the null hypothesis is false).
        Corresponds to the :math:`\chi^2` parameter in the Constant False
Alarm Rate section of the article supplementary text (p. 12).
A higher `threshold` corresponds to a more stringent test.
        According to the authors, this parameter needs to be adjusted
once for a given data set.
Returns
-------
peaks: ndarray
peaks is a Nx4 array, where N is the number of detected peaks in the
image. Each line gives the x position, y position, width,
and (background corrected) intensity of a detected peak (in that order).
"""
peaks_coords = glrt_detection(image, peak_radius,
w_s, threshold)
peaks = gauss_estimation(image, peaks_coords, w_s)
d_image = image_deflation(image, peaks, w_s)
peaks_coords = glrt_detection(d_image, peak_radius,
w_s, threshold)
i = 1
while len(peaks_coords) > 0 and len(peaks) < max_peaks:
new_peaks = gauss_estimation(d_image, peaks_coords, w_s)
# in case the 2D gauss fit fails
if len(new_peaks) < 1:
break
peaks.extend(new_peaks[:])
d_image = image_deflation(d_image, new_peaks, w_s)
peaks_coords = glrt_detection(d_image, peak_radius,
w_s, threshold)
i += 1
peaks = np.array(peaks)
print("Number of deflation loop : {}".format(i))
return peaks
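# Hedged usage sketch (editorial, not part of the library): run the deflation
# based detector defined above on a small synthetic frame. The image size,
# spot positions and amplitudes below are illustrative assumptions.
def _demo_find_gaussian_peaks():  # pragma: no cover
    rng = np.random.RandomState(0)
    frame = rng.normal(loc=100., scale=1., size=(128, 128))
    for xc, yc in [(40, 40), (90, 70)]:
        x = np.exp(-(np.arange(128) - xc) ** 2 / 2. ** 2)
        y = np.exp(-(np.arange(128) - yc) ** 2 / 2. ** 2)
        frame += 50. * np.outer(x, y)
    # Each returned row is (x, y, width, intensity); both synthetic spots
    # are expected to be recovered with the default parameters.
    return _find_gaussian_peaks(frame, w_s=15, peak_radius=1.5, threshold=27.)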
def image_deflation(image, peaks, w_s): # pragma: no cover
"""
    Subtracts the detected Gaussian peaks from the input image and
returns the deflated image.
"""
d_image = image.copy()
for peak in peaks:
xc, yc, width, I = peak
xc_rel = w_s // 2 + xc - np.floor(xc)
yc_rel = w_s // 2 + yc - np.floor(yc)
low_x = int(xc - w_s // 2)
low_y = int(yc - w_s // 2)
if low_x > 0 and low_y > 0:
params = xc_rel, yc_rel, width, I, 0
deflated_peak = gauss_continuous(params, w_s)
d_image[low_x:low_x + w_s,
low_y:low_y + w_s] -= deflated_peak.reshape((w_s, w_s))
return d_image
def gauss_estimation(image, peaks_coords, w_s): # pragma: no cover
"""
Least square fit of a 2D Gauss peaks (with radial symmetry)
on regions of width `w_s` centered on each element
of `peaks_coords`.
    Parameters
    ----------
image : 2D array
a greyscale 2D input image.
peaks_coords: iterable of pairs of int.
The peaks_coords should contain `(x, y)` pairs
corresponding to the approximate peak center,
in pixels.
"""
peaks = []
for coords in peaks_coords:
low_x, low_y = coords - w_s // 2
try:
patch = image[low_x: low_x + w_s,
low_y: low_y + w_s]
params, success = gauss_estimate(patch, w_s)
xc, yc, width, I, bg = params
if success and I > 0 and width < w_s:
peaks.append([xc + low_x, yc + low_y, width, I])
except IndexError:
            log.error('peak too close to the edge\n'
'use a smaller window\n'
'peak @ (%i, %i) discarded' % (coords[0], coords[1]))
continue
return peaks
def glrt_detection(image, r0, w_s, threshold): # pragma: no cover
"""
    Implements the Generalized Likelihood Ratio Test, by
computing equation 4 in Segré et al. Supplementary Note (p. 12)
    in a window sliding over the image.
    Parameters
    ----------
image: array
the 2D input image
r0: float
the detected Gaussian peak 1/e radius
w_s: int
Size of the sliding window over which the test is
computed ( :math:`w_s` in the article).
threshold: float
        Criterion for a positive detection (i.e. the null hypothesis is false).
        Corresponds to the :math:`\chi^2` parameter in the Constant False
        Alarm Rate section of the article supplementary text (p. 12).
        A higher `threshold` corresponds to a more stringent test.
        According to the authors, this parameter needs to be adjusted
        once for a given data set.
    Returns
    -------
peaks_coords: array
An Nx2 array containing (x, y) pairs of the detected peaks
in integer pixel coordinates.
"""
if isinstance(image, np.ma.core.MaskedArray):
mask = image.mask
image = image.data
elif isinstance(image, np.ndarray):
mask = None
else:
raise Exception("Image has to be np.ndarray or np.ma.core.MaskedArray")
w, h = image.shape
g_patch = gauss_patch(r0, w_s)
g_patch -= g_patch.mean()
g_squaresum = np.sum(g_patch ** 2)
hmap = []
for i, j in np.ndindex((w - w_s, h - w_s)):
tmp = image[int(i): int(i + w_s), int(j): int(j + w_s)]
h = hypothesis_map(tmp,
g_patch,
g_squaresum)
hmap.append(h)
hmap = np.array(hmap)
try:
hmap = -2 * hmap.reshape((w - w_s, h - w_s))
peaks_coords = feature.peak_local_max(hmap, 3,
threshold_abs=threshold)
peaks_coords += w_s / 2
if isinstance(mask, np.ndarray):
peaks_coords = list(filter(lambda x: not mask[x[0], x[1]], peaks_coords))
return peaks_coords
except ValueError:
return np.array([])
def hypothesis_map(patch, g_patch, g_squaresum): # pragma: no cover
"""
Computes the ratio for a given patch position.
"""
w_s = patch.shape[0]
# mean = patch.mean()
multiplicative = g_patch * patch
intensity = multiplicative.sum()
normalisation = w_s * patch.std()
ratio = (w_s ** 2 / 2.) * np.log(1 - (intensity
/ normalisation) ** 2
/ g_squaresum)
return ratio
def gauss_estimate(patch, w_s): # pragma: no cover
"""
Least square 2D gauss fit
"""
params0 = [w_s / 2., w_s / 2., 3.,
np.float(patch.max() - patch.min()), np.float(patch.min())]
errfunc = lambda p: patch.flatten() - gauss_continuous(p, w_s)
return leastsq(errfunc, params0, xtol=0.01)
def gauss_continuous(params, w_s): # pragma: no cover
"""2D gauss function with a float center position"""
xc, yc, width, I, bg = params
xc = np.float(xc)
yc = np.float(yc)
x = np.exp(- (np.arange(0, w_s) - xc) ** 2 / width ** 2)
y = np.exp(- (np.arange(0, w_s) - yc) ** 2 / width ** 2)
g_patch = I * np.outer(x, y) + bg
return g_patch.flatten()
def gauss_patch(r0, w_s): # pragma: no cover
"""
    Computes a w_s by w_s image with a
power normalized Gaussian peak with radial symmetry
at its center.
"""
x = y = np.exp(- (np.arange(w_s) - w_s // 2) ** 2 / r0 ** 2)
A = 1. / (np.sqrt(np.pi) * r0)
g_patch = A * np.outer(x, y)
return g_patch
def gauss_discrete(r0, i, j, w_s): # pragma: no cover
"""
2D gauss function with a discrete center position
"""
i -= w_s // 2
j -= w_s // 2
A = 1. / (np.sqrt(np.pi) * r0)
return A * np.exp(-(i ** 2 + j ** 2) / r0 ** 2)
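# Hedged consistency sketch (editorial, not part of the library): gauss_patch
# and gauss_discrete evaluate the same normalized Gaussian centered on the
# patch, so they are expected to agree pixel-wise.
def _demo_gauss_patch_consistency(r0=1.5, w_s=15):  # pragma: no cover
    patch = gauss_patch(r0, w_s)
    discrete = np.array([[gauss_discrete(r0, i, j, w_s)
                          for j in range(w_s)]
                         for i in range(w_s)])
    return np.allclose(patch, discrete)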
| bsd-3-clause |
mgahsan/QuantEcon.py | quantecon/tests/test_matrix_eqn.py | 7 | 1050 | """
tests for quantecon.matrix_eqn
"""
from __future__ import division
from collections import Counter
import unittest
import numpy as np
from numpy.testing import assert_allclose
from nose.plugins.attrib import attr
import pandas as pd
from quantecon import matrix_eqn as qme
def test_solve_discrete_lyapunov_zero():
'Simple test where X is all zeros'
A = np.eye(4) * .95
B = np.zeros((4, 4))
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(X, np.zeros((4, 4)))
def test_solve_discrete_lyapunov_B():
'Simple test where X is same as B'
A = np.ones((2, 2)) * .5
B = np.array([[.5, -.5], [-.5, .5]])
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(B, X)
def test_solve_discrete_lyapunov_complex():
'Complex test, A is companion matrix'
A = np.array([[0.5 + 0.3j, 0.1 + 0.1j],
[ 1, 0]])
B = np.eye(2)
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(np.dot(np.dot(A, X), A.conj().transpose()) - X, -B,
atol=1e-15)
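def test_solve_discrete_lyapunov_defining_equation():
    """
    Hedged extra check (editorial sketch, not in the original suite): for a
    stable real A the solution should satisfy A X A' - X = -B, the same
    convention verified in the complex test above.
    """
    A = np.array([[0.3, 0.1],
                  [0.0, 0.2]])
    B = np.eye(2)
    X = qme.solve_discrete_lyapunov(A, B)
    assert_allclose(np.dot(np.dot(A, X), A.T) - X, -B, atol=1e-12)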
| bsd-3-clause |
timovanopstal/nutils | docs/conf.py | 1 | 10742 | # -*- coding: utf-8 -*-
#
# nutils documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 10 16:51:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
class Fake(object):
def __init__( self, **attrs ):
self.__dict__.update( attrs )
def __getattr__( self, attr ):
return None
sys.modules[ 'numpy' ] = Fake( __version__='1.8', dtype=lambda o: None, ndarray=Fake )
sys.modules[ 'scipy' ] = Fake()
sys.modules[ 'matplotlib' ] = Fake()
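# Editorial note (hedged): the Fake stand-ins above are only meant to let
# Sphinx autodoc import the nutils modules on machines where numpy, scipy or
# matplotlib are unavailable; any attribute looked up on a Fake instance
# simply resolves to None instead of raising ImportError, e.g.
#
#   import numpy    # returns the Fake registered in sys.modules
#   numpy.linspace  # -> None, which is enough for the docs build to proceed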
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nutils'
copyright = u'2014, Gertjan van Zwieten'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = [ 'nutils.' ]
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nutilsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nutils.tex', u'nutils Documentation',
u'Gertjan van Zwieten', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nutils', u'nutils Documentation',
[u'Gertjan van Zwieten'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'nutils', u'nutils Documentation',
u'Gertjan van Zwieten', 'nutils', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'nutils'
epub_author = u'Gertjan van Zwieten'
epub_publisher = u'Gertjan van Zwieten'
epub_copyright = u'2014, Gertjan van Zwieten'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'nutils'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
autodoc_member_order = 'bysource'
autodoc_default_flags = [ 'members' ]
| mit |
vortex-ape/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 2 | 64475 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from ..utils import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..exceptions import ConvergenceWarning
from ..externals import six
from ..model_selection import train_test_split
from .sgd_fast import plain_sgd, average_sgd
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"adaptive": 4, "pa1": 5, "pa2": 6}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False,
n_iter=None):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.warm_start = warm_start
self.average = average
self.n_iter = n_iter
self.max_iter = max_iter
self.tol = tol
# current tests expect init to do parameter validation
# but we are not allowed to set attributes
self._validate_params(set_max_iter=False)
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params(set_max_iter=False)
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self, set_max_iter=True, for_partial_fit=False):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False")
if self.early_stopping and for_partial_fit:
raise ValueError("early_stopping should be False with partial_fit")
if self.max_iter is not None and self.max_iter <= 0:
raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.n_iter_no_change < 1:
raise ValueError("n_iter_no_change must be >= 1")
if not (0.0 < self.validation_fraction < 1.0):
raise ValueError("validation_fraction must be in ]0, 1[")
if self.learning_rate in ("constant", "invscaling", "adaptive"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
if not set_max_iter:
return
# n_iter deprecation, set self._max_iter, self._tol
self._tol = self.tol
if self.n_iter is not None:
warnings.warn("n_iter parameter is deprecated in 0.19 and will be"
" removed in 0.21. Use max_iter and tol instead.",
DeprecationWarning)
# Same behavior as before 0.19
max_iter = self.n_iter
self._tol = None
elif self.tol is None and self.max_iter is None:
if not for_partial_fit:
warnings.warn(
"max_iter and tol parameters have been "
"added in %s in 0.19. If both are left unset, "
"they default to max_iter=5 and tol=None. "
"If tol is not None, max_iter defaults to max_iter=1000. "
"From 0.21, default max_iter will be 1000, and"
" default tol will be 1e-3." % type(self).__name__,
FutureWarning)
# Before 0.19, default was n_iter=5
max_iter = 5
else:
max_iter = self.max_iter if self.max_iter is not None else 1000
self._max_iter = max_iter
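    # Editorial summary (hedged) of the resolution performed above:
    #   n_iter given                -> _max_iter = n_iter, _tol = None (deprecated path)
    #   tol is None, max_iter None  -> _max_iter = 5 (pre-0.19 behaviour)
    #   otherwise                   -> _max_iter = max_iter if given, else 1000; _tol = tol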
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_validation_split(self, X, y, sample_weight):
"""Split the dataset between training set and validation set.
Parameters
----------
X : {array, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples, )
Target values.
sample_weight : array, shape (n_samples, )
Weights applied to individual samples.
Returns
-------
validation_mask : array, shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
n_samples = X.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if not self.early_stopping:
# use the full set for training, with an empty validation set
return validation_mask
tmp = train_test_split(X, y, np.arange(n_samples), sample_weight,
test_size=self.validation_fraction,
random_state=self.random_state)
X_train, X_val, y_train, y_val = tmp[:4]
idx_train, idx_val, sample_weight_train, sample_weight_val = tmp[4:8]
if X_train.shape[0] == 0 or X_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (n_samples, self.validation_fraction, X_train.shape[0],
X_val.shape[0]))
self._X_val = X_val
self._y_val = y_val
self._sample_weight_val = sample_weight_val
validation_mask[idx_val] = 1
return validation_mask
def _delete_validation_split(self):
if self.early_stopping:
del self._X_val
del self._y_val
del self._sample_weight_val
def _validation_score(self, coef, intercept):
"""Compute the score on the validation set. Used for early stopping."""
# store attributes
old_coefs, old_intercept = self.coef_, self.intercept_
# replace them with current coefficients for scoring
self.coef_ = coef.reshape(1, -1)
self.intercept_ = np.atleast_1d(intercept)
score = self.score(self._X_val, self._y_val, self._sample_weight_val)
# restore old attributes
self.coef_, self.intercept_ = old_coefs, old_intercept
return score
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : string
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
validation_mask = est._make_validation_split(X, y, sample_weight)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = est.tol if est.tol is not None else -np.inf
if not est.average:
result = plain_sgd(coef, intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, validation_mask, est.early_stopping, est,
int(est.n_iter_no_change),
max_iter, tol, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, average_intercept, \
n_iter_ = average_sgd(coef, intercept, average_coef,
average_intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, validation_mask, est.early_stopping,
est, int(est.n_iter_no_change),
max_iter, tol,
int(est.fit_intercept), int(est.verbose),
int(est.shuffle), seed, pos_weight,
neg_weight, learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
result = standard_coef, standard_intercept, n_iter_
est._delete_validation_split()
return result
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False,
validation_fraction=0.1, n_iter_no_change=5,
class_weight=None, warm_start=False, average=False,
n_iter=None):
super(BaseSGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average, n_iter=n_iter)
self.class_weight = class_weight
self.n_jobs = n_jobs
@property
@deprecated("Attribute loss_function was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``loss_function_`` instead")
def loss_function(self):
return self.loss_function_
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, max_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
accept_large_sparse=False)
n_samples, n_features = X.shape
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
else:
raise ValueError(
"The number of classes has to be greater than one;"
" got %d class" % n_classes)
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C",
accept_large_sparse=False)
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self._max_iter,
classes, sample_weight, coef_init, intercept_init)
if (self._tol is not None and self._tol > -np.inf
and self.n_iter_ == self._max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, max_iter):
"""Fit a binary classifier on X and y. """
coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
learning_rate, max_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, prefer="threads",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
max_iter, self._expanded_class_weight[i],
1., sample_weight)
for i in range(len(self.classes_)))
# take the maximum of n_iter_ over every binary fit
n_iter_ = 0.
for i, (_, intercept, n_iter_i) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default: 'hinge'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The possible options are 'hinge', 'log', 'modified_huber',
'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
max_iter : int, optional
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
`partial_fit`.
Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None.
.. versionadded:: 0.19
tol : float or None, optional
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol). Defaults to None.
Defaults to 1e-3 from 0.21.
.. versionadded:: 0.19
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : int or None, optional (default=None)
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
learning_rate : string, optional
The learning rate schedule:
'constant':
eta = eta0
'optimal': [default]
eta = 1.0 / (alpha * (t + t0))
where t0 is chosen by a heuristic proposed by Leon Bottou.
'invscaling':
eta = eta0 / pow(t, power_t)
'adaptive':
eta = eta0, as long as the training keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
eta0 : double
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate training when
validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to None. Deprecated, will be removed in 0.21.
.. versionchanged:: 0.19
Deprecated
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
loss_function_ : concrete ``LossFunction``
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier(max_iter=1000)
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None,
early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=1000,
n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
power_t=0.5, random_state=None, shuffle=True, tol=None,
validation_fraction=0.1, verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.svm.LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=None, tol=None, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, class_weight=None, warm_start=False,
average=False, n_iter=None):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, class_weight=class_weight,
warm_start=warm_start, average=average, n_iter=n_iter)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
check_is_fitted(self, "t_")
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
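# Hedged usage sketch (editorial, not part of scikit-learn): probability
# estimates are only exposed for loss='log' or loss='modified_huber'; the toy
# data below is an illustrative assumption.
def _demo_sgd_predict_proba():  # pragma: no cover
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2, 2])
    clf = SGDClassifier(loss="log", max_iter=1000, tol=1e-3, random_state=0)
    clf.fit(X, y)
    # Each row sums to one; columns follow the order of clf.classes_.
    return clf.predict_proba([[-0.8, -1.]])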
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False,
n_iter=None):
super(BaseSGDRegressor, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average, n_iter=n_iter)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
max_iter, sample_weight, coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64,
accept_large_sparse=False)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
sample_weight=sample_weight, coef_init=None,
intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate,
self._max_iter, sample_weight, coef_init,
intercept_init)
if (self._tol is not None and self._tol > -np.inf
and self.n_iter_ == self._max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
validation_mask = self._make_validation_split(X, y, sample_weight)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self._tol if self._tol is not None else -np.inf
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_, self.n_iter_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
validation_mask, self.early_stopping, self,
int(self.n_iter_no_change),
max_iter, tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += self.n_iter_ * X.shape[0]
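            # self.t_ counts the samples seen plus an initial 1.0, so once at
            # least ``average`` samples have been processed the averaged
            # weights are exposed below; until then the plain SGD solution is
            # reported.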
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_, self.n_iter_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
validation_mask, self.early_stopping, self,
int(self.n_iter_no_change),
max_iter, tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += self.n_iter_ * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
self._delete_validation_split()
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default: 'squared_loss'
The loss function to be used. The possible values are 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
The 'squared_loss' refers to the ordinary least squares fit.
'huber' modifies 'squared_loss' to focus less on getting outliers
correct by switching from squared to linear loss past a distance of
epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
linear past that; this is the loss function used in SVR.
'squared_epsilon_insensitive' is the same but becomes squared loss past
a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001.
        Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
max_iter : int, optional
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
`partial_fit`.
        Defaults to 5. From version 0.21, or if ``tol`` is not None, it defaults to 1000.
.. versionadded:: 0.19
tol : float or None, optional
The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol). Defaults to None;
        from version 0.21 it defaults to 1e-3.
.. versionadded:: 0.19
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
learning_rate : string, optional
The learning rate schedule:
'constant':
eta = eta0
'optimal':
eta = 1.0 / (alpha * (t + t0))
where t0 is chosen by a heuristic proposed by Leon Bottou.
'invscaling': [default]
eta = eta0 / pow(t, power_t)
'adaptive':
            eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
eta0 : double
The initial learning rate for the 'constant', 'invscaling' or
        'adaptive' schedules. The default value is 0.01, matching this
        estimator's default 'invscaling' schedule, which does use eta0.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate training when
validation score is not improving by at least tol for
n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to None. Deprecated, will be removed in 0.21.
.. versionchanged:: 0.19
Deprecated
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor(max_iter=1000)
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, early_stopping=False,
epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15,
learning_rate='invscaling', loss='squared_loss', max_iter=1000,
n_iter=None, n_iter_no_change=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, tol=None, validation_fraction=0.1,
verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, sklearn.svm.SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False,
n_iter=None):
super(SGDRegressor, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average, n_iter=n_iter)
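# A minimal, illustrative sketch (not part of the original module): it
# exercises the SGDRegressor defined above on synthetic data to show the
# fit / partial_fit / average workflow; the data and hyperparameters here are
# arbitrary choices, not recommendations.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 4)
    # linear target with a little noise
    y_demo = X_demo.dot(np.array([1.0, -2.0, 0.5, 0.0])) + 0.1 * rng.randn(50)
    demo = SGDRegressor(max_iter=1000, tol=1e-3, average=10)
    demo.fit(X_demo, y_demo)                    # repeated epochs, stops on tol
    demo.partial_fit(X_demo[:10], y_demo[:10])  # one extra epoch on a mini-batch
    print(demo.coef_, demo.intercept_)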
| bsd-3-clause |
GGoussar/scikit-image | doc/examples/segmentation/plot_rag.py | 12 | 2234 | """
=======================
Region Adjacency Graphs
=======================
This example demonstrates the use of the `merge_nodes` function of a Region
Adjacency Graph (RAG). The `RAG` class represents an undirected weighted graph
which inherits from the `networkx.Graph` class. When a new node is formed by
merging two nodes, the edge weight of all the edges incident on the resulting
node can be updated by a user-defined function `weight_func`.
The default behaviour is to use the smaller edge weight in case of a conflict.
The example below also shows how to use a custom function to select the larger
weight instead.
"""
from skimage.future.graph import rag
import networkx as nx
from matplotlib import pyplot as plt
import numpy as np
def max_edge(g, src, dst, n):
"""Callback to handle merging nodes by choosing maximum weight.
Returns a dictionary with `"weight"` set as either the weight between
(`src`, `n`) or (`dst`, `n`) in `g` or the maximum of the two when
both exist.
Parameters
----------
g : RAG
The graph under consideration.
src, dst : int
The vertices in `g` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
data : dict
        A dict with the "weight" attribute set to either the weight between
        (`src`, `n`) or (`dst`, `n`) in `g`, or the maximum of the two when
        both exist.
"""
w1 = g[n].get(src, {'weight': -np.inf})['weight']
w2 = g[n].get(dst, {'weight': -np.inf})['weight']
return {'weight': max(w1, w2)}
def display(g, title):
"""Displays a graph with the given title."""
pos = nx.circular_layout(g)
plt.figure()
plt.title(title)
nx.draw(g, pos)
nx.draw_networkx_edge_labels(g, pos, font_size=20)
g = rag.RAG()
g.add_edge(1, 2, weight=10)
g.add_edge(2, 3, weight=20)
g.add_edge(3, 4, weight=30)
g.add_edge(4, 1, weight=40)
g.add_edge(1, 3, weight=50)
# Assigning dummy labels.
for n in g.nodes():
g.node[n]['labels'] = [n]
gc = g.copy()
display(g, "Original Graph")
g.merge_nodes(1, 3)
display(g, "Merged with default (min)")
gc.merge_nodes(1, 3, weight_func=max_edge, in_place=False)
display(gc, "Merged with max without in_place")
plt.show()
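# For reference (derived from the weights assigned above, and assuming
# merge_nodes keeps the smaller weight by default as documented): merging
# nodes 1 and 3 with the default rule yields edges of weight 10 (to node 2)
# and 30 (to node 4), whereas the ``max_edge`` callback yields 20 and 40.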
| bsd-3-clause |
austinban/aima-python | submissions/Conklin/myNN.py | 13 | 3059 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Conklin import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicECHP = DataFrame()
musicECHP.data = []
targetInfo = []
list_of_songs = music.get_songs()
for song in list_of_songs:
try:
tempo = float(song['song']["tempo"])
targetInfo.append(tempo)
loudness = float(song['song']["loudness"])
fadeOut = float(song['song']["start_of_fade_out"])
fadeIn = float(song['song']["end_of_fade_in"])
duration = float(song['song']["duration"])
releaseYear = float(song['song']["year"])
        # use loudness (not tempo, which is the prediction target) so the
        # features line up with feature_names below and the target isn't leaked
        musicECHP.data.append([loudness, fadeOut, fadeIn, duration, releaseYear])
except:
traceback.print_exc()
musicECHP.feature_names = [
'Loudness',
'Fade Out',
'Fade In',
'Duration',
'Release Year'
]
musicECHP.target = []
def musicTarget(speed):
if speed > 100:
return 1
return 0
for pre in targetInfo:
# choose the target
tt = musicTarget(pre)
musicECHP.target.append(tt)
musicECHP.target_names = [
'Tempo <= 100 bpm',
'Tempo > 100 bpm',
]
Examples = {
'Music': musicECHP,
}
'''
Make a custom classifier,
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (1000,),
activation = 'relu',
solver='sgd',#'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = False,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
momentum = 0.5,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
beta_1 = 0.9,
beta_2 = 0.999,
# epsilon = 1e-8,
)
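# Note on the configuration above: hidden_layer_sizes=(1000,) is a single
# hidden layer of 1000 units; learning_rate='adaptive' keeps the rate constant
# while training loss keeps improving and divides it by 5 otherwise; momentum
# only applies to the 'sgd' solver chosen here, while beta_1/beta_2 are Adam
# parameters and are inert with solver='sgd'.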
'''
Try scaling the data.
'''
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
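# setupScales records per-column minima and maxima over the raw data;
# scaleGrid then maps every feature linearly onto [0, 1] via
# (x - min) / (max - min), so large-valued features such as release year
# do not dominate the MLP's weight updates.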
setupScales(musicECHP.data)
musicScaled.data = scaleGrid(musicECHP.data)
musicScaled.feature_names = musicECHP.feature_names
musicScaled.target = musicECHP.target
musicScaled.target_names = musicECHP.target_names
Examples = {
'MusicDefault': {
'frame': musicECHP,
},
'MusicSGD': {
'frame': musicECHP,
'mlpc': mlpc
},
'MusicScaled': {
'frame': musicScaled,
},
}
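# A minimal standalone sketch (not part of the submission harness, which
# presumably consumes the Examples dict elsewhere): it assumes scikit-learn's
# train_test_split is available and simply fits the custom classifier on the
# scaled data to sanity-check it.
if __name__ == '__main__':
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        musicScaled.data, musicScaled.target, test_size=0.25, random_state=0)
    mlpc.fit(X_train, y_train)
    print('Held-out accuracy:', mlpc.score(X_test, y_test))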
| mit |
chenjun0210/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
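  # (layers.stack applies layers.fully_connected once per entry in [10, 20, 10],
  # feeding each layer's output into the next, so this is equivalent to three
  # chained fully_connected calls, each wrapped in dropout with keep_prob=0.9.)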
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
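# (my_model returns a (predictions, loss, train_op) tuple, which
# tf.contrib.learn.Estimator accepts as the output of a model_fn.)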
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tests/test_msgpack/test_limits.py | 9 | 2663 | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas.util.testing as tm
from pandas.msgpack import packb, unpackb, Packer, Unpacker, ExtType
class TestLimits(tm.TestCase):
def test_integer(self):
x = -(2 ** 63)
assert unpackb(packb(x)) == x
self.assertRaises((OverflowError, ValueError), packb, x-1)
x = 2 ** 64 - 1
assert unpackb(packb(x)) == x
self.assertRaises((OverflowError, ValueError), packb, x+1)
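        # msgpack can encode integers in the closed range [-2**63, 2**64 - 1]
        # (int64 minimum through uint64 maximum), hence the boundary checks above.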
def test_array_header(self):
packer = Packer()
packer.pack_array_header(2**32-1)
self.assertRaises((OverflowError, ValueError),
packer.pack_array_header, 2**32)
def test_map_header(self):
packer = Packer()
packer.pack_map_header(2**32-1)
        self.assertRaises((OverflowError, ValueError),
                          packer.pack_map_header, 2**32)
def test_max_str_len(self):
d = 'x' * 3
packed = packb(d)
unpacker = Unpacker(max_str_len=3, encoding='utf-8')
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_str_len=2, encoding='utf-8')
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_bin_len(self):
d = b'x' * 3
packed = packb(d, use_bin_type=True)
unpacker = Unpacker(max_bin_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_bin_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_array_len(self):
d = [1, 2, 3]
packed = packb(d)
unpacker = Unpacker(max_array_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_array_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_map_len(self):
d = {1: 2, 3: 4, 5: 6}
packed = packb(d)
unpacker = Unpacker(max_map_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_map_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
def test_max_ext_len(self):
d = ExtType(42, b"abc")
packed = packb(d)
unpacker = Unpacker(max_ext_len=3)
unpacker.feed(packed)
assert unpacker.unpack() == d
unpacker = Unpacker(max_ext_len=2)
unpacker.feed(packed)
self.assertRaises(ValueError, unpacker.unpack)
| mit |