repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kaiserroll14/301finalproject | main/pandas/tseries/plotting.py | 9 | 9293 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
#!!! TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
#----------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
series : Series
plotf : callable
ax : Axes, optional
Notes
-----
Supports same kwargs as Axes.plot
"""
# Use inferred freq if possible, need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
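# A minimal usage sketch (illustrative only, not from the original module):
# `ts` stands for a hypothetical period- or datetime-indexed Series, and the
# lambda simply forwards to Axes.plot, matching the plotf(ax, x, y, **kwds)
# call made above.
#
#   >>> import matplotlib.pyplot as plt
#   >>> fig, ax = plt.subplots()
#   >>> tsplot(ts, lambda ax, x, y, **kwds: ax.plot(x, y, **kwds), ax=ax)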
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = series.resample('D', how=how).dropna()
series = series.resample(ax_freq, how=how).dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0])
labels.append(com.pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: ("t = {0} "
"y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
| gpl-3.0 |
kwailamchan/programming-languages | python/dreamer/dreamer/main.py | 3 | 4513 | import matplotlib
matplotlib.use('Agg')
import sys
sys.path.insert(0, '/usr/local/caffe/python/')
import caffe
from cStringIO import StringIO
import numpy as np
import scipy.ndimage as nd
import PIL.Image
from IPython.display import clear_output, Image, display
from google.protobuf import text_format
# If your GPU supports CUDA and Caffe was built with CUDA support,
# uncomment the following to run Caffe operations on the GPU.
# caffe.set_mode_gpu()
# caffe.set_device(0) # select GPU device if multiple devices exist
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 255))
f = StringIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
#-------------------------------------------------------------#
# DNN
# http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel
model_path = '/vagrant/dreamer/dreamer/models/bvlc_googlenet/' # substitute your path here
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'bvlc_googlenet.caffemodel'
# Patching model to be able to compute gradients.
# Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True
open('tmp.prototxt', 'w').write(str(model))
net = caffe.Classifier('tmp.prototxt', param_fn,
mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']
def deprocess(net, img):
return np.dstack((img + net.transformer.mean['data'])[::-1])
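# (shape note, added for clarity: preprocess maps an HxWx3 RGB float image to
# Caffe's 3xHxW BGR layout and subtracts the per-channel means; deprocess
# inverts both steps)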
#----------------------------------------------------------------#
# producing
def objective_L2(dst):
dst.diff[:] = dst.data
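# (setting the backpropagated "error" equal to the activations themselves
# maximizes their L2 norm, since d/dx(||x||^2 / 2) = x; this is what turns the
# step below into gradient ascent on activation magnitude)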
def make_step(net, step_size=1.5, end='inception_4c/output',
jitter=32, clip=True, objective=objective_L2):
'''Basic gradient ascent step.'''
src = net.blobs['data'] # input image is stored in Net's 'data' blob
dst = net.blobs[end]
ox, oy = np.random.randint(-jitter, jitter+1, 2)
src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift
net.forward(end=end)
objective(dst) # specify the optimization objective
net.backward(start=end)
g = src.diff[0]
# apply normalized ascent step to the input image
src.data[:] += step_size/np.abs(g).mean() * g
src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image
if clip:
bias = net.transformer.mean['data']
src.data[:] = np.clip(src.data, -bias, 255-bias)
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
end='inception_4c/output', clip=True, **step_params):
# prepare base images for all octaves
octaves = [preprocess(net, base_img)]
for i in xrange(octave_n-1):
octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale,1.0/octave_scale), order=1))
src = net.blobs['data']
detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
for octave, octave_base in enumerate(octaves[::-1]):
h, w = octave_base.shape[-2:]
if octave > 0:
# upscale details from the previous octave
h1, w1 = detail.shape[-2:]
detail = nd.zoom(detail, (1, 1.0*h/h1,1.0*w/w1), order=1)
src.reshape(1,3,h,w) # resize the network's input image size
src.data[0] = octave_base+detail
for i in xrange(iter_n):
make_step(net, end=end, clip=clip, **step_params)
# visualization
vis = deprocess(net, src.data[0])
if not clip: # adjust image contrast if clipping is disabled
vis = vis*(255.0/np.percentile(vis, 99.98))
showarray(vis)
print octave, i, end, vis.shape
clear_output(wait=True)
# extract details produced on the current octave
detail = src.data[0]-octave_base
# returning the resulting image
return deprocess(net, src.data[0])
img = np.float32(PIL.Image.open('/vagrant/dreamer/dreamer/images/sky1024px.jpg'))
showarray(img)
_=deepdream(net, img)
_=deepdream(net, img, end='inception_3b/5x5_reduce')
net.blobs.keys()
| mit |
kevntao/data-science-from-scratch | code/recommender_systems.py | 60 | 6291 | from __future__ import division
import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
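# (for the data above, Python and R are the only interests appearing 4 times,
# so they head this list, followed by the interests that appear 3 times)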
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
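# (for the 0/1 interest vectors built below, dot(v, w) counts the interests
# two users share, so the result is
# (# shared interests) / sqrt((# interests of one user) * (# interests of the other)))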
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
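# (e.g. if unique_interests were ["Big Data", "C++", "Cassandra"], the interest
# list ["Big Data", "Cassandra"] would map to [1, 0, 1])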
user_interest_matrix = map(make_user_interest_vector, users_interests)
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda (_, similarity): similarity, # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda (_, weight): weight,
reverse=True)
# and (maybe) exclude interests the user already has
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda (_, similarity): similarity,
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda (_, similarity): similarity,
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print "Popular Interests"
print popular_interests
print
print "Most Popular New Interests"
print "already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]
print most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print
print "already like:", ["R", "Python", "statistics", "regression", "probability"]
print most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"])
print
print "User based similarity"
print "most similar to 0"
print most_similar_users_to(0)
print "Suggestions for 0"
print user_based_suggestions(0)
print
print "Item based similarity"
print "most similar to 'Big Data'"
print most_similar_interests_to(0)
print
print "suggestions for user 0"
print item_based_suggestions(0)
| unlicense |
aemerick/galaxy_analysis | particle_analysis/particle_dispersion.py | 1 | 3354 | import numpy as np
import yt
import glob
import matplotlib.pyplot as plt
def vel_dispersion(v):
return np.std(v)
def compute_velocity_dispersion(data, types = None, fields = None, filter = None):
"""
Returns all possible velocity dispersons from all particles found in the
data set. A particle filter can be passed using "filter" which is a list
of booleans:
e.x.
> filter = data['particle_type'] == 11
> particle_dispersion.compute_velocity_dispersion(data, filter = filter)
---
> filter = (data['particle_type'] == 11) * (data['particle_spherical_r'] < 100.0)
> particle_dispersion.compute_velocity_dispersion(data, filter = filter)
"""
types_to_fields = {'x': 'particle_velocity_x',
'y': 'particle_velocity_y',
'z': 'particle_velocity_z',
'r': 'particle_velocity_spherical_radius',
'theta': 'particle_velocity_spherical_theta',
'phi': 'particle_velocity_spherical_phi'}
if types is None and fields is None:
fields = types_to_fields.values()
keys = types_to_fields.keys()
elif fields is None:
fields = [ types_to_fields[x] for x in types ]
keys = types
else:
keys = fields
dispersion = {}
for i,x in enumerate(fields):
if filter is not None:
v = data[x][filter]
else:
v = data[x]
if np.size(v) == 0:
dispersion[keys[i]] = 0.0
else:
dispersion[keys[i]] = vel_dispersion( v.convert_to_units('km/s') )
return dispersion
#
# function to do LOS velocity dispersion calculations
# - take a list of positions
# - take a list of velocities
# then
# - choose random line of sight
# - project velocities onto LOS
# - compute velocity disperions
# - repeat for X random LOS and average together
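# A possible sketch of that routine (a hypothetical helper, not part of the
# original module): draw random unit vectors, project the velocities onto each
# line of sight, and average the resulting dispersions. Any spatial selection
# on the particle positions would be applied before calling it.
def compute_los_velocity_dispersion(velocities, n_los=100, seed=None):
    """
    velocities : (N, 3) array-like of particle velocities
    n_los      : number of random lines of sight to average over
    """
    rng = np.random.RandomState(seed)
    v = np.asarray(velocities)
    sigmas = np.zeros(n_los)
    for i in range(n_los):
        los = rng.normal(size=3)                 # random direction ...
        los = los / np.sqrt(np.sum(los * los))   # ... normalized to a unit vector
        # 1D dispersion of the velocities projected onto this line of sight
        sigmas[i] = vel_dispersion(np.dot(v, los))
    return np.mean(sigmas)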
if __name__ == '__main__':
ds_list = np.sort( glob.glob('./DD????/DD????'))
ds = yt.load(ds_list[-1])
data = ds.all_data()
filter = data['particle_type'] == 11
dispersions = compute_velocity_dispersion(data, filter = filter)
dr = 50.0
bins = np.arange(0.0, 750.0 + dr, dr) * yt.units.pc
r = data['particle_position_spherical_radius'].convert_to_units('pc')
beta = np.zeros(np.size(bins)-1)
sigma = np.zeros(np.size(bins)-1)
for i in np.arange(1, np.size(bins)):
radial_filter = (r < bins[i]) * (r >= bins[i-1])
dispersions = compute_velocity_dispersion(data, filter = filter*radial_filter)
beta[i-1] = 1.0 - dispersions['theta'].value**2 / dispersions['r'].value**2
sigma[i-1] = np.sqrt(dispersions['x']**2 + dispersions['y']**2 + dispersions['z']**2)
centers = 0.5 * ( bins[1:] + bins[:-1])
fig, ax = plt.subplots(figsize=(8,8))
ax.plot(centers.value, beta, color = 'black', lw = 3)
ax.set_xlabel('Radius (pc)')
ax.set_ylabel(r'Anisotropy Parameter')
ax.minorticks_on()
plt.savefig('radial_anisotropy.png')
plt.close()
fig, ax = plt.subplots(figsize=(8,8))
ax.plot(centers.value, sigma, color = 'black', lw =3)
ax.set_xlabel('Radius (pc)')
ax.set_ylabel(r'3D velocity dispersion (km/s)')
ax.minorticks_on()
plt.savefig('velocity_dispersion.png')
| mit |
appapantula/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
cpaulik/scipy | scipy/cluster/tests/test_hierarchy.py | 26 | 35159 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with inconsistency matrices of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
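# (B[i] ends up holding the largest merge height anywhere inside the cluster
# formed at row i: the max of the two child clusters' values and Z[i, 2])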
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 6 | 21872 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
When validating this against glmnet, note that glmnet divides the
objective by nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
Actually, the parameters alpha = 0 should not be allowed. However,
we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that the alphas selected by each of them do not
# differ by more than one position in the clf.alphas_ grid
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
"""Test that both random and cyclic selection give the same results.
Ensure that the test models fully converge and check a wide
range of conditions.
"""
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
"""
Test that setting precompute="auto" gives a Deprecation Warning.
"""
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
"""
Test that the coefs returned by positive=True in enet_path are positive
"""
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
"""
Test that dense and sparse input give the same output for descent paths.
"""
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
dilawar/moose-full | moose-examples/snippets/hhcomp.py | 2 | 7958 | # hhcomp.py ---
#
# Filename: hhcomp.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Tue May 7 12:11:22 2013 (+0530)
# Version:
# Last-Updated: Tue May 7 19:21:43 2013 (+0530)
# By: subha
# Update #: 309
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""A compartment with hodgkin-huxley ion channels"""
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import numpy as np
import matplotlib.pyplot as plt
import moose
from moose import utils
EREST_ACT = -70e-3
per_ms = 1e3
def create_na_chan(parent='/library', name='na', vmin=-110e-3, vmax=50e-3, vdivs=3000):
"""Create a Hodhkin-Huxley Na channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
na = moose.HHChannel('%s/%s' % (parent, name))
na.Xpower = 3
na.Ypower = 1
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
m_gate = moose.element('%s/gateX' % (na.path))
m_gate.min = vmin
m_gate.max = vmax
m_gate.divs = vdivs
m_gate.tableA = m_alpha
m_gate.tableB = m_alpha + m_beta
h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
h_gate = moose.element('%s/gateY' % (na.path))
h_gate.min = vmin
h_gate.max = vmax
h_gate.divs = vdivs
h_gate.tableA = h_alpha
h_gate.tableB = h_alpha + h_beta
plt.subplot(2,1,1)
plt.plot(v, m_alpha / (m_alpha + m_beta), label='m_inf')
plt.plot(v, h_alpha / (h_alpha + h_beta), label='h_inf')
plt.legend()
plt.subplot(2,1,2)
plt.plot(v, 1/(m_alpha + m_beta), label='tau_m')
plt.plot(v, 1/(h_alpha + h_beta), label='tau_h')
plt.legend()
plt.show()
plt.close()
na.tick = -1
return na
def create_k_chan(parent='/library', name='k', vmin=-120e-3, vmax=40e-3, vdivs=3000):
"""Create a Hodhkin-Huxley K channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
k = moose.HHChannel('%s/%s' % (parent, name))
k.Xpower = 4
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
n_gate = moose.element('%s/gateX' % (k.path))
n_gate.min = vmin
n_gate.max = vmax
n_gate.divs = vdivs
n_gate.tableA = n_alpha
n_gate.tableB = n_alpha + n_beta
plt.subplot(211)
plt.plot(v, n_alpha/(n_alpha + n_beta))
plt.subplot(212)
plt.plot(v, 1/(n_alpha + n_beta))
plt.show()
plt.close()
k.tick = -1
return k
def test_channel_gates():
"""Creates prototype channels under `/library` and plots the time
constants (tau) and activation (minf, hinf, ninf) parameters for the
channel gates.
Does not execute any simulation.
"""
lib = moose.Neutral('/library')
na_proto = create_na_chan()
k_proto = create_k_chan()
m = moose.element('%s/gateX' % (na_proto.path))
h = moose.element('%s/gateY' % (na_proto.path))
n = moose.element('%s/gateX' % (k_proto.path))
v = np.linspace(m.min,m.max, m.divs+1)
plt.subplot(211)
plt.plot(v, 1/m.tableB, label='tau_m')
plt.plot(v, 1/h.tableB, label='tau_h')
plt.plot(v, 1/n.tableB, label='tau_n')
plt.legend()
plt.subplot(212)
plt.plot(v, m.tableA/m.tableB, label='m_inf')
plt.plot(v, h.tableA/h.tableB, label='h_inf')
plt.plot(v, n.tableA/n.tableB, label='n_inf')
plt.legend()
plt.show()
def create_passive_comp(parent='/library', name='comp', diameter=30e-6, length=0.0):
"""Creates a single compartment with squid axon Em, Cm and Rm. Does
not set Ra"""
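# Unit bookkeeping for the passive properties set below (sarea is in m^2):
# the squid-axon constants quoted in the comments are given per cm^2, so the
# factor 1e4 (cm^2 per m^2) converts them before multiplying by the area:
#   Rm = 1 / (0.3e-3 S/cm^2 * 1e4 * sarea)   [ohm]
#   Cm = 1e-6 F/cm^2 * 1e4 * sarea           [farad]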
comp = moose.Compartment('%s/%s' % (parent, name))
comp.Em = EREST_ACT + 10.613e-3
comp.initVm = EREST_ACT
if length <= 0:
sarea = np.pi * diameter * diameter
else:
sarea = np.pi * diameter * length
# specific conductance gm = 0.3 mS/cm^2
comp.Rm = 1 / (0.3e-3 * sarea * 1e4)
# Specific capacitance cm = 1 uF/cm^2
comp.Cm = 1e-6 * sarea * 1e4
return comp, sarea
def create_hhcomp(parent='/library', name='hhcomp', diameter=30e-6, length=0.0):
"""Create a compartment with Hodgkin-Huxley type ion channels (Na and
K).
Returns a 3-tuple: (compartment, nachannel, kchannel)
"""
comp, sarea = create_passive_comp(parent, name, diameter, length)
if moose.exists('/library/na'):
moose.copy('/library/na', comp.path, 'na')
else:
create_na_chan(parent=comp.path)
na = moose.element('%s/na' % (comp.path))
# Na-conductance 120 mS/cm^2
na.Gbar = 120e-3 * sarea * 1e4
na.Ek = 115e-3 + EREST_ACT
moose.connect(comp, 'channel', na, 'channel')
if moose.exists('/library/k'):
moose.copy('/library/k', comp.path, 'k')
else:
create_k_chan(parent=comp.path)
k = moose.element('%s/k' % (comp.path))
# K-conductance 36 mS/cm^2
k.Gbar = 36e-3 * sarea * 1e4
k.Ek = -12e-3 + EREST_ACT
moose.connect(comp, 'channel', k, 'channel')
return comp, na, k
def test_hhcomp():
"""Create and simulate a single spherical compartment with
Hodgkin-Huxley Na and K channel.
Plots Vm, injected current, channel conductances.
"""
model = moose.Neutral('/model')
data = moose.Neutral('/data')
comp, na, k = create_hhcomp(parent=model.path)
print comp.Rm, comp.Cm, na.Ek, na.Gbar, k.Ek, k.Gbar
pg = moose.PulseGen('%s/pg' % (model.path))
pg.firstDelay = 20e-3
pg.firstWidth = 40e-3
pg.firstLevel = 1e-9
pg.secondDelay = 1e9
moose.connect(pg, 'output', comp, 'injectMsg')
inj = moose.Table('%s/pulse' % (data.path))
moose.connect(inj, 'requestOut', pg, 'getOutputValue')
vm = moose.Table('%s/Vm' % (data.path))
moose.connect(vm, 'requestOut', comp, 'getVm')
gK = moose.Table('%s/gK' % (data.path))
moose.connect(gK, 'requestOut', k, 'getGk')
gNa = moose.Table('%s/gNa' % (data.path))
moose.connect(gNa, 'requestOut', na, 'getGk')
simdt = 1e-6
plotdt = 1e-4
simtime = 100e-3
if (1):
moose.showmsg( '/clock' )
for i in range(8):
moose.setClock( i, simdt )
moose.setClock( 8, plotdt )
moose.reinit()
else:
utils.resetSim([model.path, data.path], simdt, plotdt, simmethod='ee')
moose.showmsg( '/clock' )
moose.start(simtime)
t = np.linspace(0, simtime, len(vm.vector))
plt.subplot(211)
plt.plot(t, vm.vector * 1e3, label='Vm (mV)')
plt.plot(t, inj.vector * 1e9, label='injected (nA)')
plt.legend()
plt.title('Vm')
plt.subplot(212)
plt.title('Conductance (uS)')
plt.plot(t, gK.vector * 1e6, label='K')
plt.plot(t, gNa.vector * 1e6, label='Na')
plt.legend()
plt.show()
plt.close()
# moose.showfield(comp)
# moose.showfield(na)
# moose.showfield(k)
if __name__ == '__main__':
test_channel_gates()
test_hhcomp()
#
# hhcomp.py ends here
| gpl-2.0 |
altairpearl/scikit-learn | sklearn/tests/test_calibration.py | 15 | 11959 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
duthils/osmose-frontend | errors_graph.py | 1 | 5939 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <[email protected]> 2009 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
import time, sys, datetime, StringIO, os, tempfile
from datetime import timedelta
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot
import matplotlib.dates
from tools import query
def get_data(db, options):
sqlbase = """
SELECT
date,
SUM(count)
FROM (
SELECT
date_trunc('day', marker.timestamp) AS date,
AVG(marker.count) AS count
FROM
%s
WHERE
%s
GROUP BY
marker.source,
marker.class,
date
) AS t
GROUP BY
date
ORDER BY
date
"""
params = query._params()
join, where = query._build_param(None, None, None, params.source, params.item, params.level, None, params.classs, params.country, params.useDevItem, None, params.tags, None, stats=True, start_date=params.start_date, end_date=params.end_date)
sql = sqlbase % (join, where)
if len(sys.argv)>1:
print sql
result = []
db.execute(sql)
for r in db.fetchall():
result.append((r[0],r[1]))
return result
def get_text(db, options):
if len(options.sources)==1 and len(options.classes)==1:
db.execute("SELECT title->'en' FROM dynpoi_class WHERE source=%s AND class=%s;", (options.sources[0], options.classes[0]))
elif len(options.items)==1 and len(options.classes)==1:
db.execute("SELECT title->'en' FROM dynpoi_class WHERE class=%s AND item=%s LIMIT 1;", (options.classes[0], options.items[0]))
elif len(options.items)==1:
db.execute("SELECT menu->'en' FROM dynpoi_item WHERE item=%s LIMIT 1;", (options.items[0],))
else:
return ""
res = db.fetchone()
if res and res[0]:
return res[0]
else:
return ""
def get_src(db, options):
if len(options.sources) == 1:
db.execute("SELECT country, analyser FROM source WHERE id=%s;", (options.sources[0], ))
r = db.fetchone()
return r[0] + " - " + r[1]
elif options.country:
return str(options.country)
else:
return "All"
def convIntsToStr(values):
"""
Convert a list of integers into a comma-separated string
"""
return ", ".join([str(elt) for elt in values])
def make_plt(db, options, format):
data = get_data(db, options)
text = get_text(db, options)
src = get_src(db, options)
return plot(data, text+' '+src, format)
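# AutoDateLocatorDay below: for plotted ranges shorter than 5 days the tick
# positions are computed as if the range covered 5 full days ending at dmax,
# so very short series still get day-level tick spacing.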
class AutoDateLocatorDay(matplotlib.dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
if dmax-dmin <= timedelta(days=5):
return matplotlib.dates.AutoDateLocator.get_locator(self, dmax-timedelta(days=5), dmax)
else:
return matplotlib.dates.AutoDateLocator.get_locator(self, dmin, dmax)
def plot(data, title, format):
dates = [q[0] for q in data]
opens = [q[1] for q in data]
fig = matplotlib.pyplot.figure()
ax = fig.add_subplot(111)
ax.plot_date(dates, opens, '-', color='r')
ax.set_title(title)
# format the ticks
ax.relim()
if len(opens) > 1:
ytop = float(max(opens)) * 1.05 + 1
else:
ytop = None
ax.set_ylim(bottom=0, top=ytop)
ax.autoscale_view()
# format the coords message box
ax.fmt_ydata = lambda x: '$%1.2f'%x
ax.grid(True)
locator = AutoDateLocatorDay()
locator.set_axis(ax.xaxis)
locator.refresh()
formatter = matplotlib.dates.AutoDateFormatter(locator)
formatter.scaled[30.] = '%Y-%m'
formatter.scaled[1.0] = '%Y-%m-%d'
ax.xaxis.set_major_formatter(formatter)
fig.autofmt_xdate()
buf = StringIO.StringIO()
fig.savefig(buf, format = format)
matplotlib.pyplot.close(fig)
return buf.getvalue()
if __name__ == "__main__":
from optparse import OptionParser, SUPPRESS_HELP
start = time.clock()
parser = OptionParser()
parser.add_option("--source", dest="sources", type="int", action="append", default=[])
parser.add_option("--class", dest="classes", type="int", action="append", default=[])
parser.add_option("--item", dest="items", type="int", action="append", default=[])
parser.add_option("--level", dest="levels", type="int", action="append", default=[])
parser.add_option("--country", dest="country", type="string", default=None)
(options, args) = parser.parse_args()
data = make_plt(None, options, "png")
f = open("graph.png", "w")
f.write(data)
f.close()
end = time.clock()
print "graph.png generated in %ims"%((end-start)*1000)
sys.exit(0)
| gpl-3.0 |
Jsonzhang/kaggle | titanic/titanic.py | 1 | 6545 | # coding=utf-8
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, RandomForestRegressor
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn import preprocessing
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, log_loss
def processFeatures(data):
source = data
source.Cabin = source.Cabin.str.extract('([A-Z])\d+', expand=False)
source.Cabin.fillna('NULL', inplace=True)
source.Fare.fillna(source['Fare'].dropna().median(), inplace=True)
dummiesEmbarked = pd.get_dummies(source['Embarked'], prefix='Embarked')
dummiesCabin = pd.get_dummies(source['Cabin'], prefix='Cabin')
dummiesPclass = pd.get_dummies(source['Pclass'], prefix='Pclass')
source['Title'] = source.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
source['Title'] = source['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
source['Title'] = source['Title'].replace('Mlle', 'Miss')
source['Title'] = source['Title'].replace('Ms', 'Miss')
source['Title'] = source['Title'].replace('Mme', 'Mrs')
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
source['Title'] = source['Title'].map(title_mapping)
dummiesTitle = pd.get_dummies(source['Title'], prefix='Title')
source.loc[source['Fare'] <= 7.91, 'Fare'] = 0
source.loc[(source['Fare'] > 7.91) & (source['Fare'] <= 14.454), 'Fare'] = 1
source.loc[(source['Fare'] > 14.454) & (source['Fare'] <= 31), 'Fare'] = 2
source.loc[source['Fare'] > 31, 'Fare'] = 3
dummiesFare = pd.get_dummies(source['Fare'], prefix='Fare')
source = pd.concat([source, dummiesEmbarked, dummiesPclass, dummiesCabin, dummiesTitle, dummiesFare], axis=1)
source['Sex'] = source['Sex'].map(lambda x: 1 if x == 'male' else 0)
source['isChild'] = source['Age'].map(lambda x: 1 if x <= 16 else 0)
source['isOld'] = source['Age'].map(lambda x: 1 if x > 60 else 0)
source['isAlone'] = 0
source['FamilySize'] = source['SibSp'] + source['Parch'] + 1
source['SibSp'] = source['SibSp'].map(sib_rate, na_action=None)
source['SibSp'].fillna(0.5, inplace=True)
source.loc[source['FamilySize'] == 1, 'isAlone'] = 1
source = set_missing_ages(source, ['Age', 'Fare', 'Parch', 'SibSp', 'Pclass'], 'Age')
source = source.filter(regex='SibSp|isOld|isChild|isAlone|Title_.*|Age|Fare_.*|Embarked_.*|Cabin_.*|Sex|Pclass_.*')
return preprocessing.MinMaxScaler().fit_transform(source)
def set_missing_ages(df, features, target):
# Infer missing ages from numeric features such as passenger class, fare and family counts
target_df = df[features]
known = target_df[target_df[target].notnull()].as_matrix()
unknown = target_df[target_df[target].isnull()].as_matrix()
y = known[:, 0]
X = known[:, 1:]
if len(unknown):
rfr = RandomForestRegressor(
random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
predicted = rfr.predict(unknown[:, 1::])
df.loc[(df[target].isnull()), target] = predicted
return df
def sibsp_map_generate(source):
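# Map each SibSp value seen in `source` to the survival rate observed for it
# (a simple target encoding); SibSp values not present in this map are later
# filled with 0.5 in processFeatures.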
km = source['SibSp'].unique()
sib_rate = pd.Series(0.0, index=km)
for sib in km:
survived_total = source.Survived[source['SibSp'] == sib].value_counts()
if 1 in survived_total:
sib_rate[sib] = float(survived_total[1]) / float(sum(survived_total))
else:
sib_rate[sib] = 0
return sib_rate
train_df = pd.read_csv('./data/train.csv', index_col=False)
# valid_df = pd.read_csv('./data/train.csv', index_col=False)[540:700]
# test_df = pd.read_csv('./data/train.csv', index_col=False).tail(191)
# valid_df = pd.concat([train_df.tail(100), test_df.head(100)], axis=0)
target_df = pd.read_csv('./data/test.csv', index_col=False)
sib_rate = sibsp_map_generate(train_df)
X = processFeatures(train_df)
y = train_df['Survived']
# testX = processFeatures(test_df)
# testY = test_df['Survived']
# validX = processFeatures(valid_df)
# validY = valid_df['Survived']
targetX = processFeatures(target_df)
# create param grid object
forrest_params = dict(
max_depth = [n for n in range(9, 14)],
min_samples_split = [n for n in range(4, 11)],
min_samples_leaf = [n for n in range(2, 5)],
n_estimators = [n for n in range(10, 60, 10)]
)
classifiers = [
KNeighborsClassifier(3),
svm.SVC(probability=True),
svm.LinearSVC(),
DecisionTreeClassifier(),
GridSearchCV(estimator=RandomForestClassifier(), param_grid=forrest_params, cv=5),
AdaBoostClassifier(),
SGDClassifier(),
GradientBoostingClassifier(),
GaussianNB(),
Perceptron(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(),
DecisionTreeClassifier()
]
maxValue = 0
targetClf = None
acc_dict = {}
log_cols = ["Classifier", "Accuracy"]
log = pd.DataFrame(columns=log_cols)
sss = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
for clf in classifiers:
name = clf.__class__.__name__
clf.fit(X_train, y_train)
train_predictions = clf.predict(X_test)
acc = accuracy_score(y_test, train_predictions)
print(acc)
if acc > maxValue:
targetClf = clf
maxValue = acc
if name in acc_dict:
acc_dict[name] += acc
else:
acc_dict[name] = acc
for clf in acc_dict:
acc_dict[clf] = acc_dict[clf] / 10.0
log_entry = pd.DataFrame([[clf, acc_dict[clf]]], columns=log_cols)
log = log.append(log_entry)
plt.xlabel('Accuracy')
plt.title('Classifier Accuracy')
sns.set_color_codes("muted")
sns.barplot(x='Accuracy', y='Classifier', data=log, color="b")
result = targetClf.predict(targetX)
submission = pd.DataFrame({
"PassengerId": target_df["PassengerId"],
"Survived": result
})
submission.to_csv('./submission.csv', index=False)
"""
kaggle: 0.78468
kaggle: 3587
"""
| mit |
btabibian/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 8 | 5475 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, so it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
ajul/zerosum | python/examples/oneup.py | 1 | 2829 | import _initpath
import numpy
import zerosum.nash
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc('text', usetex = True)
def minimizer_advantage(a, b):
b_power_a = numpy.power(b, a) # max_handicap
denom = b - b_power_a + a * b_power_a * numpy.log(b)
p = (b - b_power_a) / denom
v = b_power_a * (b - 1.0) / denom
return p, v
def maximizer_advantage(a, b):
b_power_a = numpy.power(b, a) # max_handicap
denom = b - b_power_a + a * b * numpy.log(b)
p = (b - b_power_a) / denom
v = b * (b - 1.0) / denom
return p, v
n = 1024
b = 9.0 # maximum payoff; by what factor is the top of the step higher than the bottom
dpi = 120
a = numpy.linspace(0.0, 1.0, n)
min_p, min_v = minimizer_advantage(a, b)
max_p, max_v = maximizer_advantage(a, b)
fig_p, ax_p = plt.subplots(1, 1, figsize = (9, 9), dpi=dpi)
ax_p.plot(a, min_p, color='blue')
ax_p.plot(a, max_p, color='red')
ax_p.legend(['Minimizer advantage', 'Maximizer advantage'])
ax_p.set_xlabel('Maximum strategy ($a$)')
ax_p.set_ylabel('Probability of playing extremal strategy')
ax_p.set_aspect('equal')
ax_p.set_xlim(left = 0.0, right = 1.0)
ax_p.set_ylim(bottom = 0.0, top = 1.0)
fig_p.savefig("out/oneup_probability.png", dpi = dpi, bbox_inches = "tight")
fig_v, ax_v = plt.subplots(1, 1, figsize = (9, 9), dpi=dpi)
ax_v.plot(a, min_v, color='blue')
ax_v.plot(a, max_v, color='red')
ax_v.legend(['Minimizer advantage', 'Maximizer advantage'])
ax_v.set_xlabel('Maximum strategy ($a$)')
ax_v.set_ylabel('Expected payoff')
#ax_v.set_aspect(1.0 / (b + 1.0))
ax_v.set_xlim(left = 0.0, right = 1.0)
ax_v.set_ylim(bottom = 0.0, top = b)
fig_v.savefig("out/oneup_payoff.png", dpi = dpi, bbox_inches = "tight")
# verification code
"""
n = 1024
row_frac = 0.5
col_frac = 1.0
max_payoff = 2.0
x = numpy.linspace(0.0, 1.0, n)
costs = numpy.power(max_payoff, x)
payoff_matrix = costs[None, :] / costs[:, None]
payoff_matrix[numpy.tril_indices(n)] *= max_payoff
n_row = int(n * row_frac)
n_col = int(n * col_frac)
row_result, col_result = zerosum.nash.nash(payoff_matrix[:n_row, :n_col])
if row_frac < col_frac:
#c = max_payoff - numpy.power(max_payoff, row_frac)
#denom = (c + row_frac * numpy.power(max_payoff, row_frac) * numpy.log(max_payoff))
#p = c / denom
#v = numpy.power(max_payoff, row_frac) * (max_payoff - 1.0) / denom
p, v = minimizer_advantage(row_frac, max_payoff)
else:
#c = max_payoff - numpy.power(max_payoff, col_frac)
#denom = (c + col_frac * max_payoff * numpy.log(max_payoff))
#p = c / denom
#v = max_payoff * (max_payoff - 1.0) / denom
p, v = maximizer_advantage(col_frac, max_payoff)
print(p, max(row_result.strategy), max(col_result.strategy))
print(v, row_result.value)
plt.plot(x[:n_row], row_result.strategy, x[:n_col], col_result.strategy)
plt.show()
"""
| bsd-3-clause |
mindw/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
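# The ones are written through the flattened view: consecutive elements of a
# diagonal are M + 1 apart in flat order, i is the flat offset of the first
# element of the k-th diagonal, and slicing the rows to :M-k stops the run
# before it wraps around past the end of the diagonal.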
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
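# Illustrative sketch (not part of the public API): ``greater_equal.outer``
# above compares every row index against every shifted column index at once,
# so the mask is True exactly where ``i >= j - k``, i.e. at and below the
# k-th diagonal.  A direct loop translation for comparison:
def _tri_by_loop(N, M=None, k=0, dtype=float):
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    for i in range(N):
        for j in range(M):
            if j <= i + k:
                out[i, j] = 1
    return out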
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
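# Illustrative sketch (not part of the public API): ``multiply.accumulate``
# above turns a column of ones followed by repeated copies of ``x`` into the
# running products 1, x, x**2, ..., and the reversed view ``v[:, ::-1]``
# writes those powers in decreasing order when ``increasing`` is False.  The
# same matrix built one column at a time:
def _vander_by_columns(x, N=None, increasing=False):
    x = asarray(x)
    if N is None:
        N = len(x)
    out = empty((len(x), N), dtype=promote_types(x.dtype, int))
    for i in range(N):
        power = i if increasing else N - 1 - i
        out[:, i] = x ** power
    return out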
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
    >>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
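# Illustrative note (assumption, not part of the public API): when ``bins`` is
# a single flat sequence of more than two edges, the branch above reuses the
# same edge array for both axes before delegating to ``histogramdd``.  Spelled
# out explicitly, such a call is equivalent to:
def _histogram2d_shared_edges(x, y, edges):
    edges = asarray(edges, float)
    return histogram2d(x, y, bins=[edges, edges])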
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
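# Illustrative note (not part of the public API): ``tri(n, m, k=k - 1)`` marks
# everything strictly below the k-th diagonal, so its complement selects the
# entries at and above that diagonal -- the same construction `triu` uses
# earlier in this module.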
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
vigilv/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
    Finding structure with randomness: Probabilistic algorithms for
    constructing approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
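# Illustrative sketch (assumption, not part of the scikit-learn API): for an
# exact truncated SVD the reduced data returned by ``fit_transform`` (U * Sigma)
# coincides with projecting the raw data through ``transform`` (X * V); with
# the randomized solver the two agree up to the approximation error.
def _compare_fit_transform_and_transform(X, n_components=2, random_state=0):
    svd = TruncatedSVD(n_components=n_components, random_state=random_state)
    X_reduced = svd.fit_transform(X)
    X_projected = svd.transform(X)
    return X_reduced, X_projected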
| bsd-3-clause |
wjfwzzc/Kaggle_Script | sf_crime/data/load_data.py | 1 | 3114 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import datetime
import zipfile
import pandas
import sklearn.preprocessing
def parse_time(timestamp):
dt = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
return dt.hour, dt.day, dt.month, dt.year
def process(df):
# address_features = df["Address"].apply(lambda x: log_odds[x]).rename(columns=lambda x: "LogOdds_" + str(x))
# df = df.join(address_features)
# df["IsIntersection"] = df["Address"].apply(lambda x: 1 if "/" in x else 0)
# df["LogOddsPA"] = df["Address"].apply(lambda x: log_odds_pa[x])
sklearn.preprocessing.scale(df[["X", "Y"]], copy=False)
df["Time"], df["Day"], df["Month"], df["Year"] = zip(*df["Dates"].apply(parse_time))
dummy_dow = pandas.get_dummies(df["DayOfWeek"], prefix="DayOfWeek")
dummy_pd = pandas.get_dummies(df["PdDistrict"], prefix="PdDistrict")
df = df.join([dummy_dow, dummy_pd])
df.drop(["Dates", "DayOfWeek", "PdDistrict", "Address"], axis=1, inplace=True)
return df
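# Illustrative sketch (assumption, not part of the original pipeline): a quick
# check of the timestamp parsing used in process().
def _demo_parse_time():
    # "2015-05-13 23:53:00" -> (23, 13, 5, 2015), i.e. hour, day, month, year.
    return parse_time("2015-05-13 23:53:00")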
z = zipfile.ZipFile("./data/train.csv.zip")
train_df = pandas.read_csv(z.open("train.csv"))
z = zipfile.ZipFile("./data/test.csv.zip")
test_df = pandas.read_csv(z.open("test.csv"))
# addresses = set(sorted(train_df["Address"].unique()))
# categories = sorted(train_df["Category"].unique())
# addr_cnt = train_df.groupby(["Address"]).size()
# cat_cnt = train_df.groupby(["Category"]).size()
# ac_cnt = train_df.groupby(["Address", "Category"]).size()
# log_odds = {}
# log_odds_pa = {}
# default_log_odds = numpy.log(cat_cnt / len(train_df)) - numpy.log(1 - cat_cnt / len(train_df))
# for addr in addresses:
# pa = addr_cnt[addr] / len(train_df)
# log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)
# log_odds[addr] = copy.deepcopy(default_log_odds)
# for cat in ac_cnt[addr].keys():
# if 2 < ac_cnt[addr][cat] < addr_cnt[addr]:
# pa = ac_cnt[addr][cat] / addr_cnt[addr]
# log_odds[addr][categories.index(cat)] = numpy.log(pa) - numpy.log(1 - pa)
# log_odds[addr] = pandas.Series(log_odds[addr])
#
# new_addresses = set(sorted(test_df["Address"].unique()))
# new_addr_cnt = test_df.groupby("Address").size()
# in_both = new_addresses & addresses
# only_new = new_addresses - in_both
# for addr in only_new:
# pa = new_addr_cnt[addr] / (len(train_df) + len(test_df))
# log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)
# log_odds[addr] = copy.deepcopy(default_log_odds)
# for addr in in_both:
# pa = (addr_cnt[addr] + new_addr_cnt[addr]) / (len(train_df) + len(test_df))
# log_odds_pa[addr] = numpy.log(pa) - numpy.log(1 - pa)
data_df = train_df.append(test_df).reset_index(drop=True)
data_df.drop(["Id", "Category", "Descript", "Resolution"], axis=1, inplace=True)
data_df = process(data_df)
le = sklearn.preprocessing.LabelEncoder()
train_df["Category"] = le.fit_transform(train_df["Category"])
target = train_df["Category"].astype('category')
ids = test_df["Id"].values
train = data_df[:train_df.shape[0]].values
test = data_df[train_df.shape[0]:].values
| mit |
CCBatIIT/AlGDock | Pipeline/analyze_profile.py | 2 | 1798 | import os, inspect
script_dir = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
execfile(os.path.join(script_dir,'_load_profile.py'))
if not os.path.isdir('figures'):
os.makedirs('figures')
# Plot histogram of all sequence identities
seq_identities = [p[2] for p in profile.values()]
import matplotlib.pylab as plt
plt.clf()
plt.hist(seq_identities)
plt.xlabel('Sequence Identity')
plt.ylabel('Number of Structures')
plt.savefig('figures/hist_seq_id.png')
# Plot histogram of selected sequences, with
# sequence identities greater than min_seq_identity and
# equivalent positions greater than min_equivalent_positions
selected_seq_identities = \
[p[2] for p in profile.values() \
if (p[2]>=min_seq_identity) and (p[1]>=min_equivalent_positions)]
if len(selected_seq_identities)>0:
plt.clf()
plt.hist(selected_seq_identities)
plt.xlabel('Sequence Identity')
plt.ylabel('Number of Structures')
plt.savefig('figures/hist_seq_id_selected.png')
print '%d selected chains'%len(selected_seq_identities)
print 'with minimum sequence identity of %d'%min_seq_identity
print 'and at least %d equivalent positions'%min_equivalent_positions
# Sort sequences by
# 1. Sequence Identity
# 2. Equivalent Positions
import numpy as np
seq_identities = np.array([p[2] for p in profile.values()], dtype=float)
equivalent_positions = np.array([p[1] for p in profile.values()], dtype=float)
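# Combined score: raw sequence identity plus the number of equivalent positions
# rescaled to [0, 1], so identity dominates the ordering and the rescaled term
# mainly breaks ties between chains with similar identity.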
scores = seq_identities + equivalent_positions/max(equivalent_positions)
inds = np.argsort(scores)
inds = inds[np.logical_and(\
seq_identities[inds]>=min_seq_identity, \
equivalent_positions[inds]>=min_equivalent_positions)]
prof_list = profile.items()
print '\n'.join(['{0[0][0]} {0[0][1]} {0[1][2]} {0[1][1]} {0[1][3]}'.format(prof_list[ind]) for ind in inds[::-1]])
| mit |
mmottahedi/neuralnilm_prototype | scripts/e201.py | 2 | 6739 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
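# Illustrative sketch (assumption, plain NumPy, not used by the training code):
# the Theano graph above averages the squared error separately over targets
# above and below THRESHOLD and then averages the two means, so the many
# near-zero "off" samples cannot swamp the rarer "on" samples.
def scaled_cost_numpy(x, t, threshold=THRESHOLD):
    import numpy as np
    x = np.asarray(x)
    t = np.asarray(t)
    sq_error = (x - t) ** 2
    above = sq_error[t > threshold]
    below = sq_error[t <= threshold]
    above_mean = above.mean() if above.size else 0.0
    below_mean = below.mean() if below.size else 0.0
    return (above_mean + below_mean) / 2.0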
def exp_a(name):
# global source
# source = RealApplianceSource(
# filename='/data/dk3810/ukdale.h5',
# appliances=[
# ['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television'
# # 'dish washer',
# # ['washer dryer', 'washing machine']
# ],
# max_appliance_powers=[2500] * 5,
# on_power_thresholds=[5] * 5,
# max_input_power=2500,
# min_on_durations=[60, 60, 60, 1800, 1800],
# min_off_durations=[12, 12, 12, 1800, 600],
# window=("2013-06-01", "2014-07-01"),
# seq_length=1520,
# output_one_appliance=False,
# boolean_targets=False,
# train_buildings=[1],
# validation_buildings=[1],
# skip_probability=0.7,
# n_seq_per_batch=25,
# input_padding=4,
# include_diff=False,
# clip_appliance_power=False
# )
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 5,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
# 'W': Uniform()
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
jereze/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
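# Illustrative sketch (assumption, not part of the scikit-learn API): the
# double centering above removes row and column effects in log space, so both
# the row means and the column means of the result are numerically zero for a
# dense input.
def _check_log_normalize_centering(X):
    L = _log_normalize(X)
    return np.allclose(L.mean(axis=1), 0), np.allclose(L.mean(axis=0), 0)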
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
drpngx/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops.py | 6 | 176288 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
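# Note on the helper above: `math_ops.truediv` still evaluates the division for
# every element, but `array_ops.where` discards the resulting inf/NaN entries
# and substitutes 0 wherever the denominator is not strictly positive, so the
# streaming metrics below stay finite when a count or weight total is zero.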
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
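  For illustration, a minimal usage sketch under TF 1.x graph/session
  semantics, assuming `import tensorflow as tf`; the placeholder and batch
  values below are hypothetical:
    values = tf.placeholder(tf.float32, shape=[None])
    mean, update_op = tf.contrib.metrics.streaming_mean(values)
    with tf.Session() as sess:
      # Metric variables are local variables and must be initialized.
      sess.run(tf.local_variables_initializer())
      for batch in ([1.0, 2.0], [3.0, 5.0]):
        sess.run(update_op, feed_dict={values: batch})
      print(sess.run(mean))  # (1 + 2 + 3 + 5) / 4 = 2.75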
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
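  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics) showing that the result keeps the input shape:
    values = tf.placeholder(tf.float32, shape=[2])
    mean, update_op = tf.contrib.metrics.streaming_mean_tensor(values)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op, feed_dict={values: [0.0, 2.0]})
      sess.run(update_op, feed_dict={values: [4.0, 6.0]})
      print(sess.run(mean))  # Element-wise mean: [2.0, 4.0]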
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
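  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    predictions = tf.constant([1, 0, 1, 1], dtype=tf.int64)
    labels = tf.constant([1, 1, 1, 0], dtype=tf.int64)
    accuracy, update_op = tf.contrib.metrics.streaming_accuracy(
        predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(accuracy))  # 2 matches out of 4 -> 0.5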
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
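  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    predictions = tf.constant([True, True, False, True])
    labels = tf.constant([True, False, False, False])
    precision, update_op = tf.contrib.metrics.streaming_precision(
        predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(precision))  # 1 TP / (1 TP + 2 FP) = 1/3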
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
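  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    predictions = tf.constant([True, True, False, False])
    labels = tf.constant([True, False, False, False])
    fpr, update_op = streaming_false_positive_rate(predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(fpr))  # 1 FP / (1 FP + 2 TN) = 1/3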
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
  `true_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
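  For illustration (hypothetical values): running the returned update ops once
  with `predictions = [0.2, 0.8]`, `labels = [False, True]` and
  `thresholds = [0.5]` leaves the variables at `tp = [1.]`, `fn = [0.]`,
  `tn = [1.]` and `fp = [0.]`, since only the second prediction exceeds 0.5
  and its label is `True`.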
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the ROC
      curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
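  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
    labels = tf.constant([False, False, True, True])
    auc, update_op = tf.contrib.metrics.streaming_auc(predictions, labels)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(auc))  # Approximately 0.75 for this toy input.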
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the ROC
      curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
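  For illustration (hypothetical values): with `labels = [0, 0, 1, 1]` and
  `predictions = [0.1, 0.4, 0.35, 0.8]`, each distinct prediction serves as a
  threshold, yielding ROC points (FPR, TPR) of (0, 0), (0, 0.5), (0.5, 0.5),
  (0.5, 1.0) and (1.0, 1.0); summing the trapezoid areas between consecutive
  points gives an AUC of 0.75.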
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of the negatives, for computing the FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
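  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    labels = tf.constant([0, 0, 1, 1], dtype=tf.int64)
    predictions = tf.constant([0.1, 0.4, 0.35, 0.8], dtype=tf.float64)
    auc, update_op = streaming_dynamic_auc(labels, predictions)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      print(sess.run(auc))  # Exact ROC AUC for this toy input: 0.75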
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions`, with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
weights: A 'Tensor' of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
concept of placement values for each labeled group, as presented by Delong and
Delong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
  # If all the labels are the same or if the number of observations is too
  # small, the AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  ## If the estimate is 1 or 0, no variance is present, so the confidence
  ## interval collapses to the point estimate.
  ## N.B. This can be misleading, since the number of observations may simply
  ## be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
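  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    labels = tf.constant([0, 0, 1, 1], dtype=tf.int64)
    predictions = tf.constant([0.1, 0.4, 0.35, 0.8], dtype=tf.float64)
    auc, update_op = auc_with_confidence_intervals(labels, predictions)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      # `auc` bundles the point estimate and the lower/upper CI bounds.
      print(sess.run(auc))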
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions`, with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
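  For illustration, a minimal sketch with hypothetical values (TF 1.x
  graph/session semantics):
    labels = tf.constant([True, False, True, False])
    predictions = tf.constant([0.9, 0.6, 0.4, 0.1])
    result, update_op = precision_recall_at_equal_thresholds(
        labels, predictions, num_thresholds=5)
    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      # result.thresholds is [0.0, 0.25, 0.5, 0.75, 1.0]; precision and
      # recall hold one value per threshold.
      print(sess.run([result.precision, result.recall]))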
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds. Types are same as that of predictions.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# It's important we aggregate using float64 since we're accumulating a lot
# of 1.0's for the true/false labels, and accumulating to float32 will
# be quite inaccurate even with just a modest amount of values (~20M).
# We use float64 instead of integer primarily since GPU scatter kernel
# only support floats.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
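  Example:
    A minimal usage sketch (hypothetical tensor values; assumes TF 1.x graph
    mode with this function available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([0.1, 0.4, 0.6, 0.9])
      labels = tf.constant([False, False, True, True])
      specificity, update_op = (
          tf.contrib.metrics.streaming_specificity_at_sensitivity(
              predictions, labels, sensitivity=0.9))
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)           # accumulate this batch
        print(sess.run(specificity))  # specificity at ~0.9 sensitivity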
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
            'the order of the labels and predictions arguments has been switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
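  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this deprecated wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([0.2, 0.7, 0.9, 0.4])
      labels = tf.constant([False, True, True, False])
      precision, update_op = (
          tf.contrib.metrics.streaming_precision_at_thresholds(
              predictions, labels, thresholds=[0.3, 0.5, 0.8]))
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)         # accumulate this batch
        print(sess.run(precision))  # one precision value per threshold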
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values
  of thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight of
  values in `predictions` at or below `thresholds[i]` whose corresponding entry
  in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
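  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this deprecated wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([[0.1, 0.6, 0.3],
                                 [0.8, 0.15, 0.05]])
      labels = tf.constant([1, 2])  # one class id per example
      recall, update_op = tf.contrib.metrics.streaming_recall_at_k(
          predictions, labels, k=2)
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(recall))  # fraction of labels found in the top 2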
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
      in the batch for which `class_id` is in `labels`, and computing the
      fraction of them for which `class_id` is in the top `k` highest
      `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
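  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([[0.1, 0.6, 0.3],
                                 [0.8, 0.15, 0.05]])
      labels = tf.constant([[1], [2]], dtype=tf.int64)
      recall, update_op = tf.contrib.metrics.streaming_sparse_recall_at_k(
          predictions, labels, k=1)
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(recall))  # 0.5: only the first label is in its top-1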
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
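  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      top_k_predictions = tf.constant([[1, 2], [0, 1]], dtype=tf.int64)
      labels = tf.constant([[1], [2]], dtype=tf.int64)
      precision, update_op = (
          tf.contrib.metrics.streaming_sparse_precision_at_top_k(
              top_k_predictions, labels))
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(precision))  # true positives / all top-k predictions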
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes `recall` at `precision`.
  The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
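  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this function is available as `tf.contrib.metrics`; note that `labels`
    is the first argument here):
      import tensorflow as tf
      labels = tf.constant([False, True, True, False])
      predictions = tf.constant([0.1, 0.8, 0.6, 0.4])
      recall, update_op = tf.contrib.metrics.recall_at_precision(
          labels, predictions, precision=0.9)
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(recall))  # recall at the threshold closest to 0.9 precision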
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value')
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon,
name)
precision_value = compute_precision_at_recall(
values['tp'], values['fp'], values['fn'], 'value')
update_op = compute_precision_at_recall(
update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on the formula. `weights`
  are applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
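  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([[0.1, 0.6, 0.3],
                                 [0.8, 0.15, 0.05]])
      labels = tf.constant([[1], [2]], dtype=tf.int64)
      mean_ap, update_op = (
          tf.contrib.metrics.streaming_sparse_average_precision_at_k(
              predictions, labels, k=2))
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(mean_ap))  # mean average precision @2 over the batch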
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
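  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this deprecated wrapper is available as `tf.contrib.metrics`):
      import tensorflow as tf
      predictions = tf.constant([1.0, 2.0, 4.0])
      labels = tf.constant([1.5, 2.0, 3.0])
      mae, update_op = tf.contrib.metrics.streaming_mean_absolute_error(
          predictions, labels)
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(mae))  # (0.5 + 0.0 + 1.0) / 3 = 0.5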
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
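  Example:
    A minimal usage sketch (hypothetical values; assumes TF 1.x graph mode and
    that this wrapper is available as `tf.contrib.metrics`; here the labels
    themselves are used as the normalizer):
      import tensorflow as tf
      predictions = tf.constant([1.0, 4.0])
      labels = tf.constant([2.0, 5.0])
      mre, update_op = tf.contrib.metrics.streaming_mean_relative_error(
          predictions, labels, normalizer=labels)
      with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(mre))  # mean of |1-2|/2 and |4-5|/5 = 0.35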
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None,
'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
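# Illustrative sketch (plain NumPy, not part of the tf.contrib API): a check of
# the comoment-combination formula documented above on two unweighted batches.
def _example_comoment_merge():
  import numpy as np
  rng = np.random.RandomState(0)
  x_a, y_a = rng.randn(8), rng.randn(8)  # batch A
  x_b, y_b = rng.randn(5), rng.randn(5)  # batch B
  comoment = lambda x, y: np.sum((x - x.mean()) * (y - y.mean()))
  n_a, n_b = len(x_a), len(x_b)
  c_ab = (comoment(x_a, y_a) + comoment(x_b, y_b) +
          (x_a.mean() - x_b.mean()) * (y_a.mean() - y_b.mean()) *
          n_a * n_b / (n_a + n_b))
  x, y = np.concatenate([x_a, x_b]), np.concatenate([y_a, y_b])
  assert np.isclose(c_ab, comoment(x, y))  # streamed result equals batch result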
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
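# Illustrative sketch (plain NumPy, not part of the tf.contrib API): with unit
# weights the value above is the ordinary sample correlation coefficient,
# cov(p, l) / sqrt(var(p) * var(l)).
def _example_pearson_r(predictions, labels):
  import numpy as np
  p = np.asarray(predictions, dtype=float)
  l = np.asarray(labels, dtype=float)
  cov = np.sum((p - p.mean()) * (l - l.mean())) / (p.size - 1)
  return cov / np.sqrt(p.var(ddof=1) * l.var(ddof=1))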
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
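# Illustrative sketch (plain NumPy, not part of the tf.contrib API): for
# unit-normalized inputs the metric above is 1 - mean(sum(p * l, axis=dim)).
def _example_mean_cosine_distance(predictions, labels, dim=1):
  import numpy as np
  p = np.asarray(predictions, dtype=float)
  l = np.asarray(labels, dtype=float)
  return 1.0 - np.mean(np.sum(p * l, axis=dim))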
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
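# Illustrative sketch (plain NumPy, not part of the tf.contrib API): mean IOU
# from an accumulated confusion matrix whose rows index ground-truth classes.
def _example_mean_iou(confusion):
  import numpy as np
  cm = np.asarray(confusion, dtype=float)
  true_pos = np.diag(cm)
  denom = cm.sum(axis=0) + cm.sum(axis=1) - true_pos  # TP + FP + FN per class
  valid = denom > 0  # classes that never occur are left out of the average
  return np.mean(true_pos[valid] / denom[valid])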
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
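# Illustrative sketch (plain Python, not part of the tf.contrib API): the same
# growth rule without tensors. For example, a required size of 10 maps to 12,
# since 1.5 ** 6 ~= 11.4 is the smallest power of 1.5 that is >= 10.
def _example_next_array_size(required_size, growth_factor=1.5):
  import math
  exponent = math.ceil(math.log(required_size) / math.log(growth_factor))
  return int(math.ceil(growth_factor ** exponent))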
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
      raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
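# Illustrative sketch (plain NumPy, not part of the tf.contrib API): the same
# amortized-growth idea without TF control flow. Capacity grows geometrically
# (doubling here, a factor of 1.5 above), so repeated appends stay cheap.
def _example_dynamic_concat(batches):
  import numpy as np
  array, size = np.zeros((0,)), 0
  for batch in batches:
    batch = np.asarray(batch, dtype=float).ravel()
    new_size = size + batch.size
    if new_size > array.size:  # reallocate, then copy the old contents over
      grown = np.zeros((max(new_size, 2 * max(array.size, 1)),))
      grown[:size] = array[:size]
      array = grown
    array[size:new_size] = batch
    size = new_size
  return array[:size]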
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(
predictions, labels, weights),
'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(
predictions, labels, labels, weights),
'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
_, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_op = state_ops.assign_add(count_, num_values)
if metrics_collections:
ops.add_to_collections(metrics_collections, count_)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return count_, update_op
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but the method
doesn't support weighted matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
metrics_impl._safe_div( # pylint: disable=protected-access
pe_row * pe_col, total, None))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
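# Illustrative sketch (plain NumPy, not part of the tf.contrib API): kappa from
# an accumulated confusion matrix, mirroring _calculate_k above:
#   pe = sum(row_totals * col_totals) / N, kappa = (sum(po) - pe) / (N - pe).
def _example_cohen_kappa(confusion):
  import numpy as np
  cm = np.asarray(confusion, dtype=float)
  n = cm.sum()
  po_sum = np.trace(cm)
  pe_sum = np.sum(cm.sum(axis=0) * cm.sum(axis=1)) / n
  return (po_sum - pe_sum) / (n - pe_sum)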
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
| apache-2.0 |
blab/nextstrain-augur | augur/tree.py | 1 | 16604 | """
Build a tree using a variety of methods.
"""
import os
import shutil
import sys
import time
import uuid
import Bio
from Bio import Phylo
import numpy as np
from treetime.vcf_utils import read_vcf
from pathlib import Path
from .utils import run_shell_command, nthreads_value
def find_executable(names, default = None):
"""
Return the path to the first executable found in PATH from the given list
of names.
    Raises a (hopefully helpful) error if no executable is found. Provide a
    value for the "default" parameter to return that value instead.
"""
exe = next(filter(shutil.which, names), default)
if exe is None:
print("Unable to find any of %s in PATH=%s" % (names, os.environ["PATH"]))
print("Hint: You can install the missing program using conda or homebrew or apt-get.")
raise Exception
return exe
def build_raxml(aln_file, out_file, clean_up=True, nthreads=1):
'''
    build tree using RAxML with parameters "-f d -m GTRCAT -c 25 -p 235813 -n tre"
'''
raxml = find_executable([
# Users who symlink/install as "raxml" can pick a specific version,
# otherwise we have our own search order based on expected parallelism.
# The variants we look for are not exhaustive, but based on what's
# provided by conda and Ubuntu's raxml packages. This may want to be
# adjusted in the future depending on use.
"raxml",
"raxmlHPC-PTHREADS-AVX2",
"raxmlHPC-PTHREADS-AVX",
"raxmlHPC-PTHREADS-SSE3",
"raxmlHPC-PTHREADS",
"raxmlHPC-AVX2",
"raxmlHPC-AVX",
"raxmlHPC-SSE3",
"raxmlHPC",
])
# RAxML outputs files appended with this random string:
# RAxML_bestTree.4ed91a, RAxML_info.4ed91a, RAxML_parsimonyTree.4ed91a, RAxML_result.4ed91a
random_string = uuid.uuid4().hex[0:6]
call = [raxml,"-T",str(nthreads)," -f d -m GTRCAT -c 25 -p 235813 -n %s -s"%(random_string), aln_file, "> RAxML_log.%s"%(random_string)]
cmd = " ".join(call)
print("Building a tree via:\n\t" + cmd +
"\n\tStamatakis, A: RAxML Version 8: A tool for Phylogenetic Analysis and Post-Analysis of Large Phylogenies."
"\n\tIn Bioinformatics, 2014\n")
try:
run_shell_command(cmd, raise_errors = True)
shutil.copy("RAxML_bestTree.%s"%(random_string), out_file)
T = Phylo.read(out_file, 'newick')
if clean_up:
os.remove("RAxML_bestTree.%s"%(random_string))
os.remove("RAxML_info.%s"%(random_string))
os.remove("RAxML_log.%s"%(random_string))
os.remove("RAxML_parsimonyTree.%s"%(random_string))
os.remove("RAxML_result.%s"%(random_string))
except:
print("TREE BUILDING FAILED, please inspect the raxml.log file\n")
T=None
return T
def build_fasttree(aln_file, out_file, clean_up=True, nthreads=1):
'''
build tree using fasttree with parameters "-nt"
'''
log_file = out_file + ".log"
fasttree = find_executable([
# Search order is based on expected parallelism and accuracy
# (double-precision versions).
"FastTreeDblMP",
"FastTreeDbl",
"FastTreeMP",
"fasttreeMP", # Ubuntu lowercases
"FastTree",
"fasttree"
])
# By default FastTree with OpenMP support uses all available cores.
# However, it respects the standard OpenMP environment variable controlling
# this as described at <http://www.microbesonline.org/fasttree/#OpenMP>.
#
# We always set it, regardless of it the found FastTree executable contains
# "MP" in the name, because the generic "FastTree" or "fasttree" variants
# might be OpenMP-enabled too.
extra_env = {
"OMP_NUM_THREADS": str(nthreads),
}
call = [fasttree, "-nosupport", "-nt", aln_file, "1>", out_file, "2>", log_file]
cmd = " ".join(call)
print("Building a tree via:\n\t" + cmd +
"\n\tPrice et al: FastTree 2 - Approximately Maximum-Likelihood Trees for Large Alignments." +
"\n\tPLoS ONE 5(3): e9490. https://doi.org/10.1371/journal.pone.0009490\n")
try:
run_shell_command(cmd, raise_errors = True, extra_env = extra_env)
T = Phylo.read(out_file, 'newick')
if clean_up:
os.remove(log_file)
except:
print("TREE BUILDING FAILED")
T=None
return T
def build_iqtree(aln_file, out_file, substitution_model="GTR", clean_up=True, nthreads=1):
'''
build tree using IQ-Tree with parameters "-fast"
arguments:
        aln_file    file name of input alignment
out_file file name to write tree to
'''
with open(aln_file) as ifile:
tmp_seqs = ifile.readlines()
    # IQ-tree messes with taxon names. Hence remove offending characters and reinstate them later
tmp_aln_file = aln_file.replace(".fasta", "-delim.fasta")
log_file = tmp_aln_file.replace(".fasta", ".iqtree.log")
with open(tmp_aln_file, 'w') as ofile:
for line in tmp_seqs:
ofile.write(line.replace('/', '_X_X_').replace('|','_Y_Y_'))
# For compat with older versions of iqtree, we avoid the newish -fast
# option alias and instead spell out its component parts:
#
# -ninit 2
# -n 2
# -me 0.05
#
# This may need to be updated in the future if we want to stay in lock-step
# with -fast, although there's probably no particular reason we have to.
# Refer to the handling of -fast in utils/tools.cpp:
# https://github.com/Cibiv/IQ-TREE/blob/44753aba/utils/tools.cpp#L2926-L2936
fast_opts = [
"-ninit", "2",
"-n", "2",
"-me", "0.05"
]
if substitution_model.lower() != "none":
call = ["iqtree", *fast_opts, "-nt", str(nthreads), "-s", tmp_aln_file,
"-m", substitution_model, ">", log_file]
else:
call = ["iqtree", *fast_opts, "-nt", str(nthreads), "-s", tmp_aln_file, ">", log_file]
cmd = " ".join(call)
print("Building a tree via:\n\t" + cmd +
"\n\tNguyen et al: IQ-TREE: A fast and effective stochastic algorithm for estimating maximum likelihood phylogenies."
"\n\tMol. Biol. Evol., 32:268-274. https://doi.org/10.1093/molbev/msu300\n")
if substitution_model.lower() == "none":
print("Conducting a model test... see iqtree.log for the result. You can specify this with --substitution-model in future runs.")
try:
run_shell_command(cmd, raise_errors = True)
T = Phylo.read(tmp_aln_file+".treefile", 'newick')
shutil.copyfile(tmp_aln_file+".treefile", out_file)
for n in T.get_terminals():
n.name = n.name.replace('_X_X_','/').replace('_Y_Y_','|')
#this allows the user to check intermediate output, as tree.nwk will be
if clean_up:
#allow user to see chosen model if modeltest was run
if substitution_model.lower() == 'none':
shutil.copyfile(log_file, out_file.replace(out_file.split('/')[-1],"iqtree.log"))
for f in [log_file, tmp_aln_file]:
if os.path.isfile(f):
os.remove(f)
for ext in [".bionj",".ckp.gz",".iqtree",".log",".mldist",".model.gz",".treefile",".uniqueseq.phy",".model"]:
if os.path.isfile(tmp_aln_file + ext):
os.remove(tmp_aln_file + ext)
except:
print("TREE BUILDING FAILED")
T=None
return T
def load_excluded_sites(excluded_sites_file):
"""Returns an array of zero-based sites to exclude from a FASTA prior to tree building.
Parameters
----------
excluded_sites_file : str
a path to a BED file (with a .bed extension), a tab-delimited DRM file, or a plain text file with one position per line
Returns
-------
ndarray :
a unique array of positions loaded from the given file
"""
strip_pos = []
is_bed_format = False
if excluded_sites_file is not None:
# Check for BED file extension.
if excluded_sites_file.lower().endswith('.bed'):
import pandas as pd
is_bed_format = True
bed = pd.read_csv(excluded_sites_file, sep='\t')
for index, row in bed.iterrows():
strip_pos.extend(list(range(row[1], row[2]+1)))
else:
# Next, check for DRM-file format or site-per-line format.
with open(excluded_sites_file, 'r') as ifile:
line1 = ifile.readline()
# If the file is tab-delimited, assume it is in DRM-file format.
if '\t' in line1:
strip_pos = [int(line.strip().split('\t')[1]) for line in ifile]
else:
# Finally, fall back to site-per-line format.
strip_pos = [int(line.strip()) for line in ifile]
if line1.strip():
# Add the first line back to the list
strip_pos.append(int(line1.strip()))
strip_pos = np.unique(strip_pos)
# If the given sites are not in BED format, they are one-based positions and
# need to be internally adjusted to zero-based positions.
if not is_bed_format:
strip_pos = strip_pos - 1
return strip_pos
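# Illustrative sketch (not part of augur's public interface): a hypothetical
# one-position-per-line exclude file containing "10", "11" and "12" is read as
# one-based positions and comes back as the zero-based array [9, 10, 11].
def _example_exclude_sites(path="excluded_sites_example.txt"):
    with open(path, "w") as handle:
        handle.write("10\n11\n12\n")
    return load_excluded_sites(path)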
def write_out_informative_fasta(compress_seq, alignment, stripFile=None):
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
sequences = compress_seq['sequences']
ref = compress_seq['reference']
positions = compress_seq['positions']
#If want to exclude sites from initial treebuild, read in here
strip_pos = load_excluded_sites(stripFile)
#Get sequence names
seqNames = list(sequences.keys())
#Check non-ref sites to see if informative
printPositionMap = False #If true, prints file mapping Fasta position to real position
sites = []
pos = []
for key in positions:
if key not in strip_pos:
pattern = []
for k in sequences.keys():
#looping try/except is faster than list comprehension
try:
pattern.append(sequences[k][key])
except KeyError:
pattern.append(ref[key])
origPattern = list(pattern)
if '-' in pattern or 'N' in pattern:
#remove gaps/Ns to see if otherwise informative
pattern = [value for value in origPattern if value != '-' and value != 'N']
un = np.unique(pattern, return_counts=True)
#If not all - or N, not all same base, and >1 differing base, append
if len(un[0])!=0 and len(un[0])!=1 and not (len(un[0])==2 and min(un[1])==1):
sites.append(origPattern)
pos.append("\t".join([str(len(pos)+1),str(key)]))
#Rotate and convert to SeqRecord
sites = np.asarray(sites)
align = np.rot90(sites)
seqNamesCorr = list(reversed(seqNames))
toFasta = [ SeqRecord(id=seqNamesCorr[i], seq=Seq("".join(align[i])), description='') for i in range(len(sequences.keys()))]
fasta_file = '/'.join(alignment.split('/')[:-1]) + '/informative_sites.fasta'
#now output this as fasta to read into raxml or iqtree
SeqIO.write(toFasta, fasta_file, 'fasta')
#If want a position map, print:
if printPositionMap:
with open(fasta_file+".positions.txt", 'w') as the_file:
the_file.write("\n".join(pos))
return fasta_file
def mask_sites_in_multiple_sequence_alignment(alignment_file, excluded_sites_file):
"""Creates a new multiple sequence alignment FASTA file from which the given
excluded sites have been removed and returns the filename of the new
alignment.
Parameters
----------
alignment_file : str
path to the original multiple sequence alignment file
excluded_sites_file : str
path to a text file containing each nucleotide position to exclude with one position per line
Returns
-------
str
        path to the new FASTA file in which the excluded sites have been masked
"""
# Load zero-based excluded sites.
excluded_sites = load_excluded_sites(excluded_sites_file).tolist()
# Return the original alignment file, if no excluded sites were found.
if len(excluded_sites) == 0:
return alignment_file
# Load alignment as FASTA generator to prevent loading the whole alignment
# into memory.
alignment = Bio.SeqIO.parse(alignment_file, "fasta")
# Write the masked alignment to disk one record at a time.
alignment_file_path = Path(alignment_file)
masked_alignment_file = str(alignment_file_path.parent / ("masked_%s" % alignment_file_path.name))
with open(masked_alignment_file, "w") as oh:
for record in alignment:
# Convert to a mutable sequence to enable masking with Ns.
sequence = record.seq.tomutable()
# Replace all excluded sites with Ns.
for site in excluded_sites:
sequence[site] = "N"
record.seq = sequence
Bio.SeqIO.write(record, oh, "fasta")
# Return the new alignment FASTA filename.
return masked_alignment_file
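# Illustrative sketch (not part of augur's public interface): with hypothetical
# file names, the two excluded one-based positions are replaced by "N" in a
# copy written next to the original alignment (here "masked_aln_example.fasta").
def _example_mask_sites(aln_path="aln_example.fasta", sites_path="mask_example.txt"):
    with open(aln_path, "w") as handle:
        handle.write(">seq1\nACGTACGT\n")
    with open(sites_path, "w") as handle:
        handle.write("1\n2\n")
    return mask_sites_in_multiple_sequence_alignment(aln_path, sites_path)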
def register_arguments(parser):
parser.add_argument('--alignment', '-a', required=True, help="alignment in fasta or VCF format")
parser.add_argument('--method', default='iqtree', choices=["fasttree", "raxml", "iqtree"], help="tree builder to use")
parser.add_argument('--output', '-o', type=str, help='file name to write tree to')
parser.add_argument('--substitution-model', default="GTR", choices=["HKY", "GTR", "HKY+G", "GTR+G"],
help='substitution model to use. Specify \'none\' to run ModelTest. Currently, only available for IQTREE.')
parser.add_argument('--nthreads', type=nthreads_value, default=1,
help="number of threads to use; specifying the value 'auto' will cause the number of available CPU cores on your system, if determinable, to be used")
parser.add_argument('--vcf-reference', type=str, help='fasta file of the sequence the VCF was mapped to')
parser.add_argument('--exclude-sites', type=str, help='file name of one-based sites to exclude for raw tree building (BED format in .bed files, DRM format in tab-delimited files, or one position per line)')
def run(args):
# check alignment type, set flags, read in if VCF
is_vcf = False
ref = None
if any([args.alignment.lower().endswith(x) for x in ['.vcf', '.vcf.gz']]):
# Prepare a multiple sequence alignment from the given variants VCF and
# reference FASTA.
if not args.vcf_reference:
print("ERROR: a reference Fasta is required with VCF-format alignments")
return 1
compress_seq = read_vcf(args.alignment, args.vcf_reference)
sequences = compress_seq['sequences']
ref = compress_seq['reference']
is_vcf = True
aln = sequences
elif args.exclude_sites:
# Mask excluded sites from the given multiple sequence alignment.
aln = mask_sites_in_multiple_sequence_alignment(args.alignment, args.exclude_sites)
else:
# Use the multiple sequence alignment as is.
aln = args.alignment
start = time.time()
if args.output:
tree_fname = args.output
else:
tree_fname = '.'.join(args.alignment.split('.')[:-1]) + '.nwk'
# construct reduced alignment if needed
if is_vcf:
variable_fasta = write_out_informative_fasta(compress_seq, args.alignment, stripFile=args.exclude_sites)
fasta = variable_fasta
else:
fasta = aln
if args.substitution_model and not args.method=='iqtree':
print("Cannot specify model unless using IQTree. Model specification ignored.")
if args.method=='raxml':
T = build_raxml(fasta, tree_fname, nthreads=args.nthreads)
elif args.method=='iqtree':
T = build_iqtree(fasta, tree_fname, args.substitution_model, nthreads=args.nthreads)
elif args.method=='fasttree':
T = build_fasttree(fasta, tree_fname, nthreads=args.nthreads)
else:
print("ERROR: unknown tree builder provided to --method: %s" % args.method, file = sys.stderr)
return 1
end = time.time()
print("Building original tree took {} seconds".format(str(end-start)))
if T:
import json
tree_success = Phylo.write(T, tree_fname, 'newick', format_branch_length='%1.8f')
else:
return 1
| agpl-3.0 |
edmunoz/aed | twitter/TopicModeling.py | 2 | 4201 | import math
from sklearn.feature_extraction.text import CountVectorizer
from lda import LDA
import matplotlib.pyplot as plt
class TopicModelingMetrics(object):
"""docstring for TopicModelingMetrics"""
def __init__(self):
super(TopicModelingMetrics, self).__init__()
def calculate(self,topic_words,topic_k,word_v):
pass
class TopicSimpleScore(TopicModelingMetrics):
"""
	Probability that word_v belongs to topic topic_k
"""
def __init__(self):
super(TopicSimpleScore, self).__init__()
def calculate(self,topic_words,topic_k,word_v):
return topic_words[topic_k][word_v]
class TopicTermScore(TopicModelingMetrics):
"""
	Score proposed by Blei and Lafferty in Topic Models. In Text Mining: Theory and Applications
	Highlights words with a high probability of occurrence in one topic and a low probability in the remaining topics
"""
def __init__(self):
super(TopicTermScore, self).__init__()
def get_tf_topic(self,topic_words,topic_k,word_v):
return topic_words[topic_k][word_v]
def get_idf_topic(self,topic_words,topic_k,word_v):
num_topics = topic_words.shape[0]
bkn = topic_words[topic_k][word_v]
value = 1.0
for topic_dist in topic_words:
value = value*topic_dist[word_v]
return math.log(1.0*bkn/math.pow(value,1.0/num_topics))
def calculate(self,topic_words,topic_k,word_v):
		tf_topic = self.get_tf_topic(topic_words,topic_k,word_v)
idf_topic = self.get_idf_topic(topic_words,topic_k,word_v)
return tf_topic*idf_topic
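# Illustrative sketch (not part of the original classes): the Blei & Lafferty
# term score for all topic/word pairs at once in NumPy; it matches
# TopicTermScore.calculate entry-wise:
#   score[k, v] = beta[k, v] * log(beta[k, v] / geometric_mean_over_topics(beta[:, v]))
def _example_term_scores(topic_words):
	import numpy as np
	beta = np.asarray(topic_words, dtype=float)
	geometric_mean = np.exp(np.log(beta).mean(axis=0))
	return beta * np.log(beta / geometric_mean)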
class TopicModelingLDA(object):
	# wrapper around the lda library
	# allows characterizing the topics using several scores found in the literature
def __init__(self,corpus,metrics_criteria='simple'):
super(TopicModelingLDA, self).__init__()
self.corpus = corpus
self.select_metric_criteria(metrics_criteria)
self.model = None
self.topic_words = None
self.top_words = None
self.all_words = []
def fit(self,num_topic=5,n_iter=1500):
count_vect = CountVectorizer()
x_train_counts = count_vect.fit_transform(self.corpus)
self.model = LDA(n_topics=num_topic, n_iter=n_iter, random_state=1)
self.model.fit(x_train_counts)
self.topic_words = self.model.topic_word_
self.vocabulary = count_vect.get_feature_names()
def select_metric_criteria(self,metrics_criteria):
if metrics_criteria == 'term_score':
self.metrics = TopicTermScore()
else:
self.metrics = TopicSimpleScore()
def get_highest_scores(self,k_top=10):
		# topic_words is a matrix of shape (number of topics, number of words)
		# row k holds the word distribution of topic k
num_topics = len(self.topic_words)
print ("Numero de topicos",num_topics)
top_words = []
self.top_words = {}
for topic_k in range(num_topics):
scores = []
for v,word in enumerate(self.vocabulary):
score = self.metrics.calculate(self.topic_words,topic_k,v)
scores.append((word,score))
scores.sort(key=lambda tup: tup[1])
scores = scores[-k_top:]
print ("Topico %d"%(topic_k))
for word,score in scores:
print ("%s,%.4f"%(word,score))
print ("")
self.top_words[topic_k] = [{'word':word,'score':score} for word,score in scores]
self.all_words += [ word for word,score in scores]
return self.top_words
def get_all_words(self):
return self.all_words
class TopicModelingEvaluator(object):
"""docstring for TopicModelingEvaluator"""
def __init__(self,corpus,num_topics = [5,10,20]):
		# computes perplexity for each value in the num_topics array
		# call show_perplexity to see how perplexity varies with the number of topics
super(TopicModelingEvaluator, self).__init__()
self.num_topics = num_topics
self.corpus = corpus
def show_perplexity(self):
perplexity = [self.calculate_perplexity(num_topic) for num_topic in self.num_topics]
plt.plot(self.num_topics,perplexity)
plt.xlabel("Num topics")
plt.ylabel("Perplexity")
plt.show()
def calculate_perplexity(self,num_topic):
lda_wrapper = TopicModelingLDA(self.corpus)
lda_wrapper.fit(num_topic,10)
log_likelihood = lda_wrapper.model.loglikelihood()
n_features = lda_wrapper.topic_words.shape[1]
x = 1.0*log_likelihood/n_features
result = math.exp(-1.0*x)
return result | apache-2.0 |
samuelstjean/dipy | dipy/tests/test_scripts.py | 1 | 7485 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
If we appear to be running from the development directory, use the scripts in
the top-level folder ``scripts``. Otherwise try and get the scripts from the
path
"""
from __future__ import division, print_function, absolute_import
import sys
import os
import shutil
from os.path import (dirname, join as pjoin, isfile, isdir, abspath, realpath,
pathsep)
from subprocess import Popen, PIPE
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.utils.six import string_types
from dipy.data import get_data
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
DEBUG_PRINT = os.environ.get('NIPY_DEBUG_PRINT', False)
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def local_script_dir(script_sdir):
# Check for presence of scripts in development directory. ``realpath``
# checks for the situation where the development directory has been linked
# into the path.
below_us_2 = realpath(pjoin(dirname(__file__), '..', '..'))
devel_script_dir = pjoin(below_us_2, script_sdir)
if isfile(pjoin(below_us_2, 'setup.py')) and isdir(devel_script_dir):
return devel_script_dir
return None
LOCAL_SCRIPT_DIR = local_script_dir('bin')
def local_module_dir(module_name):
mod = __import__(module_name)
containing_path = dirname(dirname(realpath(mod.__file__)))
if containing_path == realpath(os.getcwd()):
return containing_path
return None
LOCAL_MODULE_DIR = local_module_dir('dipy')
def run_command(cmd, check_code=True):
""" Run command sequence `cmd` returning exit code, stdout, stderr
Parameters
----------
cmd : str or sequence
string with command name or sequence of strings defining command
check_code : {True, False}, optional
If True, raise error for non-zero return code
Returns
-------
returncode : int
return code from execution of `cmd`
stdout : bytes (python 3) or str (python 2)
stdout from `cmd`
stderr : bytes (python 3) or str (python 2)
stderr from `cmd`
"""
if isinstance(cmd, string_types):
cmd = [cmd]
else:
cmd = list(cmd)
if os.name == 'nt': # Need .bat file extension for windows
cmd[0] += '.bat'
    if LOCAL_SCRIPT_DIR is not None:
# Windows can't run script files without extensions natively so we need
# to run local scripts (no extensions) via the Python interpreter. On
# Unix, we might have the wrong incantation for the Python interpreter
# in the hash bang first line in the source file. So, either way, run
# the script through the Python interpreter
cmd = [sys.executable, pjoin(LOCAL_SCRIPT_DIR, cmd[0])] + cmd[1:]
if DEBUG_PRINT:
print("Running command '%s'" % cmd)
env = os.environ
    if LOCAL_MODULE_DIR is not None:
# module likely comes from the current working directory. We might need
# that directory on the path if we're running the scripts from a
# temporary directory
env = env.copy()
pypath = env.get('PYTHONPATH', None)
if pypath is None:
env['PYTHONPATH'] = LOCAL_MODULE_DIR
else:
env['PYTHONPATH'] = LOCAL_MODULE_DIR + pathsep + pypath
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
stdout, stderr = proc.communicate()
    if proc.poll() is None:
proc.terminate()
if check_code and proc.returncode != 0:
raise RuntimeError(
"""Command "{0}" failed with
stdout
------
{1}
stderr
------
{2}
""".format(cmd, stdout, stderr))
return proc.returncode, stdout, stderr
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory() as tmp:
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@nt.dec.skipif(no_mpl)
def test_qb_commandline():
with InTemporaryDirectory() as tmp:
tracks_file = get_data('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
assert_equal(out[0], 0)
| bsd-3-clause |
cdegroc/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 1 | 5506 | # Author: Vlad Niculae
# License: BSD
import sys
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from nose import SkipTest
from nose.tools import assert_true
from .. import SparsePCA, MiniBatchSparsePCA
from ...utils import check_random_state
def generate_toy_data(n_atoms, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_atoms)
V = rng.randn(n_atoms, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_atoms):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
spca = SparsePCA(n_components=3, n_jobs=2, random_state=0,
alpha=alpha).fit(Y)
U2 = spca.transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
vjuranek/rg-offline-plotting | src/python/rgplot/LineChart.py | 1 | 2596 | import matplotlib.pyplot as plt
import numpy as np
import constants as const
from matplotlib.ticker import Formatter
from RgChart import RgChart
from RgVars import MRT, AT
class LineChart(RgChart):
def __init__(self, *measurements):
self._fig, self._ax = plt.subplots()
self._lines = self._create_base_line(measurements)
def with_defaults(self):
self.with_title().with_ylabel().with_grids()
return self
def with_xticks_names(self, xt_names = None):
if xt_names is not None:
formatter = XAxFormatter(xt_names)
self._ax.xaxis.set_major_formatter(formatter)
#self.__fig.autofmt_xdate()
return self
def with_line(self, color = 'b', *measurements):
self._lines.append(self._create_line(measurements, color)[0])
return self
def with_legend(self, *labels):
for i in range(0,len(labels)):
self._lines[i].set_label(labels[i])
self._legend = plt.legend(loc='upper center', fancybox=True, shadow=True, ncol=5)
return self
def _create_line(self, measurements, color = 'b'):
nm = len(measurements)
ind = np.arange(nm)
val, xax = [], []
for i in range(0, len(measurements)):
val.append(measurements[i]._mrt)
xax.append(measurements[i]._title)
#decide if plot MRT or AT
if (issubclass(measurements[i]._rg_var, AT)):
return None #TODO
elif (issubclass(measurements[i]._rg_var, MRT)):
return plt.plot(ind, val, '%co-'%color)
else:
raise ValueError("Unknown variable")
def _create_base_line(self, measurements):
self._title = measurements[0]._title if measurements[0]._title is not None else ""
		self._ylabel = measurements[0]._rg_var.ylabel if measurements[0]._rg_var is not None else ""
return self._create_line(measurements)
def _create_plot(self):
baseline = self._lines[0]
nm = len(baseline.get_data()[0]) # number of measurements on base line
self._ax.set_xlim(-const.X_AX_OFFSET, nm - const.X_AX_OFFSET)
plt.setp([self._lines])
class XAxFormatter(Formatter):
def __init__(self, xvals):
self._xvals = xvals
def __call__(self, x, pos=0):
'Return the label for time x at position pos'
#ind = int(round(x))
#if ind >= len(self._xvals) or ind < 0: return ''
if(x.is_integer()):
ind = int(x)
return self._xvals[ind]
else:
return ''
| mit |
ClimbsRocks/auto_ml | tests/core_tests/quick_test.py | 1 | 4151 | # """
# nosetests -sv --nologcapture tests/quick_test.py
# nosetests --verbosity=2 --detailed-errors --nologcapture --processes=4 --process-restartworker --process-timeout=1000 tests/quick_test.py
# """
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
# os.environ['KERAS_BACKEND'] = 'theano'
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
import dill
import numpy as np
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
def get_boston_regression_dataset():
boston = load_boston()
df_boston = pd.DataFrame(boston.data)
df_boston.columns = boston.feature_names
df_boston['MEDV'] = boston['target']
df_boston_train, df_boston_test = train_test_split(df_boston, test_size=0.33, random_state=42)
return df_boston_train, df_boston_test
def regression_test():
# a random seed of 42 has ExtraTreesRegressor getting the best CV score, and that model doesn't generalize as well as GradientBoostingRegressor.
np.random.seed(0)
model_name = 'LGBMRegressor'
df_boston_train, df_boston_test = get_boston_regression_dataset()
many_dfs = []
for i in range(100):
many_dfs.append(df_boston_train)
df_boston_train = pd.concat(many_dfs)
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, model_names=[model_name], perform_feature_scaling=False)
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
lower_bound = -3.2
if model_name == 'DeepLearningRegressor':
lower_bound = -7.8
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.4
assert lower_bound < test_score < -2.8
def get_titanic_binary_classification_dataset(basic=True):
try:
df_titanic = pd.read_csv(os.path.join('tests', 'titanic.csv'))
except Exception as e:
print('Error')
print(e)
dataset_url = 'http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv'
df_titanic = pd.read_csv(dataset_url)
# Do not write the index that pandas automatically creates
df_titanic.to_csv(os.path.join('tests', 'titanic.csv'), index=False)
df_titanic = df_titanic.drop(['boat', 'body'], axis=1)
if basic == True:
df_titanic = df_titanic.drop(['name', 'ticket', 'cabin', 'home.dest'], axis=1)
df_titanic_train, df_titanic_test = train_test_split(df_titanic, test_size=0.33, random_state=42)
return df_titanic_train, df_titanic_test
def classification_test():
np.random.seed(0)
# model_name = 'GradientBoostingClassifier'
model_name = 'LGBMClassifier'
df_titanic_train, df_titanic_test = get_titanic_binary_classification_dataset()
df_titanic_train['DELETE_THIS_FIELD'] = 1
column_descriptions = {
'survived': 'output'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
, 'sex': 'categorical'
, 'this_does_not_exist': 'ignore'
, 'DELETE_THIS_FIELD': 'ignore'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=model_name)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
lower_bound = -0.16
if model_name == 'DeepLearningClassifier':
lower_bound = -0.245
if model_name == 'LGBMClassifier':
lower_bound = -0.225
assert lower_bound < test_score < -0.135
if __name__ == '__main__':
classification_test()
| mit |
bundgus/python-playground | matplotlib-playground/examples/user_interfaces/embedding_in_gtk2.py | 1 | 1459 | #!/usr/bin/env python
"""
show how to add a matplotlib FigureCanvasGTK or FigureCanvasGTKAgg widget and
a toolbar to a gtk.Window
"""
import gtk
from matplotlib.figure import Figure
from numpy import arange, sin, pi
# uncomment to select /GTK/GTKAgg/GTKCairo
#from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
#from matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as FigureCanvas
# or NavigationToolbar for classic
#from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
win = gtk.Window()
win.connect("destroy", lambda x: gtk.main_quit())
win.set_default_size(400, 300)
win.set_title("Embedding in GTK")
vbox = gtk.VBox()
win.add(vbox)
fig = Figure(figsize=(5, 4), dpi=100)
ax = fig.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
ax.plot(t, s)
canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(canvas)
toolbar = NavigationToolbar(canvas, win)
vbox.pack_start(toolbar, False, False)
def on_key_event(event):
print('you pressed %s' % event.key)
key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
win.show_all()
gtk.main()
| mit |
biocore-ntnu/pyranges | pyranges/methods/attr.py | 1 | 1808 | import pyranges as pr
import numpy as np
import pandas as pd
def _setattr(self, column_name, column, pos=False):
if not len(self):
return
isiterable = isinstance(column, (list, pd.Series, np.ndarray))
isdict = isinstance(column, dict)
if isiterable:
if not len(self) == len(column):
raise Exception("DataFrame and column must be same length.")
already_exists = column_name in self.columns
if isinstance(column, pd.Series):
column = column.values
if already_exists:
pos = list(self.values()[0].columns).index(column_name)
elif not pos:
pos = self.values()[0].shape[1]
start_length, end_length = 0, 0
dfs = {}
for k, df in self.items():
end_length += len(df)
if already_exists:
df = df.drop(column_name, axis=1)
if isiterable:
df.insert(pos, column_name, column[start_length:end_length])
elif isdict:
if isinstance(column[k], pd.Series):
_column = column[k].values
else:
_column = column[k]
df.insert(pos, column_name, _column)
else:
df.insert(pos, column_name, column)
start_length = end_length
dfs[k] = df
if column_name not in ["Chromosome", "Strand"]:
self.__dict__["dfs"] = dfs
else:
int64 = True if self.dtypes["Start"] == np.int64 else False
# will merge the dfs, then split on keys again to ensure they are correct
self.__dict__["dfs"] = pr.PyRanges(pr.PyRanges(dfs).df, int64=int64).dfs
def _getattr(self, name):
if name in self.columns:
return pd.concat([df[name] for df in self.values()])
else:
raise AttributeError("PyRanges object has no attribute", name)
| mit |
Batch21/pywr | tests/test_parameters.py | 1 | 38337 | """
Test for individual Parameter classes
"""
from __future__ import division
from pywr.core import Model, Timestep, Scenario, ScenarioIndex, Storage, Link, Input, Output
from pywr.parameters import (Parameter, ArrayIndexedParameter, ConstantScenarioParameter,
ArrayIndexedScenarioMonthlyFactorsParameter, MonthlyProfileParameter, DailyProfileParameter,
DataFrameParameter, AggregatedParameter, ConstantParameter,
IndexParameter, AggregatedIndexParameter, RecorderThresholdParameter, ScenarioMonthlyProfileParameter,
Polynomial1DParameter, Polynomial2DStorageParameter, ArrayIndexedScenarioParameter,
InterpolatedParameter, WeeklyProfileParameter,
FunctionParameter, AnnualHarmonicSeriesParameter, load_parameter)
from pywr.recorders import AssertionRecorder, assert_rec
from pywr.model import OrphanedParameterWarning
from pywr.recorders import Recorder
from fixtures import simple_linear_model, simple_storage_model
from helpers import load_model
import os
import datetime
import numpy as np
import pandas as pd
import pytest
import itertools
from numpy.testing import assert_allclose
TEST_DIR = os.path.dirname(__file__)
@pytest.fixture
def model(solver):
return Model(solver=solver)
def test_parameter_array_indexed(simple_linear_model):
"""
Test ArrayIndexedParameter
"""
model = simple_linear_model
A = np.arange(len(model.timestepper), dtype=np.float64)
p = ArrayIndexedParameter(model, A)
model.setup()
# scenario indices (not used for this test)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
# Now check that IndexError is raised if an out of bounds Timestep is given.
ts = Timestep(datetime.datetime(2016, 1, 1), 366, 1.0)
with pytest.raises(IndexError):
p.value(ts, si)
def test_parameter_array_indexed_json_load(simple_linear_model, tmpdir):
"""Test ArrayIndexedParameter can be loaded from json dict"""
model = simple_linear_model
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D', name='date')
df = pd.DataFrame(np.arange(365), index=index, columns=['data'])
df_path = tmpdir.join('df.csv')
df.to_csv(str(df_path))
data = {
'type': 'arrayindexed',
'url': str(df_path),
'index_col': 'date',
'parse_dates': True,
'column': 'data',
}
p = load_parameter(model, data)
model.setup()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for v, ts in enumerate(model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
def test_parameter_constant_scenario(simple_linear_model):
"""
Test ConstantScenarioParameter
"""
model = simple_linear_model
# Add two scenarios
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=5)
p = ConstantScenarioParameter(model, scB, np.arange(scB.size, dtype=np.float64))
model.setup()
ts = model.timestepper.current
# Now ensure the appropriate value is returned for the Scenario B indices.
for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), float(b))
def test_parameter_array_indexed_scenario_monthly_factors(simple_linear_model):
"""
    Test ArrayIndexedScenarioMonthlyFactorsParameter
"""
model = simple_linear_model
# Baseline timeseries data
values = np.arange(len(model.timestepper), dtype=np.float64)
# Add two scenarios
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=5)
# Random factors for each Scenario B value per month
factors = np.random.rand(scB.size, 12)
p = ArrayIndexedScenarioMonthlyFactorsParameter(model, scB, values, factors)
model.setup()
# Iterate in time
for v, ts in zip(values, model.timestepper):
imth = ts.datetime.month - 1
# Now ensure the appropriate value is returned for the Scenario B indices.
for i, (a, b) in enumerate(itertools.product(range(scA.size), range(scB.size))):
f = factors[b, imth]
si = ScenarioIndex(i, np.array([a, b], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), v*f)
def test_parameter_array_indexed_scenario_monthly_factors_json(model):
model.path = os.path.join(TEST_DIR, "models")
scA = Scenario(model, 'Scenario A', size=2)
scB = Scenario(model, 'Scenario B', size=3)
p1 = ArrayIndexedScenarioMonthlyFactorsParameter.load(model, {
"scenario": "Scenario A",
"values": list(range(32)),
"factors": [list(range(1, 13)),list(range(13, 25))],
})
p2 = ArrayIndexedScenarioMonthlyFactorsParameter.load(model, {
"scenario": "Scenario B",
"values": {
"url": "timeseries1.csv",
"index_col": "Timestamp",
"column": "Data",
},
"factors": {
"url": "monthly_profiles.csv",
"index_col": "scenario",
},
})
node1 = Input(model, "node1", max_flow=p1)
node2 = Input(model, "node2", max_flow=p2)
nodeN = Output(model, "nodeN", max_flow=None, cost=-1)
node1.connect(nodeN)
node2.connect(nodeN)
model.timestepper.start = "2015-01-01"
model.timestepper.end = "2015-01-31"
model.run()
def test_parameter_monthly_profile(simple_linear_model):
"""
Test MonthlyProfileParameter
"""
model = simple_linear_model
values = np.arange(12, dtype=np.float64)
p = MonthlyProfileParameter(model, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
imth = ts.datetime.month - 1
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[imth])
class TestScenarioMonthlyProfileParameter:
def test_init(self, simple_linear_model):
model = simple_linear_model
scenario = Scenario(model, 'A', 10)
values = np.random.rand(10, 12)
p = ScenarioMonthlyProfileParameter(model, scenario, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
imth = ts.datetime.month - 1
for i in range(scenario.size):
si = ScenarioIndex(i, np.array([i], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[i, imth])
def test_json(self, solver):
model = load_model('scenario_monthly_profile.json', solver=solver)
# check first day initalised
assert (model.timestepper.start == datetime.datetime(2015, 1, 1))
# check results
supply1 = model.nodes['supply1']
# Multiplication factors
factors = np.array([
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22],
])
for expected in (23.92, 22.14, 22.57, 24.97, 27.59):
model.step()
imth = model.timestepper.current.month - 1
assert_allclose(supply1.flow, expected*factors[:, imth], atol=1e-7)
def test_parameter_daily_profile(simple_linear_model):
"""
Test DailyProfileParameter
"""
model = simple_linear_model
values = np.arange(366, dtype=np.float64)
p = DailyProfileParameter(model, values)
model.setup()
# Iterate in time
for ts in model.timestepper:
month = ts.datetime.month
day = ts.datetime.day
iday = int((datetime.datetime(2016, month, day) - datetime.datetime(2016, 1, 1)).days)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(p.value(ts, si), values[iday])
def test_daily_profile_leap_day(model):
"""Test behaviour of daily profile parameter for leap years
"""
inpt = Input(model, "input")
otpt = Output(model, "otpt", max_flow=None, cost=-999)
inpt.connect(otpt)
inpt.max_flow = DailyProfileParameter(model, np.arange(0, 366, dtype=np.float64))
# non-leap year
model.timestepper.start = pd.to_datetime("2015-01-01")
model.timestepper.end = pd.to_datetime("2015-12-31")
model.run()
assert_allclose(inpt.flow, 365) # NOT 364
# leap year
model.timestepper.start = pd.to_datetime("2016-01-01")
model.timestepper.end = pd.to_datetime("2016-12-31")
model.run()
assert_allclose(inpt.flow, 365)
def test_weekly_profile(simple_linear_model):
model = simple_linear_model
model.timestepper.start = "2004-01-01"
model.timestepper.end = "2005-05-01"
model.timestepper.delta = 7
values = np.arange(0, 52) ** 2 + 27.5
p = WeeklyProfileParameter.load(model, {"values": values})
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
week = int(min((timestep.dayofyear - 1) // 7, 51))
value = week ** 2 + 27.5
return value
model.run()
class TestAnnualHarmonicSeriesParameter:
""" Tests for `AnnualHarmonicSeriesParameter` """
def test_single_harmonic(self, model):
p1 = AnnualHarmonicSeriesParameter(model, 0.5, [0.25], [np.pi/4])
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1)/365
np.testing.assert_allclose(p1.value(ts, si), 0.5 + 0.25*np.cos(doy*2*np.pi + np.pi/4))
def test_double_harmonic(self, model):
p1 = AnnualHarmonicSeriesParameter(model, 0.5, [0.25, 0.3], [np.pi/4, np.pi/3])
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1) /365
expected = 0.5 + 0.25*np.cos(doy*2*np.pi + np.pi / 4) + 0.3*np.cos(doy*4*np.pi + np.pi/3)
np.testing.assert_allclose(p1.value(ts, si), expected)
def test_load(self, model):
data = {
"type": "annualharmonicseries",
"mean": 0.5,
"amplitudes": [0.25],
"phases": [np.pi/4]
}
p1 = load_parameter(model, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
for ts in model.timestepper:
doy = (ts.datetime.dayofyear - 1) / 365
np.testing.assert_allclose(p1.value(ts, si), 0.5 + 0.25 * np.cos(doy * 2 * np.pi + np.pi / 4))
class TestAggregatedParameter:
"""Tests for AggregatedParameter"""
funcs = {"min": np.min, "max": np.max, "mean": np.mean, "median": np.median, "sum": np.sum}
@pytest.mark.parametrize("agg_func", ["min", "max", "mean", "median", "sum"])
def test_agg(self, simple_linear_model, agg_func):
model = simple_linear_model
model.timestepper.delta = 15
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
values = np.arange(366, dtype=np.float64)
p1 = DailyProfileParameter(model, values)
p2 = ConstantScenarioParameter(model, scenarioB, np.arange(scenarioB.size, dtype=np.float64))
p = AggregatedParameter(model, [p1, p2], agg_func=agg_func)
func = TestAggregatedParameter.funcs[agg_func]
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
x = p1.get_value(scenario_index)
y = p2.get_value(scenario_index)
return func(np.array([x,y]))
model.run()
def test_load(self, simple_linear_model):
""" Test load from JSON dict"""
model = simple_linear_model
data = {
"type": "aggregated",
"agg_func": "product",
"parameters": [
0.8,
{
"type": "monthlyprofile",
"values": list(range(12))
}
]
}
p = load_parameter(model, data)
# Correct instance is loaded
assert isinstance(p, AggregatedParameter)
@assert_rec(model, p)
def expected(timestep, scenario_index):
return (timestep.month - 1) * 0.8
model.run()
class DummyIndexParameter(IndexParameter):
"""A simple IndexParameter which returns a constant value"""
def __init__(self, model, index, **kwargs):
super(DummyIndexParameter, self).__init__(model, **kwargs)
self._index = index
def index(self, timestep, scenario_index):
return self._index
def __repr__(self):
return "<DummyIndexParameter \"{}\">".format(self.name)
class TestAggregatedIndexParameter:
"""Tests for AggregatedIndexParameter"""
funcs = {"min": np.min, "max": np.max, "sum": np.sum, "product": np.product}
@pytest.mark.parametrize("agg_func", ["min", "max", "sum", "product"])
def test_agg(self, simple_linear_model, agg_func):
model = simple_linear_model
model.timestepper.delta = 1
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-03"
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
p1 = DummyIndexParameter(model, 2)
p2 = DummyIndexParameter(model, 3)
p = AggregatedIndexParameter(model, [p1, p2], agg_func=agg_func)
func = TestAggregatedIndexParameter.funcs[agg_func]
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
x = p1.get_index(scenario_index)
y = p2.get_index(scenario_index)
return func(np.array([x,y], np.int32))
model.run()
def test_agg_anyall(self, simple_linear_model):
"""Test the "any" and "all" aggregation functions"""
model = simple_linear_model
model.timestepper.delta = 1
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-03"
scenarioA = Scenario(model, "Scenario A", size=2)
scenarioB = Scenario(model, "Scenario B", size=5)
num_comb = len(model.scenarios.get_combinations())
parameters = {
0: DummyIndexParameter(model, 0, name="p0"),
1: DummyIndexParameter(model, 1, name="p1"),
2: DummyIndexParameter(model, 2, name="p2"),
}
data = [(0, 0), (1, 0), (0, 1), (1, 1), (1, 1, 1), (0, 2)]
data_parameters = [[parameters[i] for i in d] for d in data]
expected = [(np.any(d), np.all(d)) for d in data]
for n, params in enumerate(data_parameters):
for m, agg_func in enumerate(["any", "all"]):
p = AggregatedIndexParameter(model, params, agg_func=agg_func)
e = np.ones([len(model.timestepper), num_comb]) * expected[n][m]
r = AssertionRecorder(model, p, expected_data=e, name="assertion {}-{}".format(n, agg_func))
model.run()
def test_parameter_child_variables(model):
p1 = Parameter(model)
# Default parameter
assert len(p1.parents) == 0
assert len(p1.children) == 0
c1 = Parameter(model)
c1.parents.add(p1)
assert len(p1.children) == 1
assert c1 in p1.children
assert p1 in c1.parents
# Test third level
c2 = Parameter(model)
c2.parents.add(c1)
# Disable parent
c1.parents.clear()
assert len(p1.children) == 0
def test_scaled_profile_nested_load(model):
""" Test `ScaledProfileParameter` loading with `AggregatedParameter` """
model.timestepper.delta = 15
s = Storage(model, 'Storage', max_volume=100.0, num_outputs=0)
d = Output(model, 'Link')
data = {
'type': 'scaledprofile',
'scale': 50.0,
'profile': {
'type': 'aggregated',
'agg_func': 'product',
'parameters': [
{
'type': 'monthlyprofile',
'values': [0.5]*12
},
{
'type': 'monthlyprofilecontrolcurve',
'control_curves': [0.8, 0.6],
'values': [[1.0]*12, [0.7]*np.arange(12), [0.3]*12],
'storage_node': 'Storage'
}
]
}
}
s.connect(d)
d.max_flow = p = load_parameter(model, data)
@assert_rec(model, p)
def expected_func(timestep, scenario_index):
if s.initial_volume == 90:
return 50.0*0.5*1.0
elif s.initial_volume == 70:
return 50.0 * 0.5 * 0.7 * (timestep.month - 1)
else:
return 50.0 * 0.5 * 0.3
for initial_volume in (90, 70, 30):
s.initial_volume = initial_volume
model.run()
def test_parameter_df_upsampling(model):
""" Test that the `DataFrameParameter` can upsample data from a `pandas.DataFrame` and return that correctly
"""
# scenario indices (not used for this test)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
# Use a 7 day timestep for this test and run 2015
model.timestepper.delta = datetime.timedelta(7)
model.timestepper.start = pd.to_datetime('2015-01-01')
model.timestepper.end = pd.to_datetime('2015-12-31')
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
p.setup()
A = series.resample('7D').mean()
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
model.reset()
# Daily time-step that requires aligning
index = pd.date_range('2014-12-31', periods=366, freq='D')
series = pd.Series(np.arange(366), index=index)
p = DataFrameParameter(model, series)
p.setup()
# offset the resample appropriately for the test
A = series[1:].resample('7D').mean()
for v, ts in zip(A, model.timestepper):
np.testing.assert_allclose(p.value(ts, si), v)
model.reset()
# Daily time-step that is not covering the require range
index = pd.date_range('2015-02-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
with pytest.raises(ValueError):
p.setup()
model.reset()
# Daily time-step that is not covering the require range
index = pd.date_range('2014-11-01', periods=365, freq='D')
series = pd.Series(np.arange(365), index=index)
p = DataFrameParameter(model, series)
with pytest.raises(ValueError):
p.setup()
def test_parameter_df_upsampling_multiple_columns(model):
""" Test that the `DataFrameParameter` works with multiple columns that map to a `Scenario`
"""
scA = Scenario(model, 'A', size=20)
scB = Scenario(model, 'B', size=2)
# scenario indices (not used for this test)
# Use a 7 day timestep for this test and run 2015
model.timestepper.delta = datetime.timedelta(7)
model.timestepper.start = pd.to_datetime('2015-01-01')
model.timestepper.end = pd.to_datetime('2015-12-31')
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D')
df = pd.DataFrame(np.random.rand(365, 20), index=index)
p = DataFrameParameter(model, df, scenario=scA)
p.setup()
A = df.resample('7D', axis=0).mean()
for v, ts in zip(A.values, model.timestepper):
np.testing.assert_allclose([p.value(ts, ScenarioIndex(i, np.array([i], dtype=np.int32))) for i in range(20)], v)
p = DataFrameParameter(model, df, scenario=scB)
with pytest.raises(ValueError):
p.setup()
def test_parameter_df_json_load(model, tmpdir):
# Daily time-step
index = pd.date_range('2015-01-01', periods=365, freq='D', name='date')
df = pd.DataFrame(np.random.rand(365), index=index, columns=['data'])
df_path = tmpdir.join('df.csv')
df.to_csv(str(df_path))
data = {
'type': 'dataframe',
'url': str(df_path),
'index_col': 'date',
'parse_dates': True,
}
p = load_parameter(model, data)
p.setup()
def test_simple_json_parameter_reference(solver):
# note that parameters in the "parameters" section cannot be literals
model = load_model("parameter_reference.json")
max_flow = model.nodes["supply1"].max_flow
assert(isinstance(max_flow, ConstantParameter))
assert(max_flow.value(None, None) == 125.0)
cost = model.nodes["demand1"].cost
assert(isinstance(cost, ConstantParameter))
assert(cost.value(None, None) == -10.0)
assert(len(model.parameters) == 4) # 4 parameters defined
def test_threshold_parameter(simple_linear_model):
model = simple_linear_model
model.timestepper.delta = 150
scenario = Scenario(model, "Scenario", size=2)
class DummyRecorder(Recorder):
def __init__(self, model, value, *args, **kwargs):
super(DummyRecorder, self).__init__(model, *args, **kwargs)
self.val = value
def setup(self):
super(DummyRecorder, self).setup()
num_comb = len(model.scenarios.combinations)
self.data = np.empty([len(model.timestepper), num_comb], dtype=np.float64)
def after(self):
timestep = model.timestepper.current
self.data[timestep.index, :] = self.val
threshold = 10.0
values = [50.0, 60.0]
rec1 = DummyRecorder(model, threshold-5, name="rec1") # below
rec2 = DummyRecorder(model, threshold, name="rec2") # equal
rec3 = DummyRecorder(model, threshold+5, name="rec3") # above
expected = [
("LT", (1, 0, 0)),
("GT", (0, 0, 1)),
("EQ", (0, 1, 0)),
("LE", (1, 1, 0)),
("GE", (0, 1, 1)),
]
for predicate, (value_lt, value_eq, value_gt) in expected:
for rec in (rec1, rec2, rec3):
param = RecorderThresholdParameter(model, rec, threshold, values=values, predicate=predicate)
e_val = values[getattr(rec.val, "__{}__".format(predicate.lower()))(threshold)]
e = np.ones([len(model.timestepper), len(model.scenarios.get_combinations())]) * e_val
e[0, :] = values[1] # first timestep is always "on"
r = AssertionRecorder(model, param, expected_data=e)
r.name = "assert {} {} {}".format(rec.val, predicate, threshold)
model.run()
def test_constant_from_df(solver):
"""
Test that a dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('simple_df.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -10.0)
def test_constant_from_shared_df(solver):
"""
Test that a shared dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('simple_df_shared.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -10.0)
def test_constant_from_multiindex_df(solver):
"""
    Test that a multi-index dataframe can be used to provide data to ConstantParameter (single values).
"""
model = load_model('multiindex_df.json', solver=solver)
assert isinstance(model.nodes['demand1'].max_flow, ConstantParameter)
assert isinstance(model.nodes['demand1'].cost, ConstantParameter)
ts = model.timestepper.next()
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
np.testing.assert_allclose(model.nodes['demand1'].max_flow.value(ts, si), 10.0)
np.testing.assert_allclose(model.nodes['demand1'].cost.value(ts, si), -100.0)
def test_parameter_registry_overwrite(model):
# define a parameter
class NewParameter(Parameter):
DATA = 42
def __init__(self, model, values, *args, **kwargs):
super(NewParameter, self).__init__(model, *args, **kwargs)
self.values = values
NewParameter.register()
# re-define a parameter
class NewParameter(IndexParameter):
DATA = 43
def __init__(self, model, values, *args, **kwargs):
super(NewParameter, self).__init__(model, *args, **kwargs)
self.values = values
NewParameter.register()
data = {
"type": "new",
"values": 0
}
parameter = load_parameter(model, data)
# parameter is instance of new class, not old class
assert(isinstance(parameter, NewParameter))
assert(parameter.DATA == 43)
def test_invalid_parameter_values():
"""
Test that `load_parameter_values` returns a ValueError rather than KeyError.
This is useful to catch and give useful messages when no valid reference to
a data location is given.
Regression test for Issue #247 (https://github.com/pywr/pywr/issues/247)
"""
from pywr.parameters._parameters import load_parameter_values
m = Model()
data = {'name': 'my_parameter', 'type': 'AParameterThatShouldHaveValues'}
with pytest.raises(ValueError):
        load_parameter_values(m, data)
class Test1DPolynomialParameter:
""" Tests for `Polynomial1DParameter` """
def test_init(self, simple_storage_model):
""" Test initialisation raises error with too many keywords """
stg = simple_storage_model.nodes['Storage']
param = ConstantParameter(simple_storage_model, 2.0)
with pytest.raises(ValueError):
# Passing both "parameter" and "storage_node" is invalid
Polynomial1DParameter(simple_storage_model, [0.5, np.pi], parameter=param, storage_node=stg)
def test_1st_order_with_parameter(self, simple_linear_model):
""" Test 1st order with a `Parameter` """
model = simple_linear_model
x = 2.0
p1 = Polynomial1DParameter(model, [0.5, np.pi], parameter=ConstantParameter(model, x))
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi * x
model.run()
def test_2nd_order_with_parameter(self, simple_linear_model):
""" Test 2nd order with a `Parameter` """
model = simple_linear_model
x = 2.0
px = ConstantParameter(model, x)
p1 = Polynomial1DParameter(model, [0.5, np.pi, 3.0], parameter=px)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 3.0*x**2
model.run()
def test_1st_order_with_storage(self, simple_storage_model):
""" Test with a `Storage` node """
model = simple_storage_model
stg = model.nodes['Storage']
x = stg.initial_volume
p1 = Polynomial1DParameter(model, [0.5, np.pi], storage_node=stg)
p2 = Polynomial1DParameter(model, [0.5, np.pi], storage_node=stg, use_proportional_volume=True)
# Test with absolute storage
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x
# Test with proportional storage
@assert_rec(model, p2, name="proportionalassertion")
def expected_func(timestep, scenario_index):
return 0.5 + np.pi * x/stg.max_volume
model.setup()
model.step()
def test_load(self, simple_linear_model):
model = simple_linear_model
x = 1.5
data = {
"type": "polynomial1d",
"coefficients": [0.5, 2.5],
"parameter": {
"type": "constant",
"value": x
}
}
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + 2.5*x
model.run()
def test_load_with_scaling(self, simple_linear_model):
model = simple_linear_model
x = 1.5
data = {
"type": "polynomial1d",
"coefficients": [0.5, 2.5],
"parameter": {
"type": "constant",
"value": x
},
"scale": 1.25,
"offset": 0.75
}
xscaled = x*1.25 + 0.75
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + 2.5*xscaled
model.run()
def test_interpolated_parameter(simple_linear_model):
model = simple_linear_model
model.timestepper.start = "1920-01-01"
model.timestepper.end = "1920-01-12"
p1 = ArrayIndexedParameter(model, [0,1,2,3,4,5,6,7,8,9,10,11])
p2 = InterpolatedParameter(model, p1, [0, 5, 10, 11], [0, 5*2, 10*3, 2])
@assert_rec(model, p2)
def expected_func(timestep, scenario_index):
values = [0, 2, 4, 6, 8, 10, 14, 18, 22, 26, 30, 2]
return values[timestep.index]
model.run()
class Test2DStoragePolynomialParameter:
def test_1st(self, simple_storage_model):
""" Test 1st order """
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume
coefs = [[0.5, np.pi], [2.5, 0.3]]
p1 = Polynomial2DStorageParameter(model, coefs, stg, ConstantParameter(model, x))
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
def test_load(self, simple_storage_model):
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume/stg.max_volume
data = {
"type": "polynomial2dstorage",
"coefficients": [[0.5, np.pi], [2.5, 0.3]],
"use_proportional_volume": True,
"parameter": {
"type": "constant",
"value": x
},
"storage_node": "Storage"
}
p1 = load_parameter(model, data)
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
def test_load_wth_scaling(self, simple_storage_model):
model = simple_storage_model
stg = model.nodes['Storage']
x = 2.0
y = stg.initial_volume/stg.max_volume
data = {
"type": "polynomial2dstorage",
"coefficients": [[0.5, np.pi], [2.5, 0.3]],
"use_proportional_volume": True,
"parameter": {
"type": "constant",
"value": x
},
"storage_node": "Storage",
"storage_scale": 1.3,
"storage_offset": 0.75,
"parameter_scale": 1.25,
"parameter_offset": -0.5
}
p1 = load_parameter(model, data)
# Scaled parameters
x = x*1.25 - 0.5
y = y*1.3 + 0.75
@assert_rec(model, p1)
def expected_func(timestep, scenario_index):
return 0.5 + np.pi*x + 2.5*y+ 0.3*x*y
model.setup()
model.step()
class TestMinMaxNegativeParameter:
@pytest.mark.parametrize("ptype,profile", [
("max", list(range(-10, 356))),
("min", list(range(0, 366))),
("negative", list(range(-366, 0))),
("negativemax", list(range(-366, 0))),
])
def test_parameter(cls, simple_linear_model, ptype,profile):
model = simple_linear_model
model.timestepper.start = "2017-01-01"
model.timestepper.end = "2017-01-15"
data = {
"type": ptype,
"parameter": {
"name": "raw",
"type": "dailyprofile",
"values": profile,
}
}
if ptype in ("max", "min"):
data["threshold"] = 3
func = {"min": min, "max": max, "negative": lambda t,x: -x, "negativemax": lambda t,x: max(t, -x)}[ptype]
model.nodes["Input"].max_flow = parameter = load_parameter(model, data)
model.nodes["Output"].max_flow = 9999
model.nodes["Output"].cost = -100
daily_profile = model.parameters["raw"]
@assert_rec(model, parameter)
def expected(timestep, scenario_index):
value = daily_profile.get_value(scenario_index)
return func(3, value)
model.run()
def test_ocptt(simple_linear_model):
model = simple_linear_model
inpt = model.nodes["Input"]
s1 = Scenario(model, "scenario 1", size=3)
s2 = Scenario(model, "scenario 1", size=2)
x = np.arange(len(model.timestepper)).reshape([len(model.timestepper), 1]) + 5
y = np.arange(s1.size).reshape([1, s1.size])
z = x * y ** 2
p = ArrayIndexedScenarioParameter(model, s1, z)
inpt.max_flow = p
model.setup()
model.reset()
model.step()
values1 = [p.get_value(scenario_index) for scenario_index in model.scenarios.combinations]
values2 = list(p.get_all_values())
assert_allclose(values1, [0, 0, 5, 5, 20, 20])
assert_allclose(values2, [0, 0, 5, 5, 20, 20])
class TestThresholdParameters:
def test_storage_threshold_parameter(self, simple_storage_model):
""" Test StorageThresholdParameter """
m = simple_storage_model
data = {
"type": "storagethreshold",
"storage_node": "Storage",
"threshold": 10.0,
"predicate": ">"
}
p1 = load_parameter(m, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
m.nodes['Storage'].initial_volume = 15.0
m.setup()
# Storage > 10
assert p1.index(m.timestepper.current, si) == 1
m.nodes['Storage'].initial_volume = 5.0
m.setup()
# Storage < 10
assert p1.index(m.timestepper.current, si) == 0
def test_node_threshold_parameter2(self, simple_linear_model):
model = simple_linear_model
model.nodes["Input"].max_flow = ArrayIndexedParameter(model, np.arange(0, 20))
model.nodes["Output"].cost = -10.0
model.timestepper.start = "1920-01-01"
model.timestepper.end = "1920-01-15"
model.timestepper.delta = 1
threshold = 5.0
parameters = {}
for predicate in (">", "<", "="):
data = {
"type": "nodethreshold",
"node": "Output",
"threshold": 5.0,
"predicate": predicate,
# we need to define values so AssertionRecorder can be used
"values": [0.0, 1.0],
}
parameter = load_parameter(model, data)
parameter.name = "nodethresold {}".format(predicate)
parameters[predicate] = parameter
if predicate == ">":
expected_data = (np.arange(-1, 20) > threshold).astype(int)
elif predicate == "<":
expected_data = (np.arange(-1, 20) < threshold).astype(int)
else:
expected_data = (np.arange(-1, 20) == threshold).astype(int)
expected_data[0] = 0 # previous flow in initial timestep is undefined
expected_data = expected_data[:, np.newaxis]
rec = AssertionRecorder(model, parameter, expected_data=expected_data, name="assertion recorder {}".format(predicate))
model.run()
@pytest.mark.parametrize("threshold", [
5.0,
{"type": "constant", "value": 5.0},
], ids=["double", "parameter"])
def test_parameter_threshold_parameter(self, simple_linear_model, threshold):
""" Test ParameterThresholdParameter """
m = simple_linear_model
m.nodes['Input'].max_flow = 10.0
m.nodes['Output'].cost = -10.0
data = {
"type": "parameterthreshold",
"parameter": {
"type": "constant",
"value": 3.0
},
"threshold": threshold,
"predicate": "<"
}
p1 = load_parameter(m, data)
si = ScenarioIndex(0, np.array([0], dtype=np.int32))
m.setup()
m.step()
# value < 5
assert p1.index(m.timestepper.current, si) == 1
p1.param.update(np.array([8.0,]))
m.setup()
m.step()
# flow < 5
assert p1.index(m.timestepper.current, si) == 0
def test_orphaned_components(simple_linear_model):
model = simple_linear_model
model.nodes["Input"].max_flow = ConstantParameter(model, 10.0)
result = model.find_orphaned_parameters()
assert(not result)
# assert that warning not raised by check
with pytest.warns(None) as record:
model.check()
for w in record:
if isinstance(w, OrphanedParameterWarning):
pytest.fail("OrphanedParameterWarning raised unexpectedly!")
# add some orphans
orphan1 = ConstantParameter(model, 5.0)
orphan2 = ConstantParameter(model, 10.0)
orphans = {orphan1, orphan2}
result = model.find_orphaned_parameters()
assert(orphans == result)
with pytest.warns(OrphanedParameterWarning):
model.check()
def test_deficit_parameter(solver):
"""Test DeficitParameter
Here we test both uses of the DeficitParameter:
1) Recording the deficit for a node each timestep
2) Using yesterday's deficit to control today's flow
"""
model = load_model("deficit.json", solver=solver)
model.run()
max_flow = np.array([5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8])
demand = 10.0
supplied = np.minimum(max_flow, demand)
expected = demand - supplied
actual = model.recorders["deficit_recorder"].data
assert_allclose(expected, actual[:,0])
expected_yesterday = [0]+list(expected[0:-1])
actual_yesterday = model.recorders["yesterday_recorder"].data
assert_allclose(expected_yesterday, actual_yesterday[:,0])
| gpl-3.0 |
dimagol/trex-core | doc/TRexDataAnalysis.py | 1 | 10771 | #!/scratch/Anaconda2.4.0/bin/python
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import dates as matdates
from matplotlib import lines as matlines
import os
import time
from datetime import datetime
"""
This Module is structured to work with a raw data at the following JSON format:
{'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
}
The Query structure is set (currently) to this:
(test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:
["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
It can be changed to support other query formats; simply change the query class to match your desired structure.
The query class specifies the indexes of the data within the query tuple.
"""
class TestQuery(object):
QUERY_TIMEFORMAT = "%Y-%m-%d %H:%M:%S" # date format in the query
QUERY_TIMESTAMP = 1
QUERY_MPPS_RESULT = 2
QUERY_BUILD_ID = 3
class Test:
def __init__(self, name, setup_name, end_date):
self.name = name
self.setup_name = setup_name
self.end_date = end_date
self.stats = [] # tuple
self.results_df = [] # dataFrame
self.latest_result = [] # float
self.latest_result_date = '' # string
def analyze_all_test_data(self, raw_test_data):
test_results = []
test_dates = []
test_build_ids = []
for query in raw_test_data:
# date_formatted = time.strftime("%d-%m-%Y",
# time.strptime(query[int(TestQuery.QUERY_DATE)], TestQuery.query_dateformat))
# time_of_res = date_formatted + '-' + query[int(TestQuery.QUERY_HOUR)] + ':' + query[
# int(TestQuery.QUERY_MINUTE)]
time_of_query = time.strptime(query[TestQuery.QUERY_TIMESTAMP], TestQuery.QUERY_TIMEFORMAT)
time_formatted = time.strftime("%d-%m-%Y-%H:%M", time_of_query)
test_dates.append(time_formatted)
test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))
test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])
test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,
"Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},
dtype='str')
stats_avg = float(test_results_df[self.name].mean())
stats_min = float(test_results_df[self.name].min())
stats_max = float(test_results_df[self.name].max())
stats = tuple(
[stats_avg, stats_min, stats_max,
float(test_results_df[self.name].std()),
float(((stats_max - stats_min) / stats_avg) * 100),
len(test_results)]) # stats = (avg_mpps,min,max,std,error, no of test_results) error = ((max-min)/avg)*100
self.latest_result = float(test_results_df[self.name].iloc[-1])
self.latest_result_date = str(test_results_df[test_results_df.columns[3]].iloc[-1])
self.results_df = test_results_df
self.stats = stats
class Setup:
def __init__(self, name, end_date, raw_setup_data):
self.name = name
self.end_date = end_date # string of date
self.tests = [] # list of test objects
self.all_tests_data_table = pd.DataFrame() # dataframe
self.setup_trend_stats = pd.DataFrame() # dataframe
self.latest_test_results = pd.DataFrame() # dataframe
self.raw_setup_data = raw_setup_data # dictionary
self.test_names = raw_setup_data.keys() # list of names
def analyze_all_tests(self):
for test_name in self.test_names:
t = Test(test_name, self.name, self.end_date)
t.analyze_all_test_data(self.raw_setup_data[test_name])
self.tests.append(t)
def analyze_latest_test_results(self):
test_names = []
test_dates = []
test_latest_results = []
for test in self.tests:
test_names.append(test.name)
test_dates.append(test.latest_result_date)
test_latest_results.append(test.latest_result)
self.latest_test_results = pd.DataFrame(
{'Date': test_dates, 'Test Name': test_names, 'MPPS\Core (Norm)': test_latest_results},
index=range(1, len(test_latest_results) + 1))
self.latest_test_results = self.latest_test_results[[2, 1, 0]] # re-order columns to name|MPPS|date
def analyze_all_tests_stats(self):
test_names = []
all_test_stats = []
for test in self.tests:
test_names.append(test.name)
all_test_stats.append(test.stats)
self.setup_trend_stats = pd.DataFrame(all_test_stats, index=test_names,
columns=['Avg MPPS/Core (Norm)', 'Min', 'Max', 'Std', 'Error (%)',
'Total Results'])
self.setup_trend_stats.index.name = 'Test Name'
def analyze_all_tests_trend(self):
all_tests_trend_data = []
for test in self.tests:
all_tests_trend_data.append(test.results_df)
self.all_tests_data_table = reduce(lambda x, y: pd.merge(x, y, how='outer'), all_tests_trend_data)
def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):
time_format1 = '%d-%m-%Y-%H:%M'
time_format2 = '%Y-%m-%d-%H:%M'
for test in self.tests:
test_data = test.results_df[test.results_df.columns[2]].tolist()
test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()
start_date = test_time_stamps[0]
test_time_stamps.append(self.end_date + '-23:59')
test_data.append(test_data[-1])
float_test_time_stamps = []
for ts in test_time_stamps:
try:
float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
except:
float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
plt.legend(fontsize='small', loc='best')
plt.ylabel('MPPS/Core (Norm)')
plt.title('Setup: ' + self.name)
plt.tick_params(
axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off')
plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
if save_path:
plt.savefig(os.path.join(save_path, self.name + file_name))
if not self.setup_trend_stats.empty:
(self.setup_trend_stats.round(2)).to_csv(os.path.join(save_path, self.name +
'_trend_stats.csv'))
plt.close('all')
def plot_latest_test_results_bar_chart(self, save_path='', img_file_name='_latest_test_runs.png',
stats_file_name='_latest_test_runs_stats.csv'):
plt.figure()
colors_for_bars = ['b', 'g', 'r', 'c', 'm', 'y']
self.latest_test_results[[1]].plot(kind='bar', legend=False,
color=colors_for_bars) # plot only mpps data, which is in column 1
plt.xticks(rotation='horizontal')
plt.xlabel('Index of Tests')
plt.ylabel('MPPS/Core (Norm)')
plt.title("Test Runs for Setup: " + self.name)
if save_path:
plt.savefig(os.path.join(save_path, self.name + img_file_name))
(self.latest_test_results.round(2)).to_csv(
os.path.join(save_path, self.name + stats_file_name))
plt.close('all')
def analyze_all_setup_data(self):
self.analyze_all_tests()
self.analyze_latest_test_results()
self.analyze_all_tests_stats()
self.analyze_all_tests_trend()
def plot_all(self, save_path=''):
self.plot_latest_test_results_bar_chart(save_path)
self.plot_trend_graph_all_tests(save_path)
def latest_runs_comparison_bar_chart(setup_name1, setup_name2, setup1_latest_result, setup2_latest_result,
save_path=''
):
s1_res = setup1_latest_result[[0, 1]] # column0 is test name, column1 is MPPS\Core
s2_res = setup2_latest_result[[0, 1, 2]] # column0 is test name, column1 is MPPS\Core, column2 is Date
s1_res.columns = ['Test Name', setup_name1]
s2_res.columns = ['Test Name', setup_name2, 'Date']
compare_dframe = pd.merge(s1_res, s2_res, on='Test Name')
compare_dframe.plot(kind='bar')
plt.legend(fontsize='small', loc='best')
plt.xticks(rotation='horizontal')
plt.xlabel('Index of Tests')
plt.ylabel('MPPS/Core (Norm)')
plt.title("Comparison between " + setup_name1 + " and " + setup_name2)
if save_path:
plt.savefig(os.path.join(save_path, "_comparison.png"))
compare_dframe = compare_dframe.round(2)
compare_dframe.to_csv(os.path.join(save_path, '_comparison_stats_table.csv'))
# WARNING: if the file _detailed_table.csv already exists, this script deletes it, to prevent old results from accumulating
def create_all_data(ga_data, end_date, save_path='', detailed_test_stats=''):
all_setups = {}
all_setups_data = []
setup_names = ga_data.keys()
for setup_name in setup_names:
s = Setup(setup_name, end_date, ga_data[setup_name])
s.analyze_all_setup_data()
s.plot_all(save_path)
all_setups_data.append(s.all_tests_data_table)
all_setups[setup_name] = s
if detailed_test_stats:
if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
os.remove(os.path.join(save_path, '_detailed_table.csv'))
if all_setups_data:
all_setups_data_dframe = pd.DataFrame().append(all_setups_data)
all_setups_data_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))
trex19setup = all_setups['trex19']
trex08setup = all_setups['trex08']
latest_runs_comparison_bar_chart('Mellanox ConnectX-5',
'Intel XL710', trex19setup.latest_test_results,
trex08setup.latest_test_results,
save_path=save_path)
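# --- Hedged usage sketch (added; not part of the original script) ------------
# One way create_all_data() might be driven, assuming the original Python 2 /
# old-pandas environment this script targets. Note that the comparison step
# above hard-codes the setup names 'trex19' and 'trex08', so both keys must be
# present in the input dict. All data and paths below are invented for
# illustration only.
if __name__ == '__main__':
    _query = ['dns - 64 bytes, single CPU', '2016-12-26 01:39:00', '9.631898', '54289']
    _ga_data = {
        'trex19': {'dns - 64 bytes, single CPU': [_query]},
        'trex08': {'dns - 64 bytes, single CPU': [_query]},
    }
    create_all_data(_ga_data, end_date='2016-12-31', save_path='.',
                    detailed_test_stats='yes')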
| apache-2.0 |
victorbergelin/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
Midafi/scikit-image | doc/examples/plot_join_segmentations.py | 14 | 1967 | """
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.filters import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage.color import label2rgb
from skimage import data, img_as_float
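# Tiny illustration of the join described above (toy label maps, added here
# for intuition only): pixels share an output label only if they share a
# label in *both* inputs, so the two 2-segment inputs below join into three
# segments.
toy_a = np.array([[0, 0, 1, 1]])
toy_b = np.array([[0, 1, 1, 1]])
toy_join = join_segmentations(toy_a, toy_b)  # three distinct labels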
coins = img_as_float(data.coins())
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground
ws = watershed(edges, markers)
seg1 = ndi.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
multichannel=False)
# combine the two
segj = join_segmentations(seg1, seg2)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
color1 = label2rgb(seg1, image=coins, bg_label=0)
axes[1].imshow(color1, interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
color2 = label2rgb(seg2, image=coins, image_alpha=0.5)
axes[2].imshow(color2, interpolation='nearest')
axes[2].set_title('SLIC superpixels')
color3 = label2rgb(segj, image=coins, image_alpha=0.5)
axes[3].imshow(color3, interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
| bsd-3-clause |
mhue/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
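# Worked example (for intuition): with a_rows = a_cols = [1, 1, 0, 0] and
# b_rows = b_cols = [1, 1, 1, 1], the intersection is 2 * 2 = 4, the bicluster
# sizes are 2 * 2 = 4 and 4 * 4 = 16, so the coefficient is
# 4 / (4 + 16 - 4) = 0.25.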
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
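# Worked example (for intuition): if ``a`` and ``b`` contain the same two
# biclusters, merely listed in opposite order, every matched pair has a
# Jaccard coefficient of 1, the Hungarian assignment recovers the
# permutation, and the consensus score is 2 / max(2, 2) = 1.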
| bsd-3-clause |
almarklein/scikit-image | doc/examples/plot_ransac.py | 2 | 1571 | """
=========================================
Robust line model estimation using RANSAC
=========================================
In this example we see how to robustly fit a line model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage.measure import LineModel, ransac
np.random.seed(seed=1)
# generate coordinates of line
x = np.arange(-200, 200)
y = 0.2 * x + 20
data = np.column_stack([x, y])
# add faulty data
faulty = np.array(30 * [(180., -100)])
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
# fit line using all data
model = LineModel()
model.estimate(data)
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModel, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = inliers == False
# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)
plt.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
label='Inlier data')
plt.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
label='Outlier data')
plt.plot(line_x, line_y, '-k', label='Line model from all data')
plt.plot(line_x, line_y_robust, '-b', label='Robust line model')
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
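# i.e. five gamma values spaced evenly on a log scale between 1e-6 and 1e-1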
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | examples/model_selection/randomized_search.py | 35 | 3287 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
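    # For the a1/a3 case above: the row and column intersections are both 1,
    # each bicluster has size 2 * 2 = 4, hence 1 / (4 + 4 - 1) = 1 / 7.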
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
mingwpy/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
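# err_metric is a relative error |a - b| / (atol + |b|); entries where a and
# b are the same infinity are treated as an exact match (error 0).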
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
NDManh/numbbo | code-postprocessing/bbob_pproc/pprldistr.py | 3 | 35794 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""For generating empirical cumulative distribution function figures.
The outputs show empirical cumulative distribution functions (ECDFs) of
the running times of trials. These ECDFs show on the y-axis the fraction
of cases for which the running time (left subplots) or the df-value
(right subplots) was smaller than the value given on the x-axis. On the
left, ECDFs of the running times from trials are shown for different
target values. Light brown lines in the background show ECDFs for target
value 1e-8 of all algorithms benchmarked during BBOB-2009. On the right,
ECDFs of df-values from all trials are shown for different numbers of
function evaluations.
**Example**
.. plot::
:width: 75%
import urllib
import tarfile
import glob
from pylab import *
import bbob_pproc as bb
# Collect and unarchive data (3.4MB)
dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/BIPOP-CMA-ES.tar.gz'
filename, headers = urllib.urlretrieve(dataurl)
archivefile = tarfile.open(filename)
archivefile.extractall()
# Empirical cumulative distribution function figure
ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f0*_20.pickle'))
figure()
bb.pprldistr.plot(ds)
bb.pprldistr.beautify() # resize the window to view whole figure
CAVEAT: the naming conventions in this module mix up ERT (an estimate
of the expected running length) and run lengths.
"""
from __future__ import absolute_import
import os
import warnings # I don't know what I am doing here
import numpy as np
import pickle, gzip
import matplotlib.pyplot as plt
from pdb import set_trace
from . import toolsstats, genericsettings, pproc
from .ppfig import consecutiveNumbers, plotUnifLogXMarkers, saveFigure, logxticks
from .pptex import color_to_latex, marker_to_latex
single_target_values = pproc.TargetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config
single_runlength_factors = [0.5, 1.2, 3, 10] + [10 ** i for i in range(2, 12)]
# TODO: the method names in this module seem to be overly unclear or misleading and should be revised.
refcolor = 'wheat'
nbperdecade = 1 # markers in x-axis decades in ecdfs
runlen_xlimits_max = None # is possibly manipulated in config
runlen_xlimits_min = 1 # set to 10**-0.5 in runlength case in config
# Used as a global to store the largest xmax and align the FV ECD figures.
fmax = None
evalfmax = runlen_xlimits_max # is manipulated/stored in this module
# TODO: the target function values and the styles of the line only make sense
# together. Therefore we should either:
# 1. keep the targets as input argument and make rldStyles depend on them or
# 2. remove the targets as input argument and put them here.
rldStyles = ({'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'r', 'linewidth': 3.},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r'},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r', 'linewidth': 3.})
rldUnsuccStyles = (
{'color': 'c', 'ls': '-'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k'},
{'color': 'c', 'ls': '-'},
{'color': 'm'},
{'color': 'k'},
) # should not be too short
styles = genericsettings.line_styles
caption_part_one = r"""%
Empirical cumulative distribution functions (ECDF), plotting the fraction of
trials with an outcome not larger than the respective value on the $x$-axis.
#1"""
caption_left_fixed_targets = r"""%
Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ with $\Df=10^{k}$, where $k$ is the first value in the legend.
The thick red line represents the most difficult target value $\fopt+10^{-8}$. """
caption_left_rlbased_targets = r"""%
Left subplots: ECDF of number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ where \Df\ is the
target just not reached by the GECCO-BBOB-2009 best algorithm within a budget of
% largest $\Df$-value $\ge10^{-8}$ for which the best \ERT\ seen in the GECCO-BBOB-2009 was yet above
$k\times\DIM$ evaluations, where $k$ is the first value in the legend. """
caption_wrap_up = r"""%
Legends indicate for each target the number of functions that were solved in at
least one trial within the displayed budget."""
caption_right = r"""%
Right subplots: ECDF of the
best achieved $\Df$
for running times of TO_BE_REPLACED
function evaluations
(from right to left cycling cyan-magenta-black\dots) and final $\Df$-value (red),
where \Df\ and \textsf{Df} denote the difference to the optimal function value.
Light brown lines in the background show ECDFs for the most difficult target of all
algorithms benchmarked during BBOB-2009."""
caption_single_fixed = caption_part_one + caption_left_fixed_targets + caption_wrap_up + caption_right
caption_single_rlbased = caption_part_one + caption_left_rlbased_targets + caption_wrap_up + caption_right
caption_two_part_one = r"""%
Empirical cumulative distributions (ECDF)
of run lengths and speed-up ratios in 5-D (left) and 20-D (right).
Left sub-columns: ECDF of
the number of function evaluations divided by dimension $D$
(FEvals/D) """
symbAlgorithmA = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[0]['marker']))
symbAlgorithmB = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[1]['marker']))
caption_two_fixed_targets_part1 = r"""%
to reach a target value $\fopt+\Df$ with $\Df=10^{k}$, where
$k\in\{1, -1, -4, -8\}$ is given by the first value in the legend, for
\algorithmA\ ("""
caption_two_fixed_targets_part2 = r""") and \algorithmB\ ("""
caption_two_fixed_targets_part3 = r""")%
. Light beige lines show the ECDF of FEvals for target value $\Df=10^{-8}$
of all algorithms benchmarked during BBOB-2009.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB for target
function values $10^k$ with $k$ given in the legend; all
trial pairs for each function. Pairs where both trials failed are disregarded,
pairs where one trial failed are visible in the limits being $>0$ or $<1$. The
legend also indicates, after the colon, the number of functions that were
solved in at least one trial (\algorithmA\ first)."""
caption_two_rlbased_targets_part1 = r"""%
to fall below $\fopt+\Df$ for
\algorithmA\ ("""
caption_two_rlbased_targets_part2 = r""") and \algorithmB\ ("""
caption_two_rlbased_targets_part3 = r"""%
) where \Df\ is the target just not reached by the GECCO-BBOB-2009 best
algorithm within a budget of $k\times\DIM$ evaluations, with $k$ being the
value in the legend.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB\ for
run-length-based targets; all trial pairs for each function. Pairs where
both trials failed are disregarded, pairs where one trial failed are visible
in the limits being $>0$ or $<1$. The legends indicate the target budget of
$k\times\DIM$ evaluations and, after the colon, the number of functions that
were solved in at least one trial (\algorithmA\ first)."""
caption_two_fixed = (caption_two_part_one
+ caption_two_fixed_targets_part1
+ symbAlgorithmA
+ caption_two_fixed_targets_part2
+ symbAlgorithmB
+ caption_two_fixed_targets_part3)
caption_two_rlbased = (caption_two_part_one
+ caption_two_rlbased_targets_part1
+ symbAlgorithmA
+ caption_two_rlbased_targets_part2
+ symbAlgorithmB
+ caption_two_rlbased_targets_part3)
previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'
previous_RLBdata_filename = 'pprldistr2009_hardestRLB.pickle.gz'
previous_data_filename = os.path.join(os.path.split(__file__)[0], previous_data_filename)
previous_RLBdata_filename = os.path.join(os.path.split(__file__)[0], previous_RLBdata_filename)
previous_data_dict = None
previous_RLBdata_dict = None
def load_previous_data(filename = previous_data_filename, force = False):
if previous_data_dict and not force:
return previous_data_dict
try:
# cocofy(previous_data_filename)
f = gzip.open(previous_data_filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
previous_algorithm_data_found = False
print 'Could not find file: ', previous_data_filename
else:
f.close()
return None
def load_previous_RLBdata(filename = previous_RLBdata_filename):
if previous_RLBdata_dict:
return previous_RLBdata_dict
try:
f = gzip.open(previous_RLBdata_filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
print 'Could not find file: ', previous_RLBdata_filename
else:
f.close()
return None
def caption_single(max_evals_div_dim):
caption = caption_single_rlbased if genericsettings.runlength_based_targets else caption_single_fixed
return caption.replace(r'TO_BE_REPLACED', '$' + 'D, '.join([str(i) for i in single_runlength_factors[:6]]) + 'D,\dots$')
def caption_two():
caption = caption_two_rlbased if genericsettings.runlength_based_targets else caption_two_fixed
return caption
def beautifyECDF():
"""Generic formatting of ECDF figures."""
plt.ylim(-0.0, 1.01) # was plt.ylim(-0.01, 1.01)
plt.yticks(np.arange(0., 1.001, 0.2)) # , ('0.0', '', '0.5', '', '1.0'))
plt.grid(True)
xmin, xmax = plt.xlim()
# plt.xlim(xmin=xmin*0.90) # why this?
c = plt.gca().get_children()
for i in c: # TODO: we only want to extend ECDF lines...
try:
if i.get_drawstyle() == 'steps' and not i.get_linestyle() in ('', 'None'):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# xdata = np.hstack((xmin, xdata))
# ydata = np.hstack((ydata[0], ydata))
if xmax > max(xdata):
xdata = np.hstack((xdata, xmax))
ydata = np.hstack((ydata, ydata[-1]))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
elif (i.get_drawstyle() == 'steps' and i.get_marker() != '' and
i.get_linestyle() in ('', 'None')):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# minidx = np.ceil(np.log10(xmin) * nbperdecade)
# maxidx = np.floor(np.log10(xdata[0]) * nbperdecade)
# x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
# xdata = np.hstack((x, xdata))
# ydata = np.hstack(([ydata[0]] * len(x), ydata))
if xmax > max(xdata):
minidx = np.ceil(np.log10(xdata[-1]) * nbperdecade)
maxidx = np.floor(np.log10(xmax) * nbperdecade)
x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
xdata = np.hstack((xdata, x))
ydata = np.hstack((ydata, [ydata[-1]] * len(x)))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
except (AttributeError, IndexError):
pass
def beautifyRLD(xlimit_max = None):
"""Format and save the figure of the run length distribution.
After calling this function, changing the boundaries of the figure
will not update the ticks and tick labels.
"""
a = plt.gca()
a.set_xscale('log')
a.set_xlabel('log10 of FEvals / DIM')
a.set_ylabel('proportion of trials')
logxticks()
if xlimit_max:
plt.xlim(xmax = xlimit_max ** 1.0) # was 1.05
plt.xlim(xmin = runlen_xlimits_min)
plt.text(plt.xlim()[0], plt.ylim()[0], single_target_values.short_info, fontsize = 14)
beautifyECDF()
def beautifyFVD(isStoringXMax = False, ylabel = True):
"""Formats the figure of the run length distribution.
This function is to be used with :py:func:`plotFVDistr`
    :param bool isStoringXMax: if set to True, the first call to
                               :py:func:`beautifyFVD` sets the global
                               :py:data:`fmax` and all subsequent calls
will have the same maximum xlim
:param bool ylabel: if True, y-axis will be labelled.
"""
a = plt.gca()
a.set_xscale('log')
if isStoringXMax:
global fmax
else:
fmax = None
if not fmax:
xmin, fmax = plt.xlim()
plt.xlim(1.01e-8, fmax) # 1e-8 was 1.
# axisHandle.invert_xaxis()
a.set_xlabel('log10 of Df') # / Dftarget
if ylabel:
a.set_ylabel('proportion of trials')
logxticks(limits=plt.xlim())
beautifyECDF()
if not ylabel:
a.set_yticklabels(())
def plotECDF(x, n = None, **plotArgs):
"""Plot an empirical cumulative distribution function.
:param seq x: data
:param int n: number of samples, if not provided len(x) is used
:param plotArgs: optional keyword arguments provided to plot.
:returns: handles of the plot elements.
"""
if n is None:
n = len(x)
nx = len(x)
if n == 0 or nx == 0:
res = plt.plot([], [], **plotArgs)
else:
x = sorted(x) # do not sort in place
x = np.hstack((x, x[-1]))
y = np.hstack((np.arange(0., nx) / n, float(nx) / n))
res = plotUnifLogXMarkers(x, y, nbperdecade = nbperdecade,
drawstyle = 'steps', **plotArgs)
return res
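# Usage sketch (hypothetical data): ECDF of 5 successful run lengths out of
# n=8 trials, so the plotted curve plateaus at 5/8 instead of reaching 1:
#   plotECDF([12., 30., 55., 80., 200.], n=8, color='b')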
def _plotERTDistr(dsList, target, **plotArgs):
"""This method is obsolete, should be removed? The replacement for simulated runlengths is in pprldmany?
Creates simulated run time distributions (it is not an ERT distribution) from a DataSetList.
:keyword DataSet dsList: Input data sets
:keyword dict target: target precision
:keyword plotArgs: keyword arguments to pass to plot command
:return: resulting plot.
Details: calls ``plotECDF``.
"""
x = []
nn = 0
samplesize = genericsettings.simulated_runlength_bootstrap_sample_size # samplesize should be at least 1000
percentiles = 0.5 # could be anything...
for i in dsList:
# funcs.add(i.funcId)
for j in i.evals:
if j[0] <= target[i.funcId]:
runlengthsucc = j[1:][np.isfinite(j[1:])]
runlengthunsucc = i.maxevals[np.isnan(j[1:])]
tmp = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
percentiles = percentiles,
samplesize = samplesize)
x.extend(tmp[1])
break
nn += samplesize
res = plotECDF(x, nn, **plotArgs)
return res
def _plotRLDistr_old(dsList, target, **plotArgs):
"""Creates run length distributions from a sequence dataSetList.
Labels of the line (for the legend) will be set automatically with
the following format: %+d: %d/%d % (log10()
:param DataSetList dsList: Input data sets
:param dict or float target: target precision
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for i in dsList:
funcs.add(i.funcId)
try:
target = target[i.funcId] # TODO: this can only work for a single function, generally looks like a bug
if not genericsettings.test:
print 'target:', target
print 'function:', i.funcId
raise Exception('please check this, it looks like a bug')
except TypeError:
target = target
tmp = i.detEvals((target,))[0] / i.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0:
fsolved.add(i.funcId)
x.extend(tmp)
nn += i.nbRuns()
kwargs = plotArgs.copy()
label = ''
try:
label += '%+d:' % (np.log10(target))
except NameError:
pass
label += '%d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def erld_data(dsList, target, max_fun_evals = np.inf):
"""return ``[sorted_runlengths_divided_by_dimension, nb_of_all_runs, functions_ids_found, functions_ids_solved]``
`max_fun_evals` is only used to compute `function_ids_solved`,
that is elements in `sorted_runlengths...` can be larger.
copy-paste from `plotRLDistr` and not used.
"""
runlength_data = []
nruns = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
evals = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
evals = evals[np.isnan(evals) == False] # keep only success
if len(evals) > 0 and sum(evals <= max_fun_evals):
fsolved.add(ds.funcId)
runlength_data.extend(evals)
nruns += ds.nbRuns()
return sorted(runlength_data), nruns, funcs, fsolved
def plotRLDistr(dsList, target, label = '', max_fun_evals = np.inf,
**plotArgs):
"""Creates run length distributions from a sequence dataSetList.
Labels of the line (for the legend) will be appended with the number
    of functions solved at least once.
:param DataSetList dsList: Input data sets
:param target: a method that delivers single target values like ``target((fun, dim))``
:param str label: target value label to be displayed in the legend
:param max_fun_evals: only used to determine success on a single function
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
Example::
plotRLDistr(dsl, lambda f: 1e-6)
Details: ``target`` is a function taking a (function_number, dimension) pair
as input and returning a ``float``. It can be defined as
``lambda fun_dim: targets(fun_dim)[j]`` returning the j-th element of
``targets(fun_dim)``, where ``targets`` is an instance of
``class pproc.TargetValues`` (see the ``pproc.TargetValues.__call__`` method).
TODO: data generation and plotting should be in separate methods
    TODO: different number of runs/data biases the results, shouldn't
    the number of data points be made the same, in that case?
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
tmp = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0 and sum(tmp <= max_fun_evals):
fsolved.add(ds.funcId)
x.extend(tmp)
nn += ds.nbRuns()
kwargs = plotArgs.copy()
label += ': %d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def plotFVDistr(dsList, budget, min_f = 1e-8, **plotArgs):
"""Creates ECDF of final function values plot from a DataSetList.
:param dsList: data sets
:param min_f: used for the left limit of the plot
:param float budget: maximum evaluations / dimension that "count"
:param plotArgs: additional arguments passed to plot
:returns: handle
"""
x = []
nn = 0
for ds in dsList:
if ds.isBiobjective():
            continue
for i, fvals in enumerate(ds.funvals):
if fvals[0] > budget * ds.dim:
                assert i > 0, 'first entry ' + str(fvals[0]) + ' was already larger than the maximal budget ' + str(budget * ds.dim)
fvals = ds.funvals[i - 1]
break
# vals = fvals[1:].copy() / target[i.funcId]
vals = fvals[1:].copy()
# replace negative values to prevent problem with log of vals
vals[vals <= 0] = min(np.append(vals[vals > 0], [min_f])) # works also when vals[vals > 0] is empty
if genericsettings.runlength_based_targets:
            raise NotImplementedError('related function vals with respective budget (e.g. ERT(val)) see pplogloss.generateData()')
x.extend(vals)
nn += ds.nbRuns()
if nn > 0:
return plotECDF(x, nn, **plotArgs)
else:
return None
def comp(dsList0, dsList1, targets, isStoringXMax = False,
outputdir = '', info = 'default', verbose = True):
"""Generate figures of ECDF that compare 2 algorithms.
:param DataSetList dsList0: list of DataSet instances for ALG0
:param DataSetList dsList1: list of DataSet instances for ALG1
:param seq targets: target function values to be displayed
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the globals
:py:data:`fmax` and :py:data:`maxEvals`
and all subsequent calls will use these
values as rightmost xlim in the generated
figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
targets = pproc.TargetValues.cast(targets)
dictdim0 = dsList0.dictByDim()
dictdim1 = dsList1.dictByDim()
for d in set(dictdim0.keys()) & set(dictdim1.keys()):
maxEvalsFactor = max(max(i.mMaxEvals() / d for i in dictdim0[d]),
max(i.mMaxEvals() / d for i in dictdim1[d]))
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor ** 1.05
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
tmp = plotRLDistr(dictdim0[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[1]['marker'],
**rldStyles[j % len(rldStyles)])
plt.setp(tmp[-1], label = None) # Remove automatic legend
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 20.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
tmp = plotRLDistr(dictdim1[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[0]['marker'],
**rldStyles[j % len(rldStyles)])
# modify the automatic legend: remover marker and change text
plt.setp(tmp[-1], marker = '',
label = targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j))
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 15.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
funcs = set(i.funcId for i in dictdim0[d]) | set(i.funcId for i in dictdim1[d])
text = consecutiveNumbers(sorted(funcs), 'f')
if not dsList0.isBiobjective():
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim0[d]), ls='--', color='k')
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim1[d]), color='k')
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim0[d]),
marker = '+', markersize = 20., color = 'k',
markeredgewidth = plt.getp(tmp[-1], 'linewidth',))
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim1[d]),
marker = 'o', markersize = 15., color = 'k', markerfacecolor = 'None',
markeredgewidth = plt.getp(tmp[-1], 'linewidth'))
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
def beautify():
"""Format the figure of the run length distribution.
Used in conjunction with plot method (obsolete/outdated, see functions ``beautifyFVD`` and ``beautifyRLD``).
"""
# raise NotImplementedError('this implementation is obsolete')
plt.subplot(121)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
axisHandle.set_xlabel('log10 of FEvals / DIM')
axisHandle.set_ylabel('proportion of trials')
# Grid options
logxticks()
beautifyECDF()
plt.subplot(122)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
xmin, fmax = plt.xlim()
plt.xlim(1., fmax)
axisHandle.set_xlabel('log10 of Df / Dftarget')
beautifyECDF()
logxticks()
axisHandle.set_yticklabels(())
plt.gcf().set_size_inches(16.35, 6.175)
# try:
# set_trace()
# plt.setp(plt.gcf(), 'figwidth', 16.35)
# except AttributeError: # version error?
# set_trace()
# plt.setp(plt.gcf(), 'figsize', (16.35, 6.))
def plot(dsList, targets = single_target_values, **plotArgs):
"""Plot ECDF of evaluations and final function values
in a single figure for demonstration purposes."""
# targets = targets() # TODO: this needs to be rectified
# targets = targets.target_values
dsList = pproc.DataSetList(dsList)
assert len(dsList.dictByDim()) == 1, ('Cannot display different '
'dimensionalities together')
res = []
plt.subplot(121)
maxEvalsFactor = max(i.mMaxEvals() / i.dim for i in dsList)
evalfmax = maxEvalsFactor
for j in range(len(targets)):
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotRLDistr(dsList, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
res.extend(tmp)
res.append(plt.axvline(x = maxEvalsFactor, color = 'k', **plotArgs))
funcs = list(i.funcId for i in dsList)
text = consecutiveNumbers(sorted(funcs), 'f')
res.append(plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes))
plt.subplot(122)
for j in [range(len(targets))[-1]]:
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotFVDistr(dsList, evalfmax, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
if tmp:
res.extend(tmp)
tmp = np.floor(np.log10(evalfmax))
# coloring right to left:
maxEvalsF = np.power(10, np.arange(0, tmp))
for j in range(len(maxEvalsF)):
tmpplotArgs = dict(plotArgs, **rldUnsuccStyles[j % len(rldUnsuccStyles)])
tmp = plotFVDistr(dsList, maxEvalsF[j], lambda fun_dim: targets(fun_dim)[-1], **tmpplotArgs)
if tmp:
res.extend(tmp)
res.append(plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes))
return res
def plot_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_data_dict
if previous_data_dict is None:
previous_data_dict = load_previous_data() # this takes about 6 seconds
if previous_data_dict is not None:
for alg in previous_data_dict:
x = []
nn = 0
try:
tmp = previous_data_dict[alg]
for f in funcs:
tmp[f][dim] # simply test that they exists
except KeyError:
continue
for f in funcs:
tmp2 = tmp[f][dim][0][1:]
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def plotRLB_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_RLBdata_dict
if previous_RLBdata_dict is None:
previous_RLBdata_dict = load_previous_RLBdata()
if previous_RLBdata_dict is not None:
for alg in previous_RLBdata_dict:
x = []
nn = 0
try:
tmp = previous_RLBdata_dict[alg]
for f in funcs:
tmp[f][dim] # simply test that they exists
except KeyError:
continue
for f in funcs:
tmp2 = np.array(tmp[f][dim][0][1:][0])
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def main(dsList, isStoringXMax = False, outputdir = '',
info = 'default', verbose = True):
"""Generate figures of empirical cumulative distribution functions.
This method has a feature which allows to keep the same boundaries
for the x-axis, if ``isStoringXMax==True``. This makes sense when
dealing with different functions or subsets of functions for one
given dimension.
CAVE: this is bug-prone, as some data depend on the maximum
          evaluations and the appearance therefore depends on the
calling order.
:param DataSetList dsList: list of DataSet instances to process.
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the
globals :py:data:`fmax` and
:py:data:`maxEvals` and all subsequent
calls will use these values as rightmost
xlim in the generated figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
targets = single_target_values # convenience abbreviation
for d, dictdim in dsList.dictByDim().iteritems():
maxEvalsFactor = max(i.mMaxEvals() / d for i in dictdim)
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
# first figure: Run Length Distribution
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
plotRLDistr(dictdim,
lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
evalfmax, # can be larger maxEvalsFactor with no effect
** rldStyles[j % len(rldStyles)])
funcs = list(i.funcId for i in dictdim)
text = '{%s}, %d-D' % (consecutiveNumbers(sorted(funcs), 'f'), d)
if not dsList.isBiobjective():
# try:
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
# if targets.target_values[-1] == 1e-8: # this is a hack
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# except:
# pass
plt.axvline(x = maxEvalsFactor, color = 'k') # vertical line at maxevals
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top",
transform = plt.gca().transAxes
# bbox=dict(ec='k', fill=False)
)
try: # was never tested, so let's make it safe
if len(funcs) == 1:
plt.title(genericsettings.current_testbed.info(funcs[0])[:27])
except:
warnings.warn('could not print title')
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
for ds in dictdim:
if ds.isBiobjective():
return
# second figure: Function Value Distribution
filename = os.path.join(outputdir, 'ppfvdistr_%02dD_%s' % (d, info))
fig = plt.figure()
plotFVDistr(dictdim, np.inf, 1e-8, **rldStyles[-1])
# coloring right to left
for j, max_eval_factor in enumerate(single_runlength_factors):
if max_eval_factor > maxEvalsFactor:
break
plotFVDistr(dictdim, max_eval_factor, 1e-8,
**rldUnsuccStyles[j % len(rldUnsuccStyles)])
plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyFVD(isStoringXMax = isStoringXMax, ylabel = False)
saveFigure(filename, verbose = verbose)
plt.close(fig)
# plt.rcdefaults()
| bsd-3-clause |
plissonf/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
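# For instance, _color_brew(2) returns two [R, G, B] lists with hues of 25
# and 205 degrees (an orange and a blue), both with saturation 0.75 and
# value 0.9.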
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
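    # For example, a pure two-class node with value [1.0, 0.0] gets alpha
    # int(255 * (1.0 - 0.0) / (1 - 0.0)) = 255, i.e. an opaque '#RRGGBBff'.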
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
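    # Sketch of the kind of label node_to_str produces for an internal split
    # with default options (feature index and numbers are indicative only):
    #   "X[2] <= 2.45\ngini = 0.6667\nsamples = 150\nvalue = [50, 50, 50]"
    # With special_characters=True the same content is emitted as an HTML-like
    # label using <SUB>, &le; and <br/> instead of brackets and \n escapes.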
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
feranick/KerogenGA | Scripts/collectGA/collectGA.py | 2 | 5691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
**********************************************************
* CollectGA
* version: 20161106b
* By: Nicola Ferralis <[email protected]>
* help: python collectGA.py -h
***********************************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, csv
from pandas import read_csv
summaryFile = "summaryFile.csv"
summaryFolder = os.getenv("HOME") + '/Desktop/'
fitnessFile = "output.txt"
#**********************************************
''' Main '''
#**********************************************
def main():
if len(sys.argv) < 2:
inputFile = 'bestIndividual.txt'
else:
if sys.argv[1] == '-h' or sys.argv[1] == '--help':
usage()
sys.exit(2)
else:
inputFile = sys.argv[1]
runCollect(inputFile)
#**********************************************
''' RunCollection '''
#**********************************************
def runCollect(inputFile):
pos = readFitness(fitnessFile)
#**********************************************
''' Read new file '''
#**********************************************
X = readFile(inputFile)
print (' New GA file: ' + os.path.relpath(".",".."))
summaryFile_path = summaryFolder + summaryFile
print (' Fitness: ' + str(pos))
#**********************************************
''' Read summary file '''
#**********************************************
ind = 0
if os.path.exists(summaryFile_path) == True:
L,llr = readSummary(summaryFile_path)
lr = [None]*llr
else:
with open(summaryFile_path, 'a') as f:
csv_out=csv.writer(f)
csv_out.writerow(np.append(['name', 'fitness'], X[:,0]))
f.close()
L = np.append(['name', 'fitness'], X[:,0]).tolist()
lr = np.append(os.path.relpath(".",".."), np.append(float(pos), X[:,1])).tolist()
summary = lr
summary[0] = os.path.relpath(".","..")
summary[1] = float(pos)
origL = len(L)
#**********************************************
''' Detect if molecule has been already seen '''
#**********************************************
for i in range(0,len(X)):
if X[i,0] in L:
j = L.index(X[i,0])
print(' ' + L[j] + '\t already detected')
summary[j] = X[i,1]
else:
L.append(X[i,0])
summary[i+2] = ''
print('+\033[1m' + X[i,0] + '\t first detected!' + '\033[0m')
summary = np.append(summary, X[i,1])
ind += 1
#*********************************************************
    ''' Change the file header if new molecules are detected '''
#*********************************************************
if ind != 0:
df = read_csv(summaryFile_path)
for i in range(0,ind):
df.insert(i+origL, L[i+origL], '', 0)
df.columns = L
df.to_csv(summaryFile_path, index=False)
#**********************************************
''' Save new data into Summary '''
#**********************************************
with open(summaryFile_path, 'a') as f:
csv_out=csv.writer(f)
csv_out.writerow(summary)
f.close()
print('\n Summary saved in: ' + summaryFile_path + '\n')
#**********************************************
''' Read new files '''
#**********************************************
def readFile(sampleFile):
try:
X=np.empty([0,2])
totconc=0
ind=0
with open(sampleFile, 'r') as f:
data = csv.reader(f, delimiter=' ')
for row in data:
totconc += float(row[2])
X = np.row_stack((X, [np.array(row[0]), row[2]]))
ind+=1
for i in range(0,ind):
X[i,1]= float(X[i,1])*100/totconc
except:
print('\033[1m File: \"' + sampleFile + '\" not found \n ' + '\033[0m')
sys.exit(2)
return X
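# Input sketch (inferred from the indices used in readFile; the real file
# format may differ): each space-separated line is expected to look like
#   <molecule-name> <id> <concentration>
# e.g. "pyrene 12 0.034", where column 0 gives the name and column 2 the raw
# concentration, later renormalised to a percentage of the total.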
#**********************************************
''' Read Summary '''
#**********************************************
def readSummary(summaryFile):
try:
with open(summaryFile, 'r') as f:
data = csv.reader(f, delimiter=',')
L = next(data)
with open(summaryFile, 'r') as f:
lastrow = None
for lastrow in csv.reader(f): pass
except:
print('\033[1m File: \"' + summaryFile + '\" not found \n ' + '\033[0m')
return
return L, len(lastrow)
#**********************************************
''' Read Fitness '''
#**********************************************
def readFitness(fitnessFile):
try:
with open(fitnessFile, 'r') as f:
lastrow = None
beforelastrow = None
for lastrow in enumerate(f):
if(lastrow[1].find('Predicted fit') == -1):
beforelastrow = lastrow[1]
else:
pass
except:
print('\033[1m File: \"' + fitnessFile + '\" not found \n ' + '\033[0m')
return
pos = beforelastrow.find("Best Fitness =")+15
return beforelastrow[pos:]
#************************************
''' Lists the program usage '''
#************************************
def usage():
print(' Usage:')
print('\n for default filenames:')
print(' python collectGA.py ')
print('\n for custom filenames:')
print(' python collectGA.py <filename>\n')
#************************************
''' Main initialization routine '''
#************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
samuel1208/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
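# Minimal sketch of _intercept_dot (illustrative values): with two features
# and a fitted intercept, the last entry of ``w`` is the bias term, e.g.
#   w = np.array([1., 2., .5]); X = np.array([[1., 1.]]); y = np.array([1.])
#   _intercept_dot(w, X, y) -> (array([1., 2.]), 0.5, array([3.5]))
# because z = X.dot(w[:2]) + .5 = 3.5 and y * z = 3.5.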
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
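# Rough usage sketch (toy data, values are approximate): at w = 0 every sample
# has probability 0.5, so the loss reduces to n_samples * log(2), e.g.
#   X = np.array([[1., 0.], [0., 1.]]); y = np.array([1., -1.])
#   loss, grad = _logistic_loss_and_grad(np.zeros(2), X, y, alpha=1.)
#   # loss ~= 1.386 (= 2 * log 2), grad == array([-0.5, 0.5])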
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
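# Usage sketch (illustrative): ``Hs`` is a Hessian-vector product, which is
# what the newton-cg solver consumes, so the full n_features x n_features
# Hessian is never materialised:
#   grad, Hs = _logistic_grad_hess(w, X, y, alpha=1., sample_weight=None)
#   Hv = Hs(np.ones_like(w))   # result has the same shape as w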
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
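# Shape sketch (illustrative): for n_classes=3, n_features=4 and a fitted
# intercept, ``w`` is the raveled (3, 5) coefficient matrix and ``Y`` the
# one-hot label matrix of shape (n_samples, 3); the returned ``p`` holds
# row-wise class probabilities (the softmax of the decision values) and
# ``loss`` is the weighted cross-entropy plus the 0.5 * alpha * ||w||^2 term.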
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
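    Examples
    --------
    A minimal sketch on a toy binary problem (only shapes are checked here):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.linear_model.logistic import logistic_regression_path
    >>> X, y = make_classification(n_samples=50, n_features=4, random_state=0)
    >>> coefs, Cs = logistic_regression_path(X, y, Cs=3)  # doctest: +SKIP
    >>> len(coefs), len(Cs)  # doctest: +SKIP
    (3, 3)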
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
See also
--------
sklearn.linear_model.SGDClassifier
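    Examples
    --------
    A minimal sketch on the iris data (predicted values are indicative):
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegression
    >>> iris = load_iris()
    >>> clf = LogisticRegression(C=1.0).fit(iris.data, iris.target)
    >>> clf.predict(iris.data[:2, :])  # doctest: +SKIP
    array([0, 0])
    >>> clf.predict_proba(iris.data[:2, :]).shape  # doctest: +SKIP
    (2, 3)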
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this get squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
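    Examples
    --------
    A minimal sketch of cross-validated fitting (shapes are indicative):
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegressionCV
    >>> iris = load_iris()
    >>> clf = LogisticRegressionCV(Cs=5, cv=3).fit(iris.data, iris.target)  # doctest: +SKIP
    >>> clf.C_.shape  # doctest: +SKIP
    (3,)
    >>> clf.scores_[2].shape  # doctest: +SKIP
    (3, 5)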
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 47 | 12381 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
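def _check_statistics_usage_sketch():
    # Hedged demo (illustrative values, not an actual test and never collected
    # by pytest): how the helper above is driven -- zeros act as the missing
    # values, X_true is the expected imputed matrix and the fourth argument
    # lists the expected per-column statistics.
    X = np.array([[0., 2.],
                  [4., 0.],
                  [6., 8.]])
    X_true = np.array([[5., 2.],
                       [4., 5.],
                       [6., 8.]])
    _check_statistics(X, X_true, "mean", [5., 5.], missing_values=0)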
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test starts failing after an update of scipy, Imputer will need to be
    # updated to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
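def _mode_tie_breaking_sketch():
    # Hedged illustration (never called by the tests) of the scipy behaviour the
    # comment above relies on: with equally frequent values, scipy.stats.mode
    # returns the lowest one rather than the first encountered.
    from scipy import stats
    result = stats.mode([2, 2, 1, 1])
    print(result.mode)  # -> 1, the lowest of the tied values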
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
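def _imputer_usage_sketch():
    # Hedged usage sketch (illustrative values, never called by the tests): the
    # behaviour exercised above in its simplest form -- column means replace the
    # missing entries.
    X = np.array([[1., np.nan],
                  [3., 4.],
                  [np.nan, 6.]])
    imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
    print(imputer.fit_transform(X))
    # column means over the observed values are 2. and 5., so the NaNs are
    # replaced by those values: [[1., 5.], [3., 4.], [2., 6.]]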
| bsd-3-clause |
IntelligentSystemsLaboratory/JGI-PURE-Challenge | data/pure_data_challenge_170327/create_connections.py | 1 | 1811 | import pandas as pd
from collections import Counter
csv_files = ['outputs', 'authors', 'staff']
dfs = {name:pd.read_csv(name + '.csv', infer_datetime_format=True,
index_col=False) for name in csv_files}
keep_columns = {'outputs': ['PUBLICATION_ID', 'PUBLICATION_YEAR'],
'authors': ['PERSON_ID', 'PUBLICATION_ID'],
'staff': ['PERSON_ID', 'ORGANISATION_CODE']}
dfs = {name:dfs[name][columns] for name, columns in keep_columns.items()}
for name, df in dfs.items():
print('{} shape = {}'.format(name, df.shape))
# We get: person_id, publication_id, and organization_code
df = pd.merge(dfs['authors'], dfs['staff'], on='PERSON_ID')
# We get: person_id, publication_id, and organization_code, publication_year
df = pd.merge(df, dfs['outputs'], on='PUBLICATION_ID')
# We get: publication_id, and organization_code, publication_year
del df['PERSON_ID']
# We get: publication_id, list of organisation_code
df_pub_org = df.groupby(['PUBLICATION_ID'])['ORGANISATION_CODE'].unique()
counter = Counter(tuple(sorted(tup)) for tup in df_pub_org.values)
connections = []
for tup, counts in counter.items():
if len(tup) == 1:
org1 = org2 = tup[0]
connections.append([2012, org1, org2, counts, counts])
else:
# FIXME Solve problem with duplicated pairs
for i in range(len(tup)):
for j in range(i,len(tup)):
org1 = tup[i]
org2 = tup[j]
connections.append([2012, org1, org2, counts, counts])
df_connections = pd.DataFrame(connections, columns=['year', 'importer1',
'importer2', 'flow1',
'flow2'])
df_connections.to_csv('org_connections.csv', index=False)
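# Hedged sketch of one possible fix for the FIXME above (not the author's
# implementation; it does not overwrite the CSV written above): aggregate the
# flow count per organisation pair so each pair appears in a single row instead
# of being duplicated across publications.
from collections import defaultdict
pair_counts = defaultdict(int)
for tup, counts in counter.items():
    for i in range(len(tup)):
        for j in range(i, len(tup)):
            pair_counts[(tup[i], tup[j])] += counts
df_deduped = pd.DataFrame(
    [[2012, org1, org2, c, c] for (org1, org2), c in pair_counts.items()],
    columns=['year', 'importer1', 'importer2', 'flow1', 'flow2'])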
| mit |
thientu/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
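# Hedged addendum (not part of the original example): individual biclusters of
# the fitted model can also be inspected through the BiclusterMixin API.
rows0, cols0 = model.get_indices(0)
print("bicluster 0 spans {0} rows and {1} columns".format(len(rows0), len(cols0)))
print("bicluster 0 shape:", model.get_shape(0))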
| bsd-3-clause |
guoxiaolongzte/spark | python/pyspark/sql/session.py | 4 | 37209 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
        Returns a new SparkSession as a new session that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowStreamSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
jsqlContext = self._wrapped._jsqlContext
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
        Each record will also be wrapped into a tuple, which can be converted to a row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
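def _session_usage_sketch():
    # Hedged usage sketch (illustrative only, never called by pyspark; the local
    # master, toy data and Arrow setting are assumptions): the context-manager
    # protocol defined above stops the session on exit, and createDataFrame
    # accepts a pandas.DataFrame, optionally converted through Arrow.
    import pandas as pd
    builder = (SparkSession.builder
               .master("local[1]")
               .appName("session-demo")
               .config("spark.sql.execution.arrow.enabled", "true"))
    with builder.getOrCreate() as session:
        pdf = pd.DataFrame({"name": ["Alice", "Bob"], "age": [1, 2]})
        session.createDataFrame(pdf).show()
    # __exit__ has called stop() here, so the underlying SparkContext is stopped.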
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
lucasdavid/Manifold-Learning | tests/integration/displayer_test.py | 1 | 2081 | from unittest import TestCase
from sklearn import datasets, manifold
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from manifold.infrastructure import Displayer
class DisplayerTest(TestCase):
def test_random_generation(self):
"""Tests if the Displayer class is presenting 4 graphics correctly (manual checking).
"""
points = 1000
data, color = datasets.make_swiss_roll(points, random_state=0)
neighbors = 10
d = Displayer(points=points, neighbors=neighbors)
d \
.load(data, color, title='Graphic I') \
.load(data, color, title='Graphic II') \
.load(data, color, title='Graphic III') \
.load(data, color, title='Graphic IV') \
.show()
def test_similar_graphics(self):
"""Tests if Displayer class is presenting a similar graphic from the one printed
by the hard-coded lines bellow (manual checking).
"""
points = 1000
data, color = datasets.make_swiss_roll(points, random_state=0)
neighbors = 10
to_dimension = 2
result = manifold.Isomap(neighbors, to_dimension).fit_transform(data)
# Expected printing...
Axes3D
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Expected image", fontsize=14)
ax = fig.add_subplot(121, projection='3d')
ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
ax = fig.add_subplot(122)
plt.scatter(result[:, 0], result[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SKLearn's Isomap")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Actual printing...
Displayer(title="Actual image", points=points, neighbors=neighbors) \
.load(data, color, title='Graphic I') \
.load(result, color, title='SKLearn\'s Isomap') \
.show()
| mit |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/core/config.py | 8 | 22948 | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all option can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associcated
with them, which are stored in auxilary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
from pandas.compat import map, lmap, u
import pandas.compat as compat
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for pandas.options, backwards compatible with KeyError
checks
"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.pop('silent', False)
if kwargs:
raise TypeError('_set_option() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
if silent:
with warnings.catch_warnings(record=True):
o.cb(key)
else:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To do this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
            raise ValueError('Need to invoke as '
'option_context(pat, val, [(pat, val), ...)).')
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option" %
'.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
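def _register_option_sketch():
    # Hedged illustration (hypothetical key, never registered by pandas itself,
    # and this helper is not called anywhere): a module registers an option and
    # the public API defined above reads, writes and resets it.
    def _demo_validator(value):
        if not isinstance(value, int):
            raise ValueError("demo.linewidth must be an int")
    register_option('demo.linewidth', 80, doc='illustrative option',
                    validator=_demo_validator)
    set_option('demo.linewidth', 100)
    assert get_option('demo.linewidth') == 100
    reset_option('demo.linewidth')
    assert get_option('demo.linewidth') == 80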
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
    RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
    if key is deprecated and a replacement key is defined, will return the
    replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
    supported API functions: (register / get / set)_option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.core.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
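# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: registering and
# reading a small group of options under a shared prefix. The prefix
# "display.demo" and the option names are examples only.
def _demo_config_prefix():
    with config_prefix('display.demo'):
        register_option('color', 'red', validator=is_str)
        register_option('size', '5 pt', validator=is_str)
        set_option('size', '6 pt')
        return get_option('size')  # reads "display.demo.size" -> '6 pt'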
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
    validator - a function of a single argument x, which raises
                ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
    validator - a function of a single argument x, which raises
                ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
from pandas.formats.printing import pprint_thing
type_repr = "|".join(map(pprint_thing, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
from pandas.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any([c(x) for c in callables]):
pp_values = pp("|".join(lmap(pp, legal_values)))
msg = "Value must be one of {0}".format(pp_values)
if len(callables):
msg += " or a callable"
raise ValueError(msg)
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
def is_callable(obj):
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
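# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how the validator
# factories above are combined with ``register_option``. All option names
# here are invented for demonstration purposes.
def _demo_validators():
    register_option('demo.line_width', 80, validator=is_int)
    register_option('demo.engine', 'python',
                    validator=is_one_of_factory(['python', 'c']))
    register_option('demo.encoding', 'utf-8', validator=is_text)
    # an invalid default would raise immediately, e.g.:
    # register_option('demo.line_width', 'wide', validator=is_int)  # ValueError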
| mit |
FRESNA/powerplantmatching | powerplantmatching/export.py | 1 | 13515 | # -*- coding: utf-8 -*-
# Copyright 2016-2018 Fabian Hofmann (FIAS), Jonas Hoersch (KIT, IAI) and
# Fabian Gotzens (FZJ, IEK-STE)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This export script is intended for the users of PyPSA:
# https://www.pypsa.org/
# or the VEDA-TIMES modelling framework:
# http://iea-etsap.org/index.php/etsap-tools/data-handling-shells/veda
from .core import _data_out, get_obj_if_Acc
from .heuristics import set_denmark_region_id, set_known_retire_years
import pandas as pd
import numpy as np
import pycountry
import logging
from scipy.spatial import cKDTree as KDTree
logger = logging.getLogger(__name__)
cget = pycountry.countries.get
def to_pypsa_names(df):
"""Rename the columns of the powerplant data according to the
convention in PyPSA.
Arguments:
df {pandas.DataFrame} -- powerplant data
Returns:
pandas.DataFrame -- Column renamed dataframe
"""
df = get_obj_if_Acc(df)
return (df.assign(Fueltype=df['Fueltype'].str.lower())
.rename(columns={'Fueltype': 'carrier',
'Capacity': 'p_nom',
'Duration': 'max_hours',
'Set': 'component'}))
def map_bus(df, buses):
'''
    Assign a 'bus' column to the dataframe based on a list of coordinates.
Parameters
----------
df : pd.DataFrame
power plant list with coordinates 'lat' and 'lon'
buses : pd.DataFrame
        bus list with coordinates 'x' and 'y'
Returns
-------
DataFrame with an extra column 'bus' indicating the nearest bus.
'''
df = get_obj_if_Acc(df)
kdtree = KDTree(buses[['x', 'y']])
buses_i = buses.index.append(pd.Index([np.nan]))
return df.assign(bus=buses_i[kdtree.query(df[['lon', 'lat']].values)[1]])
def map_country_bus(df, buses):
'''
    Assign a 'bus' column based on a list of coordinates and countries.
Parameters
----------
df : pd.DataFrame
power plant list with coordinates 'lat', 'lon' and 'Country'
buses : pd.DataFrame
        bus list with coordinates 'x', 'y', 'country'
Returns
-------
DataFrame with an extra column 'bus' indicating the nearest bus.
'''
df = get_obj_if_Acc(df)
diff = set(df.Country.unique()) - set(buses.country)
if len(diff):
logger.warning(f'Power plants in {", ".join(diff)} cannot be mapped '
'because the countries do not appear in `buses`.')
res = []
for c in df.Country.unique():
res.append(map_bus(df.query('Country == @c'),
buses.query('country == @c')))
return pd.concat(res)
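# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: assigning buses to a
# tiny, made-up plant table. In real use ``buses`` would typically be the
# bus table of a PyPSA network (``network.buses``).
def _example_map_country_bus():
    plants = pd.DataFrame({'lat': [52.5, 48.1], 'lon': [13.4, 11.6],
                           'Country': ['Germany', 'Germany']})
    buses = pd.DataFrame({'x': [13.3, 11.5], 'y': [52.4, 48.2],
                          'country': ['Germany', 'Germany']},
                         index=['bus0', 'bus1'])
    return map_country_bus(plants, buses)  # adds a 'bus' column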
def to_pypsa_network(df, network, buslist=None):
"""
Export a powerplant dataframe to a pypsa.Network(), specify specific buses
to allocate the plants (buslist).
"""
df = get_obj_if_Acc(df)
df = map_bus(df, network.buses.reindex(buslist))
df.Set.replace('CHP', 'PP', inplace=True)
if 'Duration' in df:
df['weighted_duration'] = df['Duration'] * df['Capacity']
df = (df.groupby(['bus', 'Fueltype', 'Set'])
.aggregate({'Capacity': sum,
'weighted_duration': sum}))
df = df.assign(Duration=df['weighted_duration'] / df['Capacity'])
df = df.drop(columns='weighted_duration')
else:
df = (df.groupby(['bus', 'Fueltype', 'Set'])
.aggregate({'Capacity': sum}))
df = df.reset_index()
df = to_pypsa_names(df)
df.index = df.bus + ' ' + df.carrier
network.import_components_from_dataframe(df[df['component'] != 'Store'],
'Generator')
network.import_components_from_dataframe(df[df['component'] == 'Store'],
'StorageUnit')
def to_TIMES(df=None, use_scaled_capacity=False, baseyear=2015):
"""
Transform a given dataset into the TIMES format and export as .xlsx.
"""
if df is None:
from .collection import matched_data
df = matched_data()
if df is None:
raise RuntimeError("The data to be exported does not yet exist.")
df = df.loc[(df.DateIn.isnull())
| (df.DateIn <= baseyear)]
plausible = True
# Set region via country names by iso3166-2 codes
if 'Region' not in df:
pos = [i for i, x in enumerate(df.columns) if x == 'Country'][0]
df.insert(pos+1, 'Region', np.nan)
df.Country = df.Country.replace({'Czech Republic': 'Czechia'})
df.loc[:, 'Region'] = df.Country.apply(lambda c: cget(name=c).alpha_2)
df = set_denmark_region_id(df)
regions = sorted(set(df.Region))
if None in regions:
raise ValueError("There are rows without a valid country identifier "
"in the dataframe. Please check!")
# add column with TIMES-specific type. The pattern is as follows:
# 'ConELC-' + Set + '_' + Fueltype + '-' Technology
df.loc[:, 'Technology'].fillna('', inplace=True)
if 'TimesType' not in df:
pos = [i for i, x in enumerate(df.columns) if x == 'Technology'][0]
df.insert(pos+1, 'TimesType', np.nan)
df.loc[:, 'TimesType'] = pd.Series('ConELC-' for _ in range(len(df))) +\
np.where(df.loc[:, 'Set'].str.contains('CHP'), 'CHP', 'PP') +\
'_' + df.loc[:, 'Fueltype'].map(fueltype_to_abbrev())
df.loc[(df.Fueltype == 'Wind')
& (df.Technology.str.contains('offshore', case=False)),
'TimesType'] += 'F'
df.loc[(df.Fueltype == 'Wind')
& ~(df.Technology.str.contains('offshore', case=False)),
'TimesType'] += 'N'
df.loc[(df.Fueltype == 'Solar')
& (df.Technology.str.contains('CSP', case=False)),
'TimesType'] += 'CSP'
df.loc[(df.Fueltype == 'Solar')
& ~(df.Technology.str.contains('CSP', case=False)),
'TimesType'] += 'SPV'
df.loc[(df.Fueltype == 'Natural Gas')
& (df.Technology.str.contains('CCGT', case=False)),
'TimesType'] += '-CCGT'
df.loc[(df.Fueltype == 'Natural Gas')
& ~(df.Technology.str.contains('CCGT', case=False))
& (df.Technology.str.contains('OCGT', case=False)),
'TimesType'] += '-OCGT'
df.loc[(df.Fueltype == 'Natural Gas')
& ~(df.Technology.str.contains('CCGT', case=False))
& ~(df['Technology'].str.contains('OCGT', case=False)),
'TimesType'] += '-ST'
df.loc[(df.Fueltype == 'Hydro')
& (df.Technology.str.contains('pumped storage', case=False)),
'TimesType'] += '-PST'
df.loc[(df.Fueltype == 'Hydro')
& (df.Technology.str.contains('run-of-river', case=False))
& ~(df.Technology.str.contains('pumped storage', case=False)),
'TimesType'] += '-ROR'
df.loc[(df.Fueltype == 'Hydro')
& ~(df.Technology.str.contains('run-of-river', case=False))
& ~(df.Technology.str.contains('pumped storage', case=False)),
'TimesType'] += '-STO'
if None in set(df.TimesType):
raise ValueError("There are rows without a valid TIMES-Type "
"identifier in the dataframe. Please check!")
# add column with technical lifetime
if 'Life' not in df:
pos = [i for i, x in enumerate(df.columns) if x == 'Retrofit'][0]
df.insert(pos+1, 'Life', np.nan)
df.loc[:, 'Life'] = df.TimesType.map(timestype_to_life())
if df.Life.isnull().any():
raise ValueError("There are rows without a given lifetime in the "
"dataframe. Please check!")
# add column with decommissioning year
if 'YearRetire' not in df:
pos = [i for i, x in enumerate(df.columns) if x == 'Life'][0]
df.insert(pos+1, 'YearRetire', np.nan)
df.loc[:, 'YearRetire'] = df.loc[:, 'Retrofit'] + df.loc[:, 'Life']
df = set_known_retire_years(df)
# Now create empty export dataframe with headers
columns = ['Attribute', '*Unit', 'LimType', 'Year']
columns.extend(regions)
columns.append('Pset_Pn')
# Loop stepwise through technologies, years and countries
df_exp = pd.DataFrame(columns=columns)
cap_column = 'Scaled Capacity' if use_scaled_capacity else 'Capacity'
row = 0
for tt, df_tt in df.groupby('TimesType'):
for yr in range(baseyear, 2055, 5):
df_exp.loc[row, 'Year'] = yr
data_regions = df_tt.groupby('Region')
for reg in regions:
if reg in data_regions.groups:
ct_group = data_regions.get_group(reg)
# Here, all matched units existing in the dataset are being
# considered. This is needed since there can be units in
# the system which are actually already beyond their
# assumed technical lifetimes but still online in baseyear.
if yr == baseyear:
series = ct_group.apply(lambda x: x[cap_column],
axis=1)
# Here all matched units that are not retired in yr,
# are being filtered.
elif yr > baseyear:
series = ct_group.apply(lambda x: x[cap_column]
if yr >= x['DateIn']
and yr <= x['YearRetire']
else 0, axis=1)
else:
message = 'loop yr({}) below baseyear({})'
raise ValueError(message.format(yr, baseyear))
# Divide the sum by 1000 (MW->GW) and write into export df
df_exp.loc[row, reg] = series.sum()/1000.0
else:
df_exp.loc[row, reg] = 0.0
# Plausibility-Check:
if (yr > baseyear and (df_exp.loc[row, reg]
> df_exp.loc[row-1, reg])):
plausible = False
logger.error(
"For region '{}' and timestype '{}' the value for "
"year {} ({0.000}) is higher than in the year before "
"({0.000}).".format(reg, tt, yr, df_exp.loc[row, reg],
df_exp.loc[row-1, reg]))
df_exp.loc[row, 'Pset_Pn'] = tt
row += 1
df_exp.loc[:, 'Attribute'] = 'STOCK'
df_exp.loc[:, '*Unit'] = 'GW'
df_exp.loc[:, 'LimType'] = 'FX'
# Write resulting dataframe to file
if plausible:
df_exp.to_excel(_data_out('Export_Stock_TIMES.xlsx'))
return df_exp
def store_open_dataset():
from .collection import matched_data, reduce_matched_dataframe
m = (matched_data(reduced=False)
.reindex(columns=['CARMA', 'ENTSOE', 'GEO', 'GPD', 'OPSD'], level=1)
[lambda df: df.Name.notnull().any(1)])
m.to_csv(_data_out('powerplants_large.csv'))
m = m.pipe(reduce_matched_dataframe)
m.to_csv(_data_out('powerplants.csv'))
return m
def fueltype_to_abbrev():
"""
Return the fueltype-specific abbreviation.
"""
data = {'Bioenergy': 'BIO',
'Geothermal': 'GEO',
'Hard Coal': 'COA',
'Hydro': 'HYD',
'Lignite': 'LIG',
'Natural Gas': 'NG',
'Nuclear': 'NUC',
'Oil': 'OIL',
'Other': 'OTH',
'Solar': '', # DO NOT delete this entry!
'Waste': 'WST',
'Wind': 'WO'}
return data
def timestype_to_life():
"""
Returns the timestype-specific technical lifetime.
"""
return {'ConELC-PP_COA': 45,
'ConELC-PP_LIG': 45,
'ConELC-PP_NG-OCGT': 40,
'ConELC-PP_NG-ST': 40,
'ConELC-PP_NG-CCGT': 40,
'ConELC-PP_OIL': 40,
'ConELC-PP_NUC': 50,
'ConELC-PP_BIO': 25,
'ConELC-PP_HYD-ROR': 200, # According to A.K. Riekkolas comment,
'ConELC-PP_HYD-STO': 200, # these will not retire after 75-100 a,
'ConELC-PP_HYD-PST': 200, # but exist way longer at retrofit costs
'ConELC-PP_WON': 25,
'ConELC-PP_WOF': 25,
'ConELC-PP_SPV': 30,
'ConELC-PP_CSP': 30,
'ConELC-PP_WST': 30,
'ConELC-PP_SYN': 5,
'ConELC-PP_CAES': 40,
'ConELC-PP_GEO': 30,
'ConELC-PP_OTH': 5,
'ConELC-CHP_COA': 45,
'ConELC-CHP_LIG': 45,
'ConELC-CHP_NG-OCGT': 40,
'ConELC-CHP_NG-ST': 40,
'ConELC-CHP_NG-CCGT': 40,
'ConELC-CHP_OIL': 40,
'ConELC-CHP_BIO': 25,
'ConELC-CHP_WST': 30,
'ConELC-CHP_SYN': 5,
'ConELC-CHP_GEO': 30,
'ConELC-CHP_OTH': 5}
| gpl-3.0 |
wkerzendorf/tardis | tardis/plasma/properties/plasma_input.py | 1 | 1717 | import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (Input, ArrayInput, DataFrameInput)
__all__ = ['TRadiative', 'DilutionFactor', 'AtomicData', 'Abundance', 'Density',
'TimeExplosion', 'JBlues', 'LinkTRadTElectron', 'NLTESpecies',
'RadiationFieldCorrectionInput']
class TRadiative(ArrayInput):
"""
Outputs:
t_rad : Numpy Array
"""
outputs = ('t_rad',)
latex_name = ('T_{\\textrm{rad}}',)
class DilutionFactor(ArrayInput):
"""
Outputs:
w : Numpy Array
Factor used in nebular ionisation / dilute excitation calculations
to account for the dilution of the radiation field.
"""
outputs = ('w',)
latex_name = ('W',)
class AtomicData(Input):
outputs = ('atomic_data',)
class Abundance(Input):
outputs = ('abundance',)
class RadiationFieldCorrectionInput(Input):
"""
Outputs:
delta_input : Numpy Array
Used to adjust the ionisation balance to account for greater line
blanketing in the blue.
"""
outputs = ('delta_input',)
latex_name = ('\\delta_{\\textrm{input}}',)
class Density(ArrayInput):
outputs = ('density',)
latex_name = ('\\rho',)
class TimeExplosion(Input):
outputs = ('time_explosion',)
latex_name = ('t_{\\textrm{exp}}',)
class JBlues(DataFrameInput):
"""
Outputs:
j_blues : Pandas DataFrame
Mean intensity in the blue wing of each line.
"""
outputs = ('j_blues',)
latex_name = ('J_{lu}^{b}',)
class LinkTRadTElectron(Input):
outputs = ('link_t_rad_t_electron',)
latex_name = ('T_{\\textrm{electron}}/T_{\\textrm{rad}}',)
class NLTESpecies(Input):
outputs = ('nlte_species',)
| bsd-3-clause |
XInterns/IPL-Sparkers | src/getprediction.py | 1 | 2194 |
# coding: utf-8
# # Predicting the Outcome of Cricket Matches
# In[1]:
import numpy as np # imports a fast numerical programming library
import pandas as pd #lets us handle data as dataframes
#sets up pandas table display
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from patsy import dmatrices
# In[2]:
matches = pd.read_csv("../data/matcheswithfeatures.csv", index_col = 0)
# In[3]:
y, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + Total_RF_Difference', matches, return_type="dataframe")
y_arr = np.ravel(y)
# ### Splitting Training Set (2008-2013) and Test Set (2013-2015) based on Seasons
# In[4]:
X_timetrain = X.loc[X.index < 398]
Y_timetrain = y.loc[y.index < 398]
Y_timetrain_arr = np.ravel(Y_timetrain)
X_timetest = X.loc[X.index >= 398]
Y_timetest = y.loc[y.index >= 398]
Y_timetest_arr = np.ravel(Y_timetest)
# In[5]:
# Best values of k in time-based split data
knn1 = KNeighborsClassifier(n_neighbors = 31)
knn1.fit(X_timetrain, Y_timetrain_arr)
# In[6]:
def getPrediction(match_id):
'''Returns the prediction for the given match
Args: match_id (int): Match ID for the required game
    Returns: dict: predicted winner ('name') and win probability in percent ('prob'), or None on failure
'''
try:
assert (399 <= match_id <= 517)
results = {}
match_row = matches.loc[matches['id'] == match_id]
team1name = match_row.team1.unique()[0]
team2name = match_row.team2.unique()[0]
toPredict = X_timetest.loc[X_timetest.index == match_id-1].values
prediction_prob = knn1.predict_proba(toPredict)
prediction = knn1.predict(toPredict)
if prediction[0] > 0:
results['name'] = str(team1name)
results['prob'] = float(prediction_prob[0][1])*100
else:
results['name'] = str(team2name)
results['prob'] = float(prediction_prob[0][0])*100
return results
except:
return None
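# ---------------------------------------------------------------------------
# Illustrative usage sketch: match ID 400 is an arbitrary example from the
# 399-517 test range; the printed numbers depend on the fitted model above.
if __name__ == '__main__':
    result = getPrediction(400)
    if result is not None:
        print("Predicted winner: {name} ({prob:.1f}% win probability)".format(**result))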
| mit |
CassioAmador/profile_tcabr | visualization_tools/test_group_delay2.py | 1 | 2352 | """
Test group delay (raw) from each band.
Compare it with median filtered, smoothed and fitted group delay.
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import numpy as np
import scipy.signal
import sys
sys.path.insert(0, './../src/')
import proc_profile_abel_inversion as pp
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.25)
if len(sys.argv) > 1:
    shot_number = int(sys.argv[1])
else:
    shot_number = int(open('shot_number.txt', 'r').read())
shot = pp.ProcProfile(shot_number)
sweeps_average = 33
shot.reference_gd(all_shot=1, sw_clustersize=sweeps_average)
shot.plasma_gd(5000, sweeps_average, 1)
shot.find_ne_max2()
tau_coef = np.polyfit(shot.freqs2, shot.gd2, 3)
ax1, = plt.plot(shot.freqs2, shot.gd2, 'ok', label="gd (raw)")
ax2, = plt.plot(shot.freqs2, scipy.signal.medfilt(shot.gd2, 5), 'ob', label="gd (median filt-5)")
ax3, = plt.plot(shot.freqs2, shot.smooth_signal(shot.gd2), '--c', label="gd (smothed)")
ax4, = plt.plot(shot.freqs2, np.polyval(tau_coef, shot.freqs2), '--r', label="gd (fitted)")
plt.xlabel("freq (GHz)")
plt.ylabel("group delay (ns)")
plt.title("# %s - time: %s ms" % (shot.shot, shot.sweep2time(shot.sweep_cur)))
plt.legend(loc=2)
plt.xlim(15, 40)
plt.ylim(-0.1, 3)
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.13, 0.1, 0.77, 0.03], axisbg=axcolor)
sweep = Slider(axfreq, 'Sweep', 1, len(shot.points) - 1 - sweeps_average, valinit=1, valfmt='%1.f')
def update(val):
shot.plasma_gd(int(sweep.val), sweeps_average, 1)
shot.find_ne_max2()
tau_coef = np.polyfit(shot.freqs2, shot.gd2, 3)
ax1.set_xdata(shot.freqs2)
ax1.set_ydata(shot.gd2)
ax2.set_xdata(shot.freqs2)
ax2.set_ydata(scipy.signal.medfilt(shot.gd2, 5))
ax3.set_xdata(shot.freqs2)
ax3.set_ydata(shot.smooth_signal(shot.gd2))
ax4.set_xdata(shot.freqs2)
ax4.set_ydata(np.polyval(tau_coef, shot.freqs2))
ax.set_title("# %s - time: %.3f ms" % (shot.shot, shot.sweep2time(shot.sweep_cur)))
fig.canvas.draw_idle()
sweep.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sweep.reset()
button.on_clicked(reset)
plt.show()
| mit |
FrancoisRheaultUS/dipy | dipy/stats/analysis.py | 2 | 10573 |
import os
import numpy as np
from scipy.spatial import cKDTree
from scipy.ndimage.interpolation import map_coordinates
from scipy.spatial.distance import mahalanobis
from dipy.utils.optpkg import optional_package
from dipy.io.utils import save_buan_profiles_hdf5
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.tracking.streamline import (set_number_of_points,
values_from_volume,
orient_by_streamline,
transform_streamlines,
Streamlines)
pd, have_pd, _ = optional_package("pandas")
_, have_tables, _ = optional_package("tables")
def peak_values(bundle, peaks, dt, pname, bname, subject, group_id, ind, dir):
""" Peak_values function finds the generalized fractional anisotropy (gfa)
and quantitative anisotropy (qa) values from peaks object (eg: csa) for
every point on a streamline used while tracking and saves it in hd5
file.
Parameters
----------
bundle : string
Name of bundle being analyzed
peaks : peaks
contains peak directions and values
dt : DataFrame
DataFrame to be populated
pname : string
Name of the dti metric
bname : string
Name of bundle being analyzed.
subject : string
subject number as a string (e.g. 10001)
group_id : integer
        which group the subject belongs to: 1 for patient and 0 for control
    ind : integer list
        ind tells which disk number a point belongs to.
dir : string
path of output directory
"""
gfa = peaks.gfa
anatomical_measures(bundle, gfa, dt, pname+'_gfa', bname, subject, group_id,
ind, dir)
qa = peaks.qa[...,0]
anatomical_measures(bundle, qa, dt, pname+'_qa', bname, subject, group_id,
ind, dir)
def anatomical_measures(bundle, metric, dt, pname, bname, subject, group_id,
ind, dir):
""" Calculates dti measure (eg: FA, MD) per point on streamlines and
save it in hd5 file.
Parameters
----------
bundle : string
Name of bundle being analyzed
metric : matrix of float values
dti metric e.g. FA, MD
dt : DataFrame
DataFrame to be populated
pname : string
Name of the dti metric
bname : string
Name of bundle being analyzed.
subject : string
subject number as a string (e.g. 10001)
group_id : integer
        which group the subject belongs to: 1 for patient and 0 for control
    ind : integer list
        ind tells which disk number a point belongs to.
dir : string
path of output directory
"""
dt["streamline"] = []
dt["disk"] = []
dt["subject"] = []
dt[pname] = []
dt["group"] = []
values = map_coordinates(metric, bundle._data.T,
order=1)
dt["disk"].extend(ind[list(range(len(values)))]+1)
dt["subject"].extend([subject]*len(values))
dt["group"].extend([group_id]*len(values))
dt[pname].extend(values)
for st_i in range(len(bundle)):
st = bundle[st_i]
dt["streamline"].extend([st_i]*len(st))
file_name = bname + "_" + pname
save_buan_profiles_hdf5(os.path.join(dir, file_name), dt)
def assignment_map(target_bundle, model_bundle, no_disks):
"""
Calculates assignment maps of the target bundle with reference to
model bundle centroids.
Parameters
----------
target_bundle : streamlines
target bundle extracted from subject data in common space
model_bundle : streamlines
atlas bundle used as reference
no_disks : integer, optional
Number of disks used for dividing bundle into disks. (Default 100)
References
----------
.. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
E. Garyfallidis, Bundle Analytics: a computational and statistical
analyses framework for tractometric studies, Proceedings of:
International Society of Magnetic Resonance in Medicine (ISMRM),
Montreal, Canada, 2019.
"""
mbundle_streamlines = set_number_of_points(model_bundle,
nb_points=no_disks)
metric = AveragePointwiseEuclideanMetric()
qb = QuickBundles(threshold=85., metric=metric)
clusters = qb.cluster(mbundle_streamlines)
centroids = Streamlines(clusters.centroids)
_, indx = cKDTree(centroids.get_data(), 1,
copy_data=True).query(target_bundle.get_data(), k=1)
return indx
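# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: computing an
# assignment map for toy streamlines. The random bundles below stand in for
# real tractography; with real data both bundles must be in the same space.
def _example_assignment_map(no_disks=10):
    rng = np.random.RandomState(0)
    model = Streamlines([np.cumsum(rng.rand(20, 3), axis=0) for _ in range(5)])
    target = Streamlines([np.cumsum(rng.rand(20, 3), axis=0) for _ in range(7)])
    # one disk label (0 .. no_disks-1) per point of `target`
    return assignment_map(target, model, no_disks)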
def gaussian_weights(bundle, n_points=100, return_mahalnobis=False,
stat=np.mean):
"""
Calculate weights for each streamline/node in a bundle, based on a
Mahalanobis distance from the core the bundle, at that node (mean, per
default).
Parameters
----------
bundle : Streamlines
The streamlines to weight.
n_points : int, optional
The number of points to resample to. *If the `bundle` is an array, this
input is ignored*. Default: 100.
Returns
-------
w : array of shape (n_streamlines, n_points)
Weights for each node in each streamline, calculated as its relative
inverse of the Mahalanobis distance, relative to the distribution of
coordinates at that node position across streamlines.
"""
# Resample to same length for each streamline:
bundle = set_number_of_points(bundle, n_points)
# This is the output
w = np.zeros((len(bundle), n_points))
# If there's only one fiber here, it gets the entire weighting:
if len(bundle) == 1:
if return_mahalnobis:
return np.array([np.nan])
else:
return np.array([1])
for node in range(n_points):
# This should come back as a 3D covariance matrix with the spatial
# variance covariance of this node across the different streamlines
# This is a 3-by-3 array:
node_coords = bundle._data[node::n_points]
c = np.cov(node_coords.T, ddof=0)
# Reorganize as an upper diagonal matrix for expected Mahalanobis
# input:
c = np.array([[c[0, 0], c[0, 1], c[0, 2]],
[0, c[1, 1], c[1, 2]],
[0, 0, c[2, 2]]])
# Calculate the mean or median of this node as well
# delta = node_coords - np.mean(node_coords, 0)
m = stat(node_coords, 0)
# Weights are the inverse of the Mahalanobis distance
for fn in range(len(bundle)):
# In the special case where all the streamlines have the exact same
# coordinate in this node, the covariance matrix is all zeros, so
# we can't calculate the Mahalanobis distance, we will instead give
# each streamline an identical weight, equal to the number of
# streamlines:
if np.allclose(c, 0):
w[:, node] = len(bundle)
break
# Otherwise, go ahead and calculate Mahalanobis for node on
# fiber[fn]:
w[fn, node] = mahalanobis(node_coords[fn], m, np.linalg.inv(c))
if return_mahalnobis:
return w
# weighting is inverse to the distance (the further you are, the less you
# should be weighted)
w = 1 / w
# Normalize before returning, so that the weights in each node sum to 1:
return w / np.sum(w, 0)
def afq_profile(data, bundle, affine, n_points=100,
orient_by=None, weights=None, **weights_kwarg):
"""
Calculates a summarized profile of data for a bundle or tract
along its length.
Follows the approach outlined in [Yeatman2012]_.
Parameters
----------
data : 3D volume
The statistic to sample with the streamlines.
bundle : StreamLines class instance
The collection of streamlines (possibly already resampled into an array
for each to have the same length) with which we are resampling. See
Note below about orienting the streamlines.
affine : array_like (4, 4)
The mapping from voxel coordinates to streamline points.
The voxel_to_rasmm matrix, typically from a NIFTI file.
n_points: int, optional
The number of points to sample along the bundle. Default: 100.
orient_by: streamline, optional.
A streamline to use as a standard to orient all of the streamlines in
the bundle according to.
weights : 1D array or 2D array or callable (optional)
Weight each streamline (1D) or each node (2D) when calculating the
tract-profiles. Must sum to 1 across streamlines (in each node if
relevant). If callable, this is a function that calculates weights.
weights_kwarg : key-word arguments
Additional key-word arguments to pass to the weight-calculating
function. Only to be used if weights is a callable.
Returns
-------
ndarray : a 1D array with the profile of `data` along the length of
`bundle`
Notes
-----
Before providing a bundle as input to this function, you will need to make
sure that the streamlines in the bundle are all oriented in the same
orientation relative to the bundle (use :func:`orient_by_streamline`).
References
----------
.. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
"Tract Profiles of White Matter Properties: Automating Fiber-Tract
Quantification" PloS One 7 (11): e49790.
"""
if orient_by is not None:
bundle = orient_by_streamline(bundle, orient_by)
if affine is None:
affine = np.eye(4)
if len(bundle) == 0:
raise ValueError("The bundle contains no streamlines")
# Resample each streamline to the same number of points:
fgarray = set_number_of_points(bundle, n_points)
# Extract the values
values = np.array(values_from_volume(data, fgarray, affine))
if weights is None:
weights = np.ones(values.shape) / values.shape[0]
elif callable(weights):
weights = weights(bundle, **weights_kwarg)
else:
# We check that weights *always sum to 1 across streamlines*:
if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
raise ValueError("The sum of weights across streamlines must ",
"be equal to 1")
return np.sum(weights * values, 0)
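# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: a tract profile of a
# toy scalar volume using Mahalanobis-based weights. In practice `data` would
# be e.g. an FA map, `bundle` an oriented tract and `affine` the image affine.
def _example_afq_profile():
    rng = np.random.RandomState(0)
    data = rng.rand(10, 10, 10)
    line = np.linspace(1.0, 8.0, 30)
    bundle = Streamlines([np.column_stack([line, line, line]) +
                          0.1 * rng.rand(30, 3) for _ in range(6)])
    return afq_profile(data, bundle, np.eye(4), weights=gaussian_weights)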
| bsd-3-clause |
bzero/statsmodels | statsmodels/examples/tut_ols_ancova.py | 33 | 2455 | '''Examples OLS
Note: plt.show() at the end displays the graphs
Summary:
========
Relevant part of construction of design matrix
xg includes group numbers/labels,
x1 is continuous explanatory variable
>>> dummy = (xg[:,None] == np.unique(xg)).astype(float)
>>> X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
Estimate the model
>>> res2 = sm.OLS(y, X).fit()
>>> print res2.params
[ 1.00901524 3.08466166 -2.84716135 9.94655423]
>>> print res2.bse
[ 0.07499873 0.71217506 1.16037215 0.38826843]
>>> prstd, iv_l, iv_u = wls_prediction_std(res2)
"Test hypothesis that all groups have same intercept"
>>> R = [[0, 1, 0, 0],
... [0, 0, 1, 0]]
>>> print res2.f_test(R)
<F test: F=array([[ 91.69986847]]), p=[[ 8.90826383e-17]], df_denom=46, df_num=2>
strongly rejected because differences in intercept are very large
'''
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
#OLS with dummy variables, similar to ANCOVA
#-------------------------------------------
#construct simulated example:
#3 groups common slope but different intercepts
nsample = 50
x1 = np.linspace(0, 20, nsample)
sig = 1.
#suppose observations from 3 groups
xg = np.zeros(nsample, int)
xg[20:40] = 1
xg[40:] = 2
#print xg
dummy = (xg[:,None] == np.unique(xg)).astype(float)
#use group 0 as benchmark
X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate
#~~~~~~~~
res2 = sm.OLS(y, X).fit()
#print "estimated parameters: x d1-d0 d2-d0 constant"
print(res2.params)
#print "standard deviation of parameter estimates"
print(res2.bse)
prstd, iv_l, iv_u = wls_prediction_std(res2)
#print res.summary()
#plot
#~~~~
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
plt.plot(x1, res2.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('3 groups: different intercepts, common slope; blue: true, red: OLS')
plt.show()
#Test hypothesis that all groups have same intercept
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
R = [[0, 1, 0, 0],
[0, 0, 1, 0]]
# F test joint hypothesis R * beta = 0
# i.e. coefficient on both dummy variables equal zero
print("Test hypothesis that all groups have same intercept")
print(res2.f_test(R))
| bsd-3-clause |
RobertABT/heightmap | build/scipy/scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| mit |
antoinehirtz/jupyterWorkflow | jupyterworkflow/data.py | 1 | 1048 | import os
from urllib.request import urlretrieve
import pandas as pd
FERMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Fremont.csv', url=FERMONT_URL,
force_download=False):
"""Download and cache the Fremont data
Parameters
----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : Bool (optional)
if True, force redownload of data
Returns
-------
data : pandas.DataFrame
The Fremont bridge data
"""
if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['West'] + data['East']
return data
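# ---------------------------------------------------------------------------
# Illustrative usage sketch: the first call downloads the CSV (network
# access required), later calls reuse the cached file.
if __name__ == '__main__':
    data = get_fremont_data()
    print(data.resample('W').sum().head())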
| mit |
plissonf/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
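# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: comparing the NNDSVD
# initialization against a plain random one on a small non-negative matrix.
# Shapes and the random seed are arbitrary.
def _demo_initialize_nmf(n_samples=30, n_features=20, n_components=5, seed=0):
    rng = np.random.RandomState(seed)
    X = np.abs(rng.randn(n_samples, n_features))
    W, H = _initialize_nmf(X, n_components, random_state=seed)
    W_rand = np.abs(rng.randn(n_samples, n_components))
    H_rand = np.abs(rng.randn(n_components, n_features))
    # NNDSVD typically starts closer to X than a random factorization
    return norm(X - np.dot(W, H)), norm(X - np.dot(W_rand, H_rand))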
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
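# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: recovering H for a
# known factorization V = W H_true from a random non-negative starting point.
# Sizes, tolerance and iteration budget are arbitrary.
def _demo_nls_subproblem(seed=0):
    rng = np.random.RandomState(seed)
    W = np.abs(rng.randn(40, 4))
    H_true = np.abs(rng.randn(4, 25))
    V = np.dot(W, H_true)
    H0 = np.abs(rng.randn(4, 25))
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    return norm(H - H_true), n_iter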
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
ACLARKNET/aclarknet-database | aclarknet/database/plot.py | 1 | 1471 | from django.http import HttpResponse
from django.utils import timezone
from io import BytesIO
from matplotlib.dates import DateFormatter
from matplotlib.dates import date2num
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from .query import get_query_string
def get_plot(request): # http://stackoverflow.com/a/5515994/185820
"""
"""
costs = get_query_string(request, 'costs')
grosses = get_query_string(request, 'grosses')
nets = get_query_string(request, 'nets')
# Cost
x1 = [ # http://matplotlib.org/examples/api/date_demo.html
date2num(timezone.datetime.strptime(i[1], '%Y-%m-%d')) for i in costs
]
y1 = [i[0] for i in costs]
# Gross
x2 = [
date2num(timezone.datetime.strptime(i[1], '%Y-%m-%d')) for i in grosses
]
y2 = [i[0] for i in grosses]
# Net
x3 = [date2num(timezone.datetime.strptime(i[1], '%Y-%m-%d')) for i in nets]
y3 = [i[0] for i in nets]
figure = Figure()
canvas = FigureCanvasAgg(figure)
axes = figure.add_subplot(1, 1, 1)
axes.grid(True)
axes.plot(x1, y1)
axes.plot(x2, y2)
axes.plot(x3, y3)
axes.xaxis.set_major_formatter(DateFormatter('%m'))
# write image data to a string buffer and get the PNG image bytes
buf = BytesIO()
canvas.print_png(buf)
data = buf.getvalue()
# write image bytes back to the browser
return HttpResponse(data, content_type="image/png")
| mit |
francisliyy/caravel-aidp | caravel/dataframe.py | 3 | 3272 | """ Caravel wrapper around pandas.DataFrame.
TODO(bkyryliuk): add support for the conventions like: *_dim or dim_*
dimensions, *_ts, ts_*, ds_*, *_ds - datetime, etc.
TODO(bkyryliuk): recognize integer encoded enums.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
import numpy as np
INFER_COL_TYPES_THRESHOLD = 95
INFER_COL_TYPES_SAMPLE_SIZE = 100
# http://pandas.pydata.org/pandas-docs/stable/internals.html#
# subclassing-pandas-data-structures
class CaravelDataFrame(object):
def __init__(self, df):
self.__df = df.where((pd.notnull(df)), None)
@property
def size(self):
return len(self.__df.index)
@property
def data(self):
return self.__df.to_dict(orient='records')
@property
def columns_dict(self):
"""Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
"""
if self.__df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.__df.index))
sample = self.__df
if sample_size:
sample = self.__df.sample(sample_size)
for col in self.__df.dtypes.keys():
column = {
'name': col,
'type': self.__df.dtypes[col].name,
'is_date': is_date(self.__df.dtypes[col]),
'is_dim': is_dimension(self.__df.dtypes[col], col),
}
      agg = agg_func(self.__df.dtypes[col], col)
      if agg is not None:
        column['agg'] = agg
if column['type'] == 'object':
# check if encoded datetime
if (datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'type': 'datetime_string',
'is_date': True,
'is_dim': False,
'agg': None
})
      # 'agg' is an optional attribute
      if not column.get('agg'):
        column.pop('agg', None)
columns.append(column)
return columns
# It will give false positives on the numbers that are stored as strings.
# It is hard to distinguish integer numbers and timestamps
def datetime_conversion_rate(data_series):
success = 0
total = 0
for value in data_series:
total = total + 1
try:
pd.to_datetime(value)
success = success + 1
except Exception:
continue
return 100 * success / total
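# Illustrative sketch (not part of the original module; the sample values are made up):
# values that pd.to_datetime() accepts push the rate towards 100, so a column must be
# almost entirely date-like strings to pass the INFER_COL_TYPES_THRESHOLD check above.
#
#   datetime_conversion_rate(pd.Series(['2016-01-01', '2016-02-01']))   # -> 100.0
#   datetime_conversion_rate(pd.Series(['2016-01-01', 'not a date']))   # -> 50.0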
def is_date(dtype):
return dtype.name.startswith('datetime')
def is_dimension(dtype, column_name):
if is_id(column_name):
return False
return dtype == np.object or dtype == np.bool
def is_id(column_name):
return column_name.startswith('id') or column_name.endswith('id')
def agg_func(dtype, column_name):
# consider checking for key substring too.
if is_id(column_name):
return 'count_distinct'
if np.issubdtype(dtype, np.number):
return 'sum'
return None
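# Illustrative usage sketch (assumption: the DataFrame below is made up and not part of the
# original module). 'user_id' is treated as an id-like column, 'name' as a dimension and
# 'revenue' as a summable metric.
if __name__ == '__main__':
  example = CaravelDataFrame(pd.DataFrame({
      'user_id': [1, 2],
      'name': ['alice', 'bob'],
      'revenue': [10.0, 20.0]}))
  print(example.size)          # 2
  print(example.columns_dict)  # per-column name/type/is_date/is_dim (+ 'agg' where set)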
| apache-2.0 |
codyhan94/epidemic-graph-inference | scripts/generaltest.py | 1 | 2557 | """This is a general file used to test the basic functionality of our system"""
from __future__ import print_function
from pdb import set_trace
import networkx as nx
import matplotlib.pyplot as plt
import math
import sys, os
sys.path.append(os.getcwd())
# CONSTANTS
GENERATOR = "TREE" # or "GNP"
graphfile = "data/testin.graphml"
inferredfile = "data/testout.graphml"
from graph_inference.graphs.gnp import GNPGraph
from graph_inference.graphs.tree import TreeGraph
from graph_inference.sim.sirsim import SIRSim
from graph_inference.sim.sisim import SISim
from graph_inference.solver.greedysolver import GreedySolver
from graph_inference.analysis.baseanalysis import BaseAnalysis
def circlepos(G, r0=10):
pos = {}
d = max(G.degree().values())
n = G.number_of_nodes()
for node in G.nodes():
deg = 2 * math.pi * int(node) / n
r = r0 * (1 - float(G.degree(node)) / d) + 1
pos[node] = (r * math.sin(deg), r * math.cos(deg))
return pos
if __name__ == "__main__":
n = 100
    if GENERATOR == "TREE":
graph = TreeGraph()
graph.generate(n, .2)
else:
graph = GNPGraph()
graph.generate(n=n, p=1./n, directed=True)
print(graphfile, "created")
n_cascades = 500
p_init = 0.05
model = SIRSim(graph.G, n_cascades, p_init)
cascades = model.run()
print()
print("Done simulating! Now solving...")
solver = GreedySolver(cascades)
inferred = solver.solve_graph()
print()
print("Solved graph saved to", inferredfile)
print()
print("Starting analysis...")
analysis = BaseAnalysis(graph.G, inferred)
print("correct edges", analysis.edgeCorrect())
print("missing edges:", analysis.edgeError())
print("extra edges:", analysis.edgeExtra())
print("edge number:", analysis.edgeDifference())
print("degree sequence", analysis.degreeSequence())
print("degree difference", analysis.nodeDegreeDifference())
print("similarity = ", analysis.similarity())
# Make plots, using the dot package to make trees look nice.
plt.figure(1)
plt.title('Original Graph')
pos = nx.graphviz_layout(analysis.G, prog='dot')
# pos = circlepos(analysis.G)
nx.draw(analysis.G, pos, with_labels=True)
plt.figure(2)
plt.title('Analyzed Graph')
label = "{} cascades with p_init = {}.".format(n_cascades, p_init)
plt.figtext(0.3, 0.1, label)
pos = nx.graphviz_layout(analysis.H, prog='dot')
# pos = circlepos(analysis.G)
nx.draw(analysis.H, pos, with_labels=True)
# plt.show()
| mit |
annoviko/pyclustering | pyclustering/cluster/bang.py | 1 | 44713 | """!
@brief Cluster analysis algorithm: BANG.
@details Implementation based on paper @cite inproceedings::bang::1.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import itertools
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.encoder import type_encoding
from pyclustering.utils import data_corners
from pyclustering.utils.color import color as color_list
class bang_visualizer:
"""!
@brief Visualizer of BANG algorithm's results.
@details BANG visualizer provides visualization services that are specific for BANG algorithm.
"""
__maximum_density_alpha = 0.6
@staticmethod
def show_blocks(directory):
"""!
@brief Show BANG-blocks (leafs only) in data space.
        @details BANG-blocks represent the grid that was used for the clustering process.
@param[in] directory (bang_directory): Directory that was created by BANG algorithm during clustering process.
"""
dimension = len(directory.get_data()[0])
amount_canvases = 1
if dimension > 1:
amount_canvases = int(dimension * (dimension - 1) / 2)
figure = plt.figure()
grid_spec = gridspec.GridSpec(1, amount_canvases)
pairs = list(itertools.combinations(range(dimension), 2))
if len(pairs) == 0: pairs = [(0, 0)]
for index in range(amount_canvases):
ax = figure.add_subplot(grid_spec[index])
bang_visualizer.__draw_blocks(ax, directory.get_leafs(), pairs[index])
bang_visualizer.__draw_two_dimension_data(ax, directory.get_data(), pairs[index])
plt.show()
plt.close(figure)
@staticmethod
def show_dendrogram(dendrogram):
"""!
@brief Display dendrogram of BANG-blocks.
@param[in] dendrogram (list): List representation of dendrogram of BANG-blocks.
@see bang.get_dendrogram()
"""
figure = plt.figure()
axis = plt.subplot(1, 1, 1)
current_position = 0
for index_cluster in range(len(dendrogram)):
densities = [ block.get_density() for block in dendrogram[index_cluster] ]
xrange = range(current_position, current_position + len(densities))
axis.bar(xrange, densities, 1.0, linewidth=0.0, color=color_list.get_color(index_cluster))
current_position += len(densities)
axis.set_ylabel("density")
axis.set_xlabel("block")
axis.xaxis.set_ticklabels([])
plt.xlim([-0.5, current_position - 0.5])
plt.show()
plt.close(figure)
@staticmethod
def show_clusters(data, clusters, noise=None):
"""!
@brief Display BANG clustering results.
@param[in] data (list): Dataset that was used for clustering.
@param[in] clusters (array_like): Clusters that were allocated by the algorithm.
@param[in] noise (array_like): Noise that were allocated by the algorithm.
"""
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, data)
visualizer.append_cluster(noise or [], data, marker='x')
figure = visualizer.show()
visualizer.close(figure)
@staticmethod
def __draw_two_dimension_data(ax, data, pair):
"""!
@brief Display data in two-dimensional canvas.
@param[in] ax (Axis): Canvas where data should be displayed.
@param[in] data (list): Data points that should be displayed.
@param[in] pair (tuple): Pair of dimension indexes.
"""
ax.set_xlabel("x%d" % pair[0])
ax.set_ylabel("x%d" % pair[1])
for point in data:
if len(data[0]) > 1:
ax.plot(point[pair[0]], point[pair[1]], color='red', marker='.')
else:
ax.plot(point[pair[0]], 0, color='red', marker='.')
ax.yaxis.set_ticklabels([])
@staticmethod
def __draw_blocks(ax, blocks, pair):
"""!
@brief Display BANG-blocks on specified figure.
@param[in] ax (Axis): Axis where bang-blocks should be displayed.
        @param[in] blocks (list): List of blocks that should be displayed.
@param[in] pair (tuple): Pair of coordinate index that should be displayed.
"""
ax.grid(False)
density_scale = blocks[-1].get_density()
for block in blocks:
bang_visualizer.__draw_block(ax, pair, block, density_scale)
@staticmethod
def __draw_block(ax, pair, block, density_scale):
"""!
@brief Display BANG-block on the specified ax.
@param[in] ax (Axis): Axis where block should be displayed.
@param[in] pair (tuple): Pair of coordinate index that should be displayed.
@param[in] block (bang_block): BANG-block that should be displayed.
@param[in] density_scale (double): Max density to display density of the block by appropriate tone.
"""
max_corner, min_corner = bang_visualizer.__get_rectangle_description(block, pair)
belong_cluster = block.get_cluster() is not None
if density_scale != 0.0:
density_scale = bang_visualizer.__maximum_density_alpha * block.get_density() / density_scale
face_color = matplotlib.colors.to_rgba('blue', alpha=density_scale)
edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)
rect = patches.Rectangle(min_corner, max_corner[0] - min_corner[0], max_corner[1] - min_corner[1],
fill=belong_cluster,
facecolor=face_color,
edgecolor=edge_color,
linewidth=0.5)
ax.add_patch(rect)
@staticmethod
def __get_rectangle_description(block, pair):
"""!
@brief Create rectangle description for block in specific dimension.
@param[in] pair (tuple): Pair of coordinate index that should be displayed.
@param[in] block (bang_block): BANG-block that should be displayed
@return (tuple) Pair of corners that describes rectangle.
"""
max_corner, min_corner = block.get_spatial_block().get_corners()
max_corner = [max_corner[pair[0]], max_corner[pair[1]]]
min_corner = [min_corner[pair[0]], min_corner[pair[1]]]
if pair == (0, 0):
max_corner[1], min_corner[1] = 1.0, -1.0
return max_corner, min_corner
class bang_animator:
"""!
@brief Provides service for creating 2-D animation using BANG clustering results.
    @details The animator does not support visualization of the clustering process when non 2-dimensional data is used.
Code example of animation of BANG clustering process:
@code
from pyclustering.cluster.bang import bang, bang_animator
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
        # Read two-dimensional data.
data = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# Create instance of BANG algorithm.
bang_instance = bang(data, 9)
bang_instance.process()
# Obtain clustering results.
clusters = bang_instance.get_clusters()
noise = bang_instance.get_noise()
directory = bang_instance.get_directory()
# Create BANG animation using class 'bang_animator':
animator = bang_animator(directory, clusters)
animator.animate()
@endcode
"""
def __init__(self, directory, clusters):
"""!
@brief Creates BANG animator instance.
@param[in] directory (bang_directory): BANG directory that was formed during BANG clustering process.
@param[in] clusters (list): Allocated clusters during BANG clustering process.
"""
self.__directory = directory
self.__clusters = clusters
self.__noise = []
self.__current_block = 0
self.__current_level = 0
self.__level_blocks = directory.get_level(0)
self.__figure = plt.figure()
self.__ax = self.__figure.add_subplot(1, 1, 1)
self.__special_frame = 0
self.__validate_arguments()
def __del__(self):
"""!
@brief Destructor of the BANG animator.
"""
plt.close(self.__figure)
def __validate_arguments(self):
"""!
@brief Check correctness of input arguments and throw exception if incorrect is found.
"""
if len(self.__directory.get_data()[0]) != 2:
raise ValueError("Impossible to animate BANG clustering process for non 2D data.")
def __increment_block(self):
"""!
@brief Increment BANG block safely by updating block index, level and level block.
"""
self.__current_block += 1
if self.__current_block >= len(self.__level_blocks):
self.__current_block = 0
self.__current_level += 1
if self.__current_level < self.__directory.get_height():
self.__level_blocks = self.__directory.get_level(self.__current_level)
def __draw_block(self, block, block_alpha=0.0):
"""!
@brief Display single BANG block on axis.
@param[in] block (bang_block): BANG block that should be displayed.
@param[in] block_alpha (double): Transparency level - value of alpha.
"""
max_corner, min_corner = block.get_spatial_block().get_corners()
face_color = matplotlib.colors.to_rgba('blue', alpha=block_alpha)
edge_color = matplotlib.colors.to_rgba('black', alpha=1.0)
rect = patches.Rectangle(min_corner, max_corner[0] - min_corner[0], max_corner[1] - min_corner[1],
fill=True,
facecolor=face_color,
edgecolor=edge_color,
linewidth=0.5)
self.__ax.add_patch(rect)
def __draw_leaf_density(self):
"""!
@brief Display densities by filling blocks by appropriate colors.
"""
leafs = self.__directory.get_leafs()
density_scale = leafs[-1].get_density()
if density_scale == 0.0: density_scale = 1.0
for block in leafs:
alpha = 0.8 * block.get_density() / density_scale
self.__draw_block(block, alpha)
def __draw_clusters(self):
"""!
@brief Display clusters and outliers using different colors.
"""
data = self.__directory.get_data()
for index_cluster in range(len(self.__clusters)):
color = color_list.get_color(index_cluster)
self.__draw_cluster(data, self.__clusters[index_cluster], color, '.')
self.__draw_cluster(self.__directory.get_data(), self.__noise, 'gray', 'x')
def __draw_cluster(self, data, cluster, color, marker):
"""!
@brief Draw 2-D single cluster on axis using specified color and marker.
"""
for item in cluster:
self.__ax.plot(data[item][0], data[item][1], color=color, marker=marker)
def animate(self, animation_velocity=75, movie_fps=25, movie_filename=None):
"""!
@brief Animates clustering process that is performed by BANG algorithm.
@param[in] animation_velocity (uint): Interval between frames in milliseconds (for run-time animation only).
@param[in] movie_fps (uint): Defines frames per second (for rendering movie only).
@param[in] movie_filename (string): If it is specified then animation will be stored to file that is specified in this parameter.
"""
def init_frame():
self.__figure.clf()
self.__ax = self.__figure.add_subplot(1, 1, 1)
self.__figure.suptitle("BANG algorithm", fontsize=18, fontweight='bold')
for point in self.__directory.get_data():
self.__ax.plot(point[0], point[1], color='red', marker='.')
return frame_generation(0)
def frame_generation(index_iteration):
if self.__current_level < self.__directory.get_height():
block = self.__level_blocks[self.__current_block]
self.__draw_block(block)
self.__increment_block()
else:
if self.__special_frame == 0:
self.__draw_leaf_density()
elif self.__special_frame == 15:
self.__draw_clusters()
elif self.__special_frame == 30:
self.__figure.clf()
self.__ax = self.__figure.add_subplot(1, 1, 1)
self.__figure.suptitle("BANG algorithm", fontsize=18, fontweight='bold')
self.__draw_clusters()
self.__special_frame += 1
iterations = len(self.__directory) + 60
# print("Total number of iterations: %d" % iterations)
cluster_animation = animation.FuncAnimation(self.__figure, frame_generation, iterations,
interval=animation_velocity,
init_func=init_frame,
repeat_delay=5000)
if movie_filename is not None:
cluster_animation.save(movie_filename, writer = 'ffmpeg', fps = movie_fps, bitrate = 3500)
else:
plt.show()
class bang_directory:
"""!
    @brief BANG directory stores BANG-blocks that represent the grid in data space.
    @details The directory builds BANG-blocks in a binary-tree manner. Leafs of the tree are stored separately to
             provide direct access to the leafs that should be analysed. Leafs cache data-points.
"""
def __init__(self, data, levels, **kwargs):
"""!
@brief Create BANG directory - basically tree structure with direct access to leafs.
@param[in] data (list): Input data that is clustered.
@param[in] levels (uint): Height of the tree of blocks.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'observe').
<b>Keyword Args:</b><br>
- observe (bool): If 'True' then blocks on each level are stored.
            - density_threshold (double): Density level at or below which the data contained in a bang-block is
              considered noise, so there is no need to split the block further down to the last level. Be aware that
              this parameter is used together with the 'amount_threshold' parameter.
            - amount_threshold (uint): Amount of points at or below which the data contained in a bang-block is
              considered noise, so there is no need to split the block further down to the last level.
"""
self.__data = data
self.__levels = levels
self.__density_threshold = kwargs.get('density_threshold', 0.0)
self.__amount_density = kwargs.get('amount_threshold', 0)
self.__leafs = []
self.__root = None
self.__level_blocks = []
self.__size = 0
self.__observe = kwargs.get('observe', True)
self.__create_directory()
def __len__(self):
"""!
@brief Returns amount of blocks that is stored in the directory
@return (uint) Amount of blocks in the BANG directory.
"""
return self.__size
def get_data(self):
"""!
@brief Return data that is stored in the directory.
@return (list) List of points that represents stored data.
"""
return self.__data
def get_leafs(self):
"""!
@brief Return leafs - the smallest blocks.
@details Some leafs can be bigger than others because splitting is not performed for blocks whose density is
less than threshold.
@return (list) List of blocks that are leafs of BANG directory.
"""
return self.__leafs
def get_level(self, level):
"""!
@brief Returns BANG blocks on the specific level.
@param[in] level (uint): Level of tree where BANG blocks are located.
@return (list) List of BANG blocks on the specific level.
"""
return self.__level_blocks[level]
def get_height(self):
"""!
@brief Returns height of BANG tree where blocks are stored.
@return (uint) Height of BANG tree.
"""
return len(self.__level_blocks)
def __create_directory(self):
"""!
@brief Create BANG directory as a tree with separate storage for leafs.
"""
min_corner, max_corner = data_corners(self.__data)
data_block = spatial_block(max_corner, min_corner)
cache_require = (self.__levels == 1)
self.__root = bang_block(self.__data, 0, 0, data_block, cache_require)
if cache_require:
self.__leafs.append(self.__root)
self.__store_level_blocks([self.__root])
else:
self.__build_directory_levels()
def __store_level_blocks(self, level_blocks):
"""!
@brief Store level blocks if observing is enabled.
@param[in] level_blocks (list): Created blocks on a new level.
"""
self.__size += len(level_blocks)
if self.__observe is True:
self.__level_blocks.append(level_blocks)
def __build_directory_levels(self):
"""!
        @brief Build levels of the directory if the amount of levels is greater than one.
"""
previous_level_blocks = [ self.__root ]
for level in range(1, self.__levels):
previous_level_blocks = self.__build_level(previous_level_blocks, level)
self.__store_level_blocks(previous_level_blocks)
self.__leafs = sorted(self.__leafs, key=lambda block: block.get_density())
def __build_level(self, previous_level_blocks, level):
"""!
@brief Build new level of directory.
@param[in] previous_level_blocks (list): BANG-blocks on the previous level.
@param[in] level (uint): Level number that should be built.
@return (list) New block on the specified level.
"""
current_level_blocks = []
split_dimension = level % len(self.__data[0])
cache_require = (level == self.__levels - 1)
for block in previous_level_blocks:
self.__split_block(block, split_dimension, cache_require, current_level_blocks)
if cache_require:
self.__leafs += current_level_blocks
return current_level_blocks
def __split_block(self, block, split_dimension, cache_require, current_level_blocks):
"""!
@brief Split specific block in specified dimension.
        @details Split is not performed for blocks whose density is lower than the threshold value; such blocks are
                 put to leafs.
@param[in] block (bang_block): BANG-block that should be split.
@param[in] split_dimension (uint): Dimension at which splitting should be performed.
@param[in] cache_require (bool): Defines when points in cache should be stored during density calculation.
@param[in|out] current_level_blocks (list): Block storage at the current level where new blocks should be added.
"""
if block.get_density() <= self.__density_threshold or len(block) <= self.__amount_density:
self.__leafs.append(block)
else:
left, right = block.split(split_dimension, cache_require)
current_level_blocks.append(left)
current_level_blocks.append(right)
class spatial_block:
"""!
@brief Geometrical description of BANG block in data space.
@details Provides services related to spatial functionality and used by bang_block
@see bang_block
"""
def __init__(self, max_corner, min_corner):
"""!
@brief Creates spatial block in data space.
@param[in] max_corner (array_like): Maximum corner coordinates of the block.
@param[in] min_corner (array_like): Minimal corner coordinates of the block.
"""
self.__max_corner = max_corner
self.__min_corner = min_corner
self.__volume = self.__calculate_volume()
def __str__(self):
"""!
@brief Returns string block description.
@return String representation of the block.
"""
return "(max: %s; min: %s)" % (self.__max_corner, self.__min_corner)
def __contains__(self, point):
"""!
        @brief Point is considered as contained if it lies in the block (belongs to it).
@return (bool) True if point is in block, otherwise False.
"""
for i in range(len(point)):
if point[i] < self.__min_corner[i] or point[i] > self.__max_corner[i]:
return False
return True
def get_corners(self):
"""!
@brief Return spatial description of current block.
@return (tuple) Pair of maximum and minimum corners (max_corner, min_corner).
"""
return self.__max_corner, self.__min_corner
def get_volume(self):
"""!
@brief Returns volume of current block.
        @details Volume has an uncommon meaning here: for 1D it is the length of a line, for 2D the area of a
                 rectangle, for 3D the volume of a 3D figure, and for ND the volume of an ND figure.
@return (double) Volume of current block.
"""
return self.__volume
def split(self, dimension):
"""!
@brief Split current block into two spatial blocks in specified dimension.
@param[in] dimension (uint): Dimension where current block should be split.
@return (tuple) Pair of new split blocks from current block.
"""
first_max_corner = self.__max_corner[:]
second_min_corner = self.__min_corner[:]
split_border = (self.__max_corner[dimension] + self.__min_corner[dimension]) / 2.0
first_max_corner[dimension] = split_border
second_min_corner[dimension] = split_border
return spatial_block(first_max_corner, self.__min_corner), spatial_block(self.__max_corner, second_min_corner)
def is_neighbor(self, block):
"""!
@brief Performs calculation to identify whether specified block is neighbor of current block.
@details It also considers diagonal blocks as neighbors.
        @param[in] block (spatial_block): Another block that is checked for being a neighbor.
        @return (bool) True if blocks are neighbors, False otherwise.
"""
if block is not self:
block_max_corner, _ = block.get_corners()
dimension = len(block_max_corner)
neighborhood_score = self.__calculate_neighborhood(block_max_corner)
if neighborhood_score == dimension:
return True
return False
def __calculate_neighborhood(self, block_max_corner):
"""!
        @brief Calculates neighborhood score that defines whether blocks are neighbors.
@param[in] block_max_corner (list): Maximum coordinates of other block.
@return (uint) Neighborhood score.
"""
dimension = len(block_max_corner)
length_edges = [self.__max_corner[i] - self.__min_corner[i] for i in range(dimension)]
neighborhood_score = 0
for i in range(dimension):
diff = abs(block_max_corner[i] - self.__max_corner[i])
if diff <= length_edges[i] + length_edges[i] * 0.0001:
neighborhood_score += 1
return neighborhood_score
def __calculate_volume(self):
"""!
@brief Calculates volume of current spatial block.
        @details If an empty dimension is detected (where all points have the same value) then such dimension is ignored
during calculation of volume.
@return (double) Volume of current spatial block.
"""
volume = 0.0
for i in range(0, len(self.__max_corner)):
side_length = self.__max_corner[i] - self.__min_corner[i]
if side_length != 0.0:
if volume == 0.0: volume = side_length
else: volume *= side_length
return volume
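# Illustrative sketch (not part of the original module; the coordinates are assumptions):
# two unit squares that share a border are reported as neighbors by 'is_neighbor'.
if __name__ == '__main__':
    _left_square = spatial_block([1.0, 1.0], [0.0, 0.0])
    _right_square = spatial_block([2.0, 1.0], [1.0, 0.0])
    print(_left_square.is_neighbor(_right_square))   # True - blocks touch along x = 1.0
    print(_left_square.get_volume())                 # 1.0 - area of the unit square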
class bang_block:
"""!
    @brief BANG-block that represents a spatial region in data space.
"""
def __init__(self, data, region, level, space_block, cache_points=False):
"""!
@brief Create BANG-block.
@param[in] data (list): List of points that are processed.
@param[in] region (uint): Region number - unique value on a level.
@param[in] level (uint): Level number where block is created.
@param[in] space_block (spatial_block): Spatial block description in data space.
@param[in] cache_points (bool): if True then points are stored in memory (used for leaf blocks).
"""
self.__data = data
self.__region_number = region
self.__level = level
self.__spatial_block = space_block
self.__cache_points = cache_points
self.__cluster = None
self.__points = None
self.__amount_points = self.__get_amount_points()
self.__density = self.__calculate_density(self.__amount_points)
def __str__(self):
"""!
@brief Returns string representation of BANG-block using region number and level where block is located.
"""
return "(" + str(self.__region_number) + ", " + str(self.__level) + ")"
def __len__(self):
"""!
@brief Returns block size defined by amount of points that are contained by this block.
"""
return self.__amount_points
def get_region(self):
"""!
@brief Returns region number of BANG-block.
        @details Region number is unique among region numbers on a directory level. The pair of region number and
                 level is unique for the whole directory.
@return (uint) Region number.
"""
return self.__region_number
def get_density(self):
"""!
@brief Returns density of the BANG-block.
@return (double) BANG-block density.
"""
return self.__density
def get_cluster(self):
"""!
        @brief Return index of the cluster to which the BANG-block belongs.
@details Index of cluster may have None value if the block was not assigned to any cluster.
@return (uint) Index of cluster or None if the block does not belong to any cluster.
"""
return self.__cluster
def get_spatial_block(self):
"""!
@brief Return spatial block - BANG-block description in data space.
@return (spatial_block) Spatial block of the BANG-block.
"""
return self.__spatial_block
def get_points(self):
"""!
        @brief Return points that are covered by the BANG-block.
@return (list) List of point indexes that are covered by the block.
"""
if self.__points is None:
self.__cache_covered_data()
return self.__points
def set_cluster(self, index):
"""!
@brief Assign cluster to the BANG-block by index.
        @param[in] index (uint): Index of the cluster that is assigned to the BANG-block.
"""
self.__cluster = index
def is_neighbor(self, block):
"""!
@brief Performs calculation to check whether specified block is neighbor to the current.
@param[in] block (bang_block): Other BANG-block that should be checked for neighborhood.
@return (bool) True if blocks are neighbors, False if blocks are not neighbors.
"""
return self.get_spatial_block().is_neighbor(block.get_spatial_block())
def split(self, split_dimension, cache_points):
"""!
@brief Split BANG-block into two new blocks in specified dimension.
@param[in] split_dimension (uint): Dimension where block should be split.
@param[in] cache_points (bool): If True then covered points are cached. Used for leaf blocks.
        @return (tuple) Pair of BANG-blocks that were formed from the current one.
"""
left_region_number = self.__region_number
right_region_number = self.__region_number + 2 ** self.__level
first_spatial_block, second_spatial_block = self.__spatial_block.split(split_dimension)
left = bang_block(self.__data, left_region_number, self.__level + 1, first_spatial_block, cache_points)
right = bang_block(self.__data, right_region_number, self.__level + 1, second_spatial_block, cache_points)
return left, right
def __calculate_density(self, amount_points):
"""!
@brief Calculates BANG-block density.
@param[in] amount_points (uint): Amount of points in block.
@return (double) BANG-block density.
"""
volume = self.__spatial_block.get_volume()
if volume != 0.0:
return amount_points / volume
return 0.0
def __get_amount_points(self):
"""!
        @brief Count points covered by the BANG-block and, if caching is enabled, store the covered points.
@return (uint) Amount of covered points.
"""
amount = 0
for index in range(len(self.__data)):
if self.__data[index] in self.__spatial_block:
self.__cache_point(index)
amount += 1
return amount
def __cache_covered_data(self):
"""!
@brief Cache covered data.
"""
self.__cache_points = True
self.__points = []
for index_point in range(len(self.__data)):
if self.__data[index_point] in self.__spatial_block:
self.__cache_point(index_point)
def __cache_point(self, index):
"""!
        @brief Store the index of a covered point.
@param[in] index (uint): Index point that should be stored.
"""
if self.__cache_points:
if self.__points is None:
self.__points = []
self.__points.append(index)
class bang:
"""!
@brief Class implements BANG grid based clustering algorithm.
    @details The BANG clustering algorithm uses a multidimensional grid structure to organize the value space surrounding
the pattern values. The patterns are grouped into blocks and clustered with respect to the blocks by
a topological neighbor search algorithm @cite inproceedings::bang::1.
Code example of BANG usage:
@code
from pyclustering.cluster.bang import bang, bang_visualizer
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import FCPS_SAMPLES
        # Read three-dimensional data.
data = read_sample(FCPS_SAMPLES.SAMPLE_CHAINLINK)
# Prepare algorithm's parameters.
levels = 11
# Create instance of BANG algorithm.
bang_instance = bang(data, levels)
bang_instance.process()
# Obtain clustering results.
clusters = bang_instance.get_clusters()
noise = bang_instance.get_noise()
directory = bang_instance.get_directory()
dendrogram = bang_instance.get_dendrogram()
# Visualize BANG clustering results.
bang_visualizer.show_blocks(directory)
bang_visualizer.show_dendrogram(dendrogram)
bang_visualizer.show_clusters(data, clusters, noise)
@endcode
    Below is a visualization of BANG-clustering of the three-dimensional data 'chainlink'. BANG-blocks that were formed
    during processing are shown in the following figure. The darkest color means the highest density; blocks that do not
    cover points are transparent:
@image html bang_blocks_chainlink.png "Fig. 1. BANG-blocks that cover input data."
    Here is the obtained dendrogram, which can be used for further analysis to improve clustering results:
@image html bang_dendrogram_chainlink.png "Fig. 2. BANG dendrogram where the X-axis contains BANG-blocks, the Y-axis contains density."
BANG clustering result of 'chainlink' data:
@image html bang_clustering_chainlink.png "Fig. 3. BANG clustering result. Data: 'chainlink'."
"""
def __init__(self, data, levels, ccore=False, **kwargs):
"""!
@brief Create BANG clustering algorithm.
@param[in] data (list): Input data (list of points) that should be clustered.
        @param[in] levels (uint): Amount of levels in the tree that is used for splitting (how many times blocks should
                    be split). For example, if the amount of levels is two then the surface will be divided into two
                    blocks and each obtained block will be divided again.
@param[in] ccore (bool): Reserved positional argument - not used yet.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'observe').
<b>Keyword Args:</b><br>
            - density_threshold (double): If block density is smaller than this value then the data contained by this
            block is considered noise and its points outliers. Block density is defined by the amount of
            points in the block divided by the block volume: <i>amount_block_points</i>/<i>block_volume</i>. By default
            it is 0.0, which means that only empty blocks are considered as noise. Be aware that this parameter is used
            with the parameter 'amount_threshold' - the maximum threshold is considered during processing.
            - amount_threshold (uint): Amount of points at or below which the data contained in a bang-block is
            considered noise, so there is no need to split the block further down to the last level. Be aware that this
            parameter is used with the parameter 'density_threshold' - the maximum threshold is considered during processing.
"""
self.__data = data
self.__levels = levels
self.__directory = None
self.__clusters = []
self.__noise = []
self.__cluster_blocks = []
self.__dendrogram = []
self.__density_threshold = kwargs.get('density_threshold', 0.0)
self.__amount_threshold = kwargs.get('amount_threshold', 0)
self.__ccore = ccore
self.__validate_arguments()
def process(self):
"""!
@brief Performs clustering process in line with rules of BANG clustering algorithm.
@return (bang) Returns itself (BANG instance).
@see get_clusters()
@see get_noise()
@see get_directory()
@see get_dendrogram()
"""
self.__directory = bang_directory(self.__data, self.__levels,
density_threshold=self.__density_threshold,
amount_threshold=self.__amount_threshold)
self.__allocate_clusters()
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters.
@remark Allocated clusters are returned only after data processing (method process()). Otherwise empty list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_noise()
"""
return self.__clusters
def get_noise(self):
"""!
@brief Returns allocated noise.
@remark Allocated noise is returned only after data processing (method process()). Otherwise empty list is returned.
@return (list) List of indexes that are marked as a noise.
@see process()
@see get_clusters()
"""
return self.__noise
def get_directory(self):
"""!
@brief Returns grid directory that describes grid of the processed data.
@remark Grid directory is returned only after data processing (method process()). Otherwise None value is returned.
@return (bang_directory) BANG directory that describes grid of process data.
@see process()
"""
return self.__directory
def get_dendrogram(self):
"""!
@brief Returns dendrogram of clusters.
@details Dendrogram is created in following way: the density indices of all regions are calculated and sorted
in decreasing order for each cluster during clustering process.
@remark Dendrogram is returned only after data processing (method process()). Otherwise empty list is returned.
"""
return self.__dendrogram
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __validate_arguments(self):
"""!
@brief Check input arguments of BANG algorithm and if one of them is not correct then appropriate exception
is thrown.
"""
if len(self.__data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__data))
if self.__levels < 1:
raise ValueError("Height of the tree should be greater than 0 (current value: '%d')." % self.__levels)
if self.__density_threshold < 0.0:
raise ValueError("Density threshold should be greater or equal to 0 (current value: '%d')." %
self.__density_threshold)
if self.__amount_threshold < 0:
            raise ValueError("Amount of points threshold should be greater than or equal to 0 (current value: '%d')" %
self.__amount_threshold)
def __allocate_clusters(self):
"""!
@brief Performs cluster allocation using leafs of tree in BANG directory (the smallest cells).
"""
leaf_blocks = self.__directory.get_leafs()
unhandled_block_indexes = set([i for i in range(len(leaf_blocks)) if leaf_blocks[i].get_density() > self.__density_threshold])
current_block = self.__find_block_center(leaf_blocks, unhandled_block_indexes)
cluster_index = 0
while current_block is not None:
if current_block.get_density() <= self.__density_threshold or len(current_block) <= self.__amount_threshold:
break
self.__expand_cluster_block(current_block, cluster_index, leaf_blocks, unhandled_block_indexes)
current_block = self.__find_block_center(leaf_blocks, unhandled_block_indexes)
cluster_index += 1
self.__store_clustering_results(cluster_index, leaf_blocks)
def __expand_cluster_block(self, block, cluster_index, leaf_blocks, unhandled_block_indexes):
"""!
@brief Expand cluster from specific block that is considered as a central block.
@param[in] block (bang_block): Block that is considered as a central block for cluster.
@param[in] cluster_index (uint): Index of cluster that is assigned to blocks that forms new cluster.
@param[in] leaf_blocks (list): Leaf BANG-blocks that are considered during cluster formation.
@param[in] unhandled_block_indexes (set): Set of candidates (BANG block indexes) to become a cluster member. The
parameter helps to reduce traversing among BANG-block providing only restricted set of block that
should be considered.
"""
block.set_cluster(cluster_index)
self.__update_cluster_dendrogram(cluster_index, [block])
neighbors = self.__find_block_neighbors(block, leaf_blocks, unhandled_block_indexes)
self.__update_cluster_dendrogram(cluster_index, neighbors)
for neighbor in neighbors:
neighbor.set_cluster(cluster_index)
neighbor_neighbors = self.__find_block_neighbors(neighbor, leaf_blocks, unhandled_block_indexes)
self.__update_cluster_dendrogram(cluster_index, neighbor_neighbors)
neighbors += neighbor_neighbors
def __store_clustering_results(self, amount_clusters, leaf_blocks):
"""!
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).
"""
self.__clusters = [[] for _ in range(amount_clusters)]
for block in leaf_blocks:
index = block.get_cluster()
if index is not None:
self.__clusters[index] += block.get_points()
else:
self.__noise += block.get_points()
self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]
self.__noise = list(set(self.__noise))
def __find_block_center(self, level_blocks, unhandled_block_indexes):
"""!
@brief Search block that is cluster center for new cluster.
@return (bang_block) Central block for new cluster, if cluster is not found then None value is returned.
"""
for i in reversed(range(len(level_blocks))):
if level_blocks[i].get_density() <= self.__density_threshold:
return None
if level_blocks[i].get_cluster() is None:
unhandled_block_indexes.remove(i)
return level_blocks[i]
return None
def __find_block_neighbors(self, block, level_blocks, unhandled_block_indexes):
"""!
@brief Search block neighbors that are parts of new clusters (density is greater than threshold and that are
not cluster members yet), other neighbors are ignored.
@param[in] block (bang_block): BANG-block for which neighbors should be found (which can be part of cluster).
@param[in] level_blocks (list): BANG-blocks on specific level.
@param[in] unhandled_block_indexes (set): Blocks that have not been processed yet.
@return (list) Block neighbors that can become part of cluster.
"""
neighbors = []
handled_block_indexes = []
for unhandled_index in unhandled_block_indexes:
if block.is_neighbor(level_blocks[unhandled_index]):
handled_block_indexes.append(unhandled_index)
neighbors.append(level_blocks[unhandled_index])
# Maximum number of neighbors is eight
if len(neighbors) == 8:
break
for handled_index in handled_block_indexes:
unhandled_block_indexes.remove(handled_index)
return neighbors
def __update_cluster_dendrogram(self, index_cluster, blocks):
"""!
@brief Append clustered blocks to dendrogram.
@param[in] index_cluster (uint): Cluster index that was assigned to blocks.
@param[in] blocks (list): Blocks that were clustered.
"""
if len(self.__dendrogram) <= index_cluster:
self.__dendrogram.append([])
blocks = sorted(blocks, key=lambda block: block.get_density(), reverse=True)
self.__dendrogram[index_cluster] += blocks
| gpl-3.0 |
automl/SpySMAC | cave/plot/cdf.py | 1 | 1207 | import matplotlib.pyplot as plt
import numpy as np
def plot_cdf(x_list, y_list, label_list, timeout, out_fn):
"""
Parameters
----------
x_list, y_list: List[np.array]
zip lists and plot all data on one plot
    label_list: List[str]
        legend labels corresponding to x, y
timeout: float
if set, timeouts are marked at this point
out_fn: str
filename
Returns
-------
out_fn: str
filename
"""
f = plt.figure(1, dpi=100, figsize=(10, 10))
ax = f.add_subplot(1, 1, 1)
colors = ['red', 'blue', 'green']
for x, y, l, c in zip(x_list, y_list, label_list, colors):
ax.step(x, y, color=c, linestyle='-', label=l)
ax.legend()
ax.grid(True)
ax.set_xscale('log')
ax.set_ylabel('probability of being solved')
ax.set_xlabel('time')
# Plot 'timeout'
if timeout:
ax.text(timeout,
ax.get_ylim()[0] - 0.1 * np.abs(ax.get_ylim()[0]),
"timeout ", horizontalalignment='center',
verticalalignment="top", rotation=30)
ax.axvline(x=timeout, linestyle='--')
f.tight_layout()
f.savefig(out_fn)
plt.close(f)
return out_fn
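# Illustrative usage sketch (assumption: the data, label and output path below are made up):
if __name__ == "__main__":
    times = [np.array([1.0, 2.0, 5.0, 10.0])]
    solved = [np.array([0.25, 0.5, 0.75, 1.0])]
    plot_cdf(times, solved, ["default config"], timeout=10.0, out_fn="cdf_example.png")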
| bsd-3-clause |
rsivapr/scikit-learn | examples/cluster/plot_segmentation_toy.py | 8 | 3318 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient: the segmentation is then close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
pl.show()
| bsd-3-clause |
harisbal/pandas | pandas/core/util/hashing.py | 3 | 10765 | """
data hash pandas / numpy objects
"""
import itertools
import numpy as np
from pandas._libs import hashing, tslibs
from pandas.core.dtypes.generic import (
ABCMultiIndex,
ABCIndexClass,
ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_list_like, is_extension_array_dtype)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import infer_dtype_from_scalar
# 16 byte long hashing key
_default_hash_key = '0123456789123456'
def _combine_hash_arrays(arrays, num_items):
"""
Parameters
----------
arrays : generator
num_items : int
Should be the same as CPython's tupleobject.c
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, 'Fed in wrong num_items'
out += np.uint64(97531)
return out
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None,
categorize=True):
"""
Return a data hash of the Index/Series/DataFrame
.. versionadded:: 0.19.2
Parameters
----------
index : boolean, default True
include the index in the hash (if Series/DataFrame)
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key),
dtype='uint64', copy=False)
if isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
h = Series(h, index=obj, dtype='uint64', copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
if index:
index_iter = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values
for _ in [None])
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.iteritems())
num_items = len(obj.columns)
if index:
index_hash_generator = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values # noqa
for _ in [None])
num_items += 1
hashes = itertools.chain(hashes, index_hash_generator)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
else:
raise TypeError("Unexpected type for hashing %s" % type(obj))
return h
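# Illustrative sketch (not part of the original module; the data is made up): equal values
# hash to equal uint64 values, and setting index=False hashes the values alone.
if __name__ == "__main__":
    import pandas as pd
    obj = pd.Series(["a", "b", "a"])
    hashed = hash_pandas_object(obj, index=False)
    assert hashed.dtype == "uint64"
    assert hashed.iloc[0] == hashed.iloc[2]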
def hash_tuples(vals, encoding='utf8', hash_key=None):
"""
    Hash a MultiIndex / list-of-tuples efficiently
.. versionadded:: 0.20.0
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
    1d uint64 ndarray of hashed values
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = MultiIndex.from_tuples(vals)
# create a list-of-Categoricals
vals = [Categorical(vals.labels[level],
vals.levels[level],
ordered=False,
fastpath=True)
for level in range(vals.nlevels)]
# hash the list-of-ndarrays
hashes = (_hash_categorical(cat,
encoding=encoding,
hash_key=hash_key)
for cat in vals)
h = _combine_hash_arrays(hashes, len(vals))
if is_tuple:
h = h[0]
return h
def hash_tuple(val, encoding='utf8', hash_key=None):
"""
Hash a single tuple efficiently
Parameters
----------
val : single tuple
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
hash
"""
hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key)
for v in val)
h = _combine_hash_arrays(hashes, len(val))[0]
return h
def _hash_categorical(c, encoding, hash_key):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
    1d uint64 ndarray of hashed values, same size as len(c)
"""
# Convert ExtensionArrays to ndarrays
values = np.asarray(c.categories.values)
hashed = hash_array(values, encoding, hash_key,
categorize=False)
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
# instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
mask = c.isna()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype='uint64')
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
"""
Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, 'dtype'):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
if hash_key is None:
hash_key = _default_hash_key
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
# numpy if categorical is a subdtype of complex, as it will choke).
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
elif is_extension_array_dtype(dtype):
vals, _ = vals._values_for_factorize()
dtype = vals.dtype
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
elif isinstance(dtype, np.bool):
vals = vals.astype('u8')
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view('i8').astype('u8', copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
# With repeated values, its MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
# we have mixed types
vals = hashing.hash_object_array(vals.astype(str).astype(object),
hash_key, encoding)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals
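# Illustrative sketch (not part of the original module; the values are made up): hashing is
# deterministic, so repeated values map to the same uint64 hash regardless of position.
if __name__ == "__main__":
    _vals = np.array(["x", "y", "x"], dtype=object)
    _hashes = hash_array(_vals)
    assert _hashes.dtype == np.uint64
    assert _hashes[0] == _hashes[2] and _hashes[0] != _hashes[1]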
def _hash_scalar(val, encoding='utf8', hash_key=None):
"""
Hash scalar value
Returns
-------
1d uint64 numpy array of hash value, of length 1
"""
if isna(val):
# this is to be consistent with the _hash_categorical implementation
return np.array([np.iinfo(np.uint64).max], dtype='u8')
if getattr(val, 'tzinfo', None) is not None:
# for tz-aware datetimes, we need the underlying naive UTC value and
# not the tz aware object or pd extension type (as
# infer_dtype_from_scalar would do)
if not isinstance(val, tslibs.Timestamp):
val = tslibs.Timestamp(val)
val = val.tz_convert(None)
dtype, val = infer_dtype_from_scalar(val)
vals = np.array([val], dtype=dtype)
return hash_array(vals, hash_key=hash_key, encoding=encoding,
categorize=False)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/test_common.py | 1 | 4503 | import collections
from functools import partial
import string
import numpy as np
import pytest
from pandas.compat import np_version_under1p18
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.core import ops
import pandas.core.common as com
from pandas.util.version import Version
def test_get_callable_name():
getname = com.get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall:
def __call__(self):
return x # noqa
assert getname(fn) == "fn"
assert getname(lambda_)
assert getname(part1) == "fn"
assert getname(part2) == "fn"
assert getname(somecall()) == "somecall"
assert getname(1) is None
def test_any_none():
assert com.any_none(1, 2, 3, None)
assert not com.any_none(1, 2, 3, 4)
def test_all_not_none():
assert com.all_not_none(1, 2, 3, 4)
assert not com.all_not_none(1, 2, 3, None)
assert not com.all_not_none(None, None, None, None)
def test_random_state():
import numpy.random as npr
# Check with seed
state = com.random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
# check array-like
# GH32503
state_arr_like = npr.randint(0, 2 ** 31, size=624, dtype="uint32")
assert (
com.random_state(state_arr_like).uniform()
== npr.RandomState(state_arr_like).uniform()
)
# Check BitGenerators
# GH32503
if not np_version_under1p18:
assert (
com.random_state(npr.MT19937(3)).uniform()
== npr.RandomState(npr.MT19937(3)).uniform()
)
assert (
com.random_state(npr.PCG64(11)).uniform()
== npr.RandomState(npr.PCG64(11)).uniform()
)
# Error for floats or strings
msg = (
"random_state must be an integer, array-like, a BitGenerator, "
"a numpy RandomState, or None"
)
with pytest.raises(ValueError, match=msg):
com.random_state("test")
with pytest.raises(ValueError, match=msg):
com.random_state(5.5)
@pytest.mark.parametrize(
"left, right, expected",
[
(Series([1], name="x"), Series([2], name="x"), "x"),
(Series([1], name="x"), Series([2], name="y"), None),
(Series([1]), Series([2], name="x"), None),
(Series([1], name="x"), Series([2]), None),
(Series([1], name="x"), [2], "x"),
([1], Series([2], name="y"), "y"),
],
)
def test_maybe_match_name(left, right, expected):
assert ops.common._maybe_match_name(left, right) == expected
def test_standardize_mapping():
# No uninitialized defaultdicts
msg = r"to_dict\(\) only accepts initialized defaultdicts"
with pytest.raises(TypeError, match=msg):
com.standardize_mapping(collections.defaultdict)
# No non-mapping subtypes, instance
msg = "unsupported type: <class 'list'>"
with pytest.raises(TypeError, match=msg):
com.standardize_mapping([])
# No non-mapping subtypes, class
with pytest.raises(TypeError, match=msg):
com.standardize_mapping(list)
fill = {"bad": "data"}
assert com.standardize_mapping(fill) == dict
# Convert instance to type
assert com.standardize_mapping({}) == dict
dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)
def test_git_version():
# GH 21295
git_version = pd.__git_version__
assert len(git_version) == 40
assert all(c in string.hexdigits for c in git_version)
def test_version_tag():
version = Version(pd.__version__)
try:
version > Version("0.0.1")
except TypeError:
raise ValueError(
"No git tags exist, please sync tags between upstream and your repo"
)
@pytest.mark.parametrize(
"obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)]
)
def test_serializable(obj):
# GH 35611
unpickled = tm.round_trip_pickle(obj)
assert type(obj) == type(unpickled)
class TestIsBoolIndexer:
def test_non_bool_array_with_na(self):
# in particular, this should not raise
arr = np.array(["A", "B", np.nan], dtype=object)
assert not com.is_bool_indexer(arr)
| bsd-3-clause |
hbenniou/trunk | doc/sphinx/ipython_directive013.py | 8 | 27280 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
From: https://github.com/ipython/ipython/blob/master/docs/sphinxext/ipython_directive.py
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
    default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings; it is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| gpl-2.0 |
mxjl620/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
Erotemic/ibeis | ibeis/unstable/demobayes.py | 1 | 36463 | from __future__ import absolute_import, division, print_function, unicode_literals
import six # NOQA
import utool as ut
import numpy as np
from ibeis.algo.hots.bayes import make_name_model, temp_model, draw_tree_model
print, rrr, profile = ut.inject2(__name__)
def trytestdata_demo_cfgs():
alias_keys = {'nA': 'num_annots', 'nN': 'num_names', 'nS': 'num_scores'}
cfg_list = ut.parse_argv_cfg('--ev', alias_keys=alias_keys)
return cfg_list
def demo_bayesnet(cfg={}):
r"""
Make a model that knows who the previous annots are and tries to classify a new annot
CommandLine:
python -m ibeis --tf demo_bayesnet --diskshow --verbose --save demo4.png --dpath . --figsize=20,10 --dpi=128 --clipwhite
python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1 --show
python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Scd=1 --show
python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1,Scd=1 --show
python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
python -m ibeis --tf demo_bayesnet --ev :nA=5,rand_scores=True --show
python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=3,rand_scores=True --show --verbose
python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=2,Na=fred,rand_scores=True --show --verbose
python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=5,Na=fred,rand_scores=True --show --verbose
python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=2,Na=fred,rand_scores=True --show --verbose
python -m ibeis.unstable.demobayes --exec-demo_bayesnet \
--ev =:nA=4,Sab=0,Sac=0,Sbc=1 \
:Sbd=1 :Scd=1 :Sbd=1,Scd=1 :Sbd=1,Scd=1,Sad=0 \
--show --present
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> cfg_list = testdata_demo_cfgs()
>>> print('cfg_list = %r' % (cfg_list,))
>>> for cfg in cfg_list:
>>> demo_bayesnet(cfg)
>>> ut.show_if_requested()
"""
cfg = cfg.copy()
num_annots = cfg.pop('num_annots', 3)
num_names = cfg.pop('num_names', None)
num_scores = cfg.pop('num_scores', 2)
rand_scores = cfg.pop('rand_scores', False)
method = cfg.pop('method', 'bp')
other_evidence = {k: v for k, v in cfg.items() if not k.startswith('_')}
if rand_scores:
#import randomdotorg
#import sys
#r = randomdotorg.RandomDotOrg('ExampleCode')
#seed = int((1 - 2 * r.random()) * sys.maxint)
toy_data = get_toy_data_1v1(num_annots, nid_sequence=[0, 0, 1, 0, 1, 2])
print('toy_data = ' + ut.repr3(toy_data, nl=1))
diag_scores, = ut.dict_take(
toy_data, 'diag_scores'.split(', '))
discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]
def discretize_scores(scores):
# Assign continuous scores to discrete index
score_idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
return score_idxs
score_evidence = discretize_scores(diag_scores)
else:
score_evidence = []
discr_p_same = None
discr_domain = None
model, evidence, query_results = temp_model(
num_annots=num_annots, num_names=num_names,
num_scores=num_scores,
score_evidence=score_evidence,
mode=1,
other_evidence=other_evidence,
p_score_given_same=discr_p_same,
score_basis=discr_domain,
method=method,
)
def classify_k(cfg={}):
"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-classify_k --show --ev :nA=3
python -m ibeis.unstable.demobayes --exec-classify_k --show --ev :nA=3,k=1
python -m ibeis.unstable.demobayes --exec-classify_k --show --ev :nA=3,k=0 --method=approx
python -m ibeis.unstable.demobayes --exec-classify_k --show --ev :nA=10,k=1 --method=approx
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> cfg_list = testdata_demo_cfgs()
>>> classify_k(cfg_list[0])
>>> ut.show_if_requested()
"""
cfg = cfg.copy()
num_annots = cfg.pop('num_annots', 3)
num_scores = cfg.pop('num_scores', 2)
num_iter = cfg.pop('k', 0)
nid_sequence = np.array([0, 0, 1, 2, 2, 1, 1])
toy_data = get_toy_data_1v1(num_annots, nid_sequence=nid_sequence)
force_evidence = None
force_evidence = 0
diag_scores, = ut.dict_take(
toy_data, 'diag_scores'.split(', '))
#print('diag_scores = %r' % (diag_scores,))
#diag_labels = pairwise_matches.compress(is_diag)
#diag_pairs = ut.compress(pairwise_aidxs, is_diag)
discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]
def discretize_scores(scores):
# Assign continuous scores to closest discrete index
score_idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
return score_idxs
# Careful ordering is important here
score_evidence = discretize_scores(diag_scores)
if force_evidence is not None:
for x in range(len(score_evidence)):
score_evidence[x] = 0
model, evidence, query_results = temp_model(
num_annots=num_annots, num_names=num_annots,
num_scores=num_scores,
mode=1,
score_evidence=score_evidence,
p_score_given_same=discr_p_same,
score_basis=discr_domain,
#verbose=True
)
print(query_results['top_assignments'][0])
toy_data1 = toy_data
print('toy_data1 = ' + ut.repr3(toy_data1, nl=1))
num_annots2 = num_annots + 1
score_evidence1 = [None] * len(score_evidence)
full_evidence = score_evidence.tolist()
factor_list = query_results['factor_list']
using_soft = False
if using_soft:
soft_evidence1 = [dict(zip(x.statenames[0], x.values)) for x in factor_list]
for _ in range(num_iter):
print('\n\n ---------- \n\n')
#toy_data1['all_nids'].max() + 1
num_names_gen = len(toy_data1['all_aids']) + 1
num_names_gen = toy_data1['all_nids'].max() + 2
toy_data2 = get_toy_data_1v1(
1, num_names_gen,
initial_aids=toy_data1['all_aids'],
initial_nids=toy_data1['all_nids'],
nid_sequence=nid_sequence)
diag_scores2, = ut.dict_take(
toy_data2, 'diag_scores'.split(', '))
print('toy_data2 = ' + ut.repr3(toy_data2, nl=1))
score_evidence2 = discretize_scores(diag_scores2).tolist()
if force_evidence is not None:
for x in range(len(score_evidence2)):
score_evidence2[x] = force_evidence
print('score_evidence2 = %r' % (score_evidence2,))
if using_soft:
# Demo with soft evidence
model, evidence, query_results2 = temp_model(
num_annots=num_annots2, num_names=num_annots2,
num_scores=num_scores,
mode=1,
name_evidence=soft_evidence1,
#score_evidence=score_evidence1 + score_evidence2,
score_evidence=score_evidence2,
p_score_given_same=discr_p_same,
score_basis=discr_domain,
#verbose=True,
hack_score_only=len(score_evidence2),
)
if 1:
# Demo with full evidence
model, evidence, query_results2 = temp_model(
num_annots=num_annots2, num_names=num_annots2,
num_scores=num_scores,
mode=1,
score_evidence=full_evidence + score_evidence2,
p_score_given_same=discr_p_same,
score_basis=discr_domain,
verbose=True
)
factor_list2 = query_results2['factor_list']
if using_soft:
soft_evidence1 = [dict(zip(x.statenames[0], x.values)) for x in factor_list2]
score_evidence1 += ([None] * len(score_evidence2))
full_evidence = full_evidence + score_evidence2
num_annots2 += 1
toy_data1 = toy_data2
def show_toy_distributions(toy_params):
import vtool_ibeis as vt
import plottool_ibeis as pt
pt.ensureqt()
xdata = np.linspace(0, 8, 1000)
tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
pt.plot_probabilities(
[tp_pdf, fp_pdf], ['TP', 'TF'],
prob_colors=[pt.TRUE_BLUE, pt.FALSE_RED],
xdata=xdata,
figtitle='Toy Distributions')
def get_toy_data_1vM(num_annots, num_names=None, **kwargs):
r"""
Args:
num_annots (int):
num_names (int): (default = None)
Kwargs:
initial_aids, initial_nids, nid_sequence, seed
Returns:
tuple: (pair_list, feat_list)
CommandLine:
python -m ibeis.unstable.demobayes --exec-get_toy_data_1vM --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> num_annots = 1000
>>> num_names = 40
>>> get_toy_data_1vM(num_annots, num_names)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.show_if_requested()
"""
import vtool_ibeis as vt
tup_ = get_toy_annots(num_annots, num_names, **kwargs)
aids, nids, aids1, nids1, all_aids, all_nids = tup_
rng = vt.ensure_rng(None)
# Test a simple SVM classifier
nid2_nexemp = ut.dict_hist(nids1)
aid2_nid = dict(zip(aids, nids))
ut.fix_embed_globals()
#def add_to_globals(globals_, subdict):
# globals_.update(subdict)
unique_nids = list(nid2_nexemp.keys())
def annot_to_class_feats2(aid, aid2_nid, top=None):
pair_list = []
score_list = []
nexemplar_list = []
for nid in unique_nids:
label = (aid2_nid[aid] == nid)
num_exemplars = nid2_nexemp.get(nid, 0)
if num_exemplars == 0:
continue
params = toy_params[label]
mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
score_ = rng.normal(mu, sigma, size=num_exemplars).max()
score = np.clip(score_, 0, np.inf)
pair_list.append((aid, nid))
score_list.append(score)
nexemplar_list.append(num_exemplars)
rank_list = ut.argsort(score_list, reverse=True)
feat_list = np.array([score_list, rank_list, nexemplar_list]).T
sortx = np.argsort(rank_list)
feat_list = feat_list.take(sortx, axis=0)
pair_list = np.array(pair_list).take(sortx, axis=0)
if top is not None:
feat_list = feat_list[:top]
pair_list = pair_list[0:top]
return pair_list, feat_list
toclass_features = [annot_to_class_feats2(aid, aid2_nid, top=5) for aid in aids]
aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
score_list = feat_list.T[0:1].T
lbl_list = [aid2_nid[aid] == nid for aid, nid in aidnid_pairs]
from sklearn import svm
#clf1 = svm.LinearSVC()
print('Learning classifiers')
clf3 = svm.SVC(probability=True)
clf3.fit(feat_list, lbl_list)
#prob_true, prob_false = clf3.predict_proba(feat_list).T
clf1 = svm.LinearSVC()
clf1.fit(score_list, lbl_list)
# Score new annots against the training database
tup_ = get_toy_annots(num_annots * 2, num_names, initial_aids=all_aids, initial_nids=all_nids)
aids, nids, aids1, nids1, all_aids, all_nids = tup_
aid2_nid = dict(zip(aids, nids))
toclass_features = [annot_to_class_feats2(aid, aid2_nid) for aid in aids]
aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
lbl_list = np.array([aid2_nid[aid] == nid for aid, nid in aidnid_pairs])
print('Running tests')
score_list = feat_list.T[0:1].T
tp_feat_list = feat_list[lbl_list]
tn_feat_list = feat_list[~lbl_list]
tp_lbls = lbl_list[lbl_list]
tn_lbls = lbl_list[~lbl_list]
print('num tp: %d' % len(tp_lbls))
print('num fp: %d' % len(tn_lbls))
tp_score_list = score_list[lbl_list]
tn_score_list = score_list[~lbl_list]
    print('tp_feat' + ut.repr3(ut.get_stats(tp_feat_list, axis=0), precision=2))
    print('tn_feat' + ut.repr3(ut.get_stats(tn_feat_list, axis=0), precision=2))
    print('tp_score' + ut.repr2(ut.get_stats(tp_score_list), precision=2))
    print('tn_score' + ut.repr2(ut.get_stats(tn_score_list), precision=2))
tp_pred3 = clf3.predict(tp_feat_list)
tn_pred3 = clf3.predict(tn_feat_list)
print((tp_pred3.sum(), tp_pred3.shape))
print((tn_pred3.sum(), tn_pred3.shape))
tp_score3 = clf3.score(tp_feat_list, tp_lbls)
tn_score3 = clf3.score(tn_feat_list, tn_lbls)
tp_pred1 = clf1.predict(tp_score_list)
tn_pred1 = clf1.predict(tn_score_list)
print((tp_pred1.sum(), tp_pred1.shape))
print((tn_pred1.sum(), tn_pred1.shape))
tp_score1 = clf1.score(tp_score_list, tp_lbls)
tn_score1 = clf1.score(tn_score_list, tn_lbls)
print('tp score with rank = %r' % (tp_score3,))
print('tn score with rank = %r' % (tn_score3,))
print('tp score without rank = %r' % (tp_score1,))
print('tn score without rank = %r' % (tn_score1,))
toy_data = {}
return toy_data
def get_toy_annots(num_annots, num_names=None, initial_aids=None, initial_nids=None, nid_sequence=None, seed=None):
r"""
Args:
num_annots (int):
num_names (int): (default = None)
initial_aids (None): (default = None)
initial_nids (None): (default = None)
nid_sequence (None): (default = None)
seed (None): (default = None)
Returns:
tuple: (aids, nids, aids1, nids1, all_aids, all_nids)
CommandLine:
python -m ibeis.unstable.demobayes --exec-get_toy_annots
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> num_annots = 1
>>> num_names = 5
>>> initial_aids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
>>> initial_nids = np.array([0, 0, 1, 2, 2, 1, 1, 1, 2, 3], dtype=np.int64)
>>> nid_sequence = np.array([0, 0, 1, 2, 2, 1, 1], dtype=np.int64)
>>> seed = 0
>>> (aids, nids, aids1, nids1, all_aids, all_nids) = get_toy_annots(num_annots, num_names, initial_aids, initial_nids, nid_sequence, seed)
>>> result = ('(aids, nids, aids1, nids1, all_aids, all_nids) = %s' % (ut.repr2((aids, nids, aids1, nids1, all_aids, all_nids), nl=1),))
>>> print(result)
"""
import vtool_ibeis as vt
if num_names is None:
num_names = num_annots
print('Generating toy data with num_annots=%r' % (num_annots,))
if initial_aids is None:
assert initial_nids is None
first_step = True
initial_aids = []
initial_nids = []
else:
first_step = False
assert initial_nids is not None
aids = np.arange(len(initial_aids), num_annots + len(initial_aids))
rng = vt.ensure_rng(seed)
if nid_sequence is None:
nids = rng.randint(0, num_names, num_annots)
else:
unused_from_sequence = max(len(nid_sequence) - len(initial_aids), 0)
if unused_from_sequence == 0:
nids = rng.randint(0, num_names, num_annots)
elif unused_from_sequence > 0 and unused_from_sequence < num_annots:
num_remain = num_annots - unused_from_sequence
nids = np.append(nid_sequence[-unused_from_sequence:], rng.randint(0, num_names, num_remain))
else:
nids = nid_sequence[-unused_from_sequence]
nids = np.array(ut.take(nid_sequence, range(len(initial_aids), len(initial_aids) + num_annots)))
if first_step:
aids1 = aids
nids1 = nids
else:
aids1 = initial_aids
nids1 = initial_nids
all_nids = np.append(initial_nids, nids)
all_aids = np.append(initial_aids, aids)
import utool
with utool.embed_on_exception_context:
ut.assert_eq(len(aids), len(nids), 'len new')
ut.assert_eq(len(aids1), len(nids1), 'len comp')
ut.assert_eq(len(all_aids), len(all_nids), 'len all')
return aids, nids, aids1, nids1, all_aids, all_nids
toy_params = {
True: {'mu': 1.5, 'sigma': 3.0},
False: {'mu': 0.5, 'sigma': .4}
#True: {'mu': 3.5, 'sigma': 1.1},
#False: {'mu': .3, 'sigma': .7}
#'p': .7},
#'p': .2}
}
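# Sketch of how a single toy match score is drawn from these parameters
# (mirrors the clipped-Gaussian sampling in get_toy_data_1v1 below; the rng
# seed is illustrative only):
#
#   rng = np.random.RandomState(0)
#   params = toy_params[True]
#   score = np.clip(rng.normal(params['mu'], params['sigma']), 0, np.inf)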
#@ut.cached_func('_toy_bayes_data3')
def get_toy_data_1v1(num_annots=5, num_names=None, **kwargs):
r"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-get_toy_data_1v1 --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> toy_data = get_toy_data_1v1()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> show_toy_distributions(toy_data['toy_params'])
>>> ut.show_if_requested()
Example1:
>>> # ENABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> toy_data = get_toy_data_1v1()
>>> kwargs = {}
>>> initial_aids = toy_data['aids']
>>> initial_nids = toy_data['nids']
>>> num_annots = 1
>>> num_names = 6
>>> toy_data2 = get_toy_data_1v1(num_annots, num_names, initial_aids=initial_aids, initial_nids=initial_nids)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> show_toy_distributions(toy_data['toy_params'])
>>> ut.show_if_requested()
Ignore:
>>> num_annots = 1000
>>> num_names = 400
"""
import vtool_ibeis as vt
tup_ = get_toy_annots(num_annots, num_names, **kwargs)
aids, nids, aids1, nids1, all_aids, all_nids = tup_
rng = vt.ensure_rng(None)
def pairwise_feature(aidx1, aidx2, all_nids=all_nids, toy_params=toy_params):
if aidx1 == aidx2:
score = -1
else:
#rng = np.random.RandomState(int((aidx1 + 13) * (aidx2 + 13)))
nid1 = all_nids[int(aidx1)]
nid2 = all_nids[int(aidx2)]
params = toy_params[nid1 == nid2]
mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
score_ = rng.normal(mu, sigma)
score = np.clip(score_, 0, np.inf)
return score
pairwise_nids = list([tup[::-1] for tup in ut.iprod(nids, nids1)])
pairwise_matches = np.array(
[nid1 == nid2 for nid1, nid2 in pairwise_nids])
pairwise_aidxs = list([tup[::-1] for tup in ut.iprod(aids, aids1)])
pairwise_features = np.array(
[pairwise_feature(aidx1, aidx2) for aidx1, aidx2 in pairwise_aidxs])
#pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)
is_diag = [r < c for r, c, in pairwise_aidxs]
diag_scores = pairwise_features.compress(is_diag)
diag_aidxs = ut.compress(pairwise_aidxs, is_diag)
import utool
with utool.embed_on_exception_context:
diag_nids = ut.compress(pairwise_nids, is_diag)
diag_labels = pairwise_matches.compress(is_diag)
#import utool
#utool.embed()
toy_data = {
'aids': aids,
'nids': nids,
'all_nids': all_nids,
'all_aids': all_aids,
#'pairwise_aidxs': pairwise_aidxs,
#'pairwise_scores': pairwise_scores,
#'pairwise_matches': pairwise_matches,
'diag_labels': diag_labels,
'diag_scores': diag_scores,
'diag_nids': diag_nids,
'diag_aidxs': diag_aidxs,
'toy_params': toy_params,
}
return toy_data
@ut.cached_func('_toy_learn_prob_score5')
def learn_prob_score(num_scores=5, pad=55, ret_enc=False, use_cache=None):
r"""
Args:
num_scores (int): (default = 5)
Returns:
tuple: (discr_domain, discr_p_same)
CommandLine:
python -m ibeis.unstable.demobayes --exec-learn_prob_score --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> num_scores = 2
>>> (discr_domain, discr_p_same, encoder) = learn_prob_score(num_scores, ret_enc=True, use_cache=False)
>>> print('discr_p_same = %r' % (discr_p_same,))
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> encoder.visualize()
>>> ut.show_if_requested()
"""
num_annots_train = 200
num_names_train = 5
toy_data = get_toy_data_1v1(num_annots_train, num_names_train)
#pairwise_aidxs, pairwise_scores, pairwise_matches = ut.dict_take(
# toy_data, 'pairwise_aidxs, pairwise_scores, pairwise_matches'.split(', '))
diag_scores, diag_labels = ut.dict_take(
toy_data, 'diag_scores, diag_labels'.split(', '))
#is_diag = [r < c for r, c, in pairwise_aidxs]
#diag_scores = pairwise_scores.compress(is_diag)
#diag_labels = pairwise_matches.compress(is_diag)
# Learn P(S_{ij} | M_{ij})
import vtool_ibeis as vt
encoder = vt.ScoreNormalizer(
reverse=True, monotonize=True,
adjust=4,
)
encoder.fit(X=diag_scores, y=diag_labels, verbose=True)
if False:
import plottool_ibeis as pt
pt.ensureqt()
encoder.visualize()
#show_toy_distributions()
def discretize_probs(encoder):
p_tp_given_score = encoder.p_tp_given_score / encoder.p_tp_given_score.sum()
bins = len(p_tp_given_score) - (pad * 2)
stride = int(np.ceil(bins / num_scores))
idxs = np.arange(0, bins, stride) + pad
discr_p_same = p_tp_given_score.take(idxs)
discr_p_same = discr_p_same / discr_p_same.sum()
discr_domain = encoder.score_domain.take(idxs)
return discr_domain, discr_p_same
discr_domain, discr_p_same = discretize_probs(encoder)
if ret_enc:
return discr_domain, discr_p_same, encoder
return discr_domain, discr_p_same
def classify_one_new_unknown():
r"""
Make a model that knows who the previous annots are and tries to classify a new annot
CommandLine:
python -m ibeis.unstable.demobayes --exec-classify_one_new_unknown --verbose
python -m ibeis.unstable.demobayes --exec-classify_one_new_unknown --show --verbose --present
python3 -m ibeis.unstable.demobayes --exec-classify_one_new_unknown --verbose
python3 -m ibeis.unstable.demobayes --exec-classify_one_new_unknown --verbose --diskshow --verbose --present --save demo5.png --dpath . --figsize=20,10 --dpi=128 --clipwhite
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = classify_one_new_unknown()
>>> ut.show_if_requested()
"""
if False:
constkw = dict(
num_annots=5, num_names=3,
name_evidence=[0]
#name_evidence=[0, 0, 1, 1, None],
#name_evidence=[{0: .99}, {0: .99}, {1: .99}, {1: .99}, None],
#name_evidence=[0, {0: .99}, {1: .99}, 1, None],
)
temp_model(score_evidence=[1, 0, 0, 0, 0, 1], mode=1, **constkw)
#from ibeis.unstable.demobayes import *
constkw = dict(
num_annots=4, num_names=4,
)
model, evidence = temp_model(
mode=1,
# lll and llh have strikingly different
# probability of M marginals
score_evidence=[0, 0, 1],
other_evidence={
},
**constkw)
def tst_triangle_property():
r"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-test_triangle_property --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = test_triangle_property()
>>> ut.show_if_requested()
"""
constkw = dict(
num_annots=3, num_names=3,
name_evidence=[],
)
temp_model(
mode=1,
other_evidence={
'Mab': False,
'Mac': False,
#'Na': 'fred',
#'Nb': 'sue',
},
**constkw)
def demo_structure():
r"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_structure --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = demo_structure()
>>> ut.show_if_requested()
"""
constkw = dict(score_evidence=[], name_evidence=[], mode=3)
model, = temp_model(num_annots=4, num_names=4, **constkw)
draw_tree_model(model)
def make_bayes_notebook():
r"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-make_bayes_notebook
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = make_bayes_notebook()
>>> print(result)
"""
from ibeis.templates import generate_notebook
initialize = ut.codeblock(
r'''
# STARTBLOCK
import os
os.environ['UTOOL_NO_CNN'] = 'True'
from ibeis.unstable.demobayes import * # NOQA
# Matplotlib stuff
import matplotlib as mpl
%matplotlib inline
%load_ext autoreload
%autoreload
from IPython.core.display import HTML
HTML("<style>body .container { width:99% !important; }</style>")
# ENDBLOCK
'''
)
cell_list_def = [
initialize,
show_model_templates,
demo_modes,
demo_name_annot_complexity,
###demo_model_idependencies,
demo_single_add,
demo_ambiguity,
demo_conflicting_evidence,
demo_annot_idependence_overlap,
]
def format_cell(cell):
if ut.is_funclike(cell):
header = '# ' + ut.to_title_caps(ut.get_funcname(cell))
code = (header, ut.get_func_sourcecode(cell, stripdef=True, stripret=True))
else:
code = (None, cell)
return generate_notebook.format_cells(code)
cell_list = ut.flatten([format_cell(cell) for cell in cell_list_def])
nbstr = generate_notebook.make_notebook(cell_list)
print('nbstr = %s' % (nbstr,))
fpath = 'demobayes.ipynb'
ut.writeto(fpath, nbstr)
ut.startfile(fpath)
def show_model_templates():
r"""
CommandLine:
python -m ibeis.unstable.demobayes --exec-show_model_templates
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = show_model_templates()
>>> ut.show_if_requested()
"""
make_name_model(2, 2, verbose=True, mode=1)
print('-------------')
make_name_model(2, 2, verbose=True, mode=2)
def demo_single_add():
"""
This demo shows how a name is assigned to a new annotation.
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_single_add --show --present --mode=1
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> demo_single_add()
>>> ut.show_if_requested()
"""
# Initially there are only two annotations that have a strong match
name_evidence = [{0: .9}] # Soft label
name_evidence = [0] # Hard label
temp_model(num_annots=2, num_names=5, score_evidence=[1], name_evidence=name_evidence)
    # Adding a new annotation does not change the original probabilities
temp_model(num_annots=3, num_names=5, score_evidence=[1], name_evidence=name_evidence)
# Adding evidence that Na matches Nc does not influence the probability
# that Na matches Nb. However the probability that Nb matches Nc goes up.
temp_model(num_annots=3, num_names=5, score_evidence=[1, 1], name_evidence=name_evidence)
    # However, once Nb is scored against Nc, the likelihood that all 3
    # are fred goes up significantly.
temp_model(num_annots=3, num_names=5, score_evidence=[1, 1, 1],
name_evidence=name_evidence)
def demo_conflicting_evidence():
"""
Notice that the number of annotations in the graph does not affect the
probability of names.
"""
    # Initialized with two annots. Each is pretty sure it is someone else
constkw = dict(num_annots=2, num_names=5, score_evidence=[])
temp_model(name_evidence=[{0: .9}, {1: .9}], **constkw)
# Having evidence that they are different increases this confidence.
temp_model(name_evidence=[{0: .9}, {1: .9}], other_evidence={'Sab': 0}, **constkw)
    # However, confusion is introduced if there is evidence that they are the same
temp_model(name_evidence=[{0: .9}, {1: .9}], other_evidence={'Sab': 1}, **constkw)
    # When Na is forced to be fred, this doesn't change Nb's evaluation by more
    # than a few points
temp_model(name_evidence=[0, {1: .9}], other_evidence={'Sab': 1}, **constkw)
def demo_ambiguity():
r"""
    Test what happens when an annotation needs to choose between one of two
    names
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_ambiguity --show --verbose --present
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = demo_ambiguity()
>>> ut.show_if_requested()
"""
constkw = dict(
num_annots=3, num_names=3,
name_evidence=[0],
#name_evidence=[],
#name_evidence=[{0: '+eps'}, {1: '+eps'}, {2: '+eps'}],
)
temp_model(score_evidence=[0, 0, 1], mode=1,
**constkw)
def demo_annot_idependence_overlap():
r"""
Given:
* an unknown annotation \d
* three annots with the same name (Fred) \a, \b, and \c
* \a and \b are near duplicates
* (\a and \c) / (\b and \c) are novel views
Goal:
* If \d matches to \a and \b the probably that \d is Fred should not be
much more than if \d matched only \a or only \b.
* The probability that \d is Fred given it matches to any of the 3 annots
alone should be equal
P(\d is Fred | Mad=1) = P(\d is Fred | Mbd=1) = P(\d is Fred | Mcd=1)
* The probability that \d is fred given two matches to any of those two annots
should be greater than the probability given only one.
P(\d is Fred | Mad=1, Mbd=1) > P(\d is Fred | Mad=1)
P(\d is Fred | Mad=1, Mcd=1) > P(\d is Fred | Mad=1)
* The probability that \d is fred given matches to two near duplicate
matches should be less than
if \d matches two non-duplicate matches.
P(\d is Fred | Mad=1, Mcd=1) > P(\d is Fred | Mad=1, Mbd=1)
* The probability that \d is fred given two near duplicates should be only epsilon greater than
a match to either one individually.
P(\d is Fred | Mad=1, Mbd=1) = P(\d is Fred | Mad=1) + \epsilon
Method:
We need to model the fact that there are other causes that create the
effect of a high score. Namely, near duplicates.
This can be done by adding an extra conditional that score depends on
if they match as well as if they are near duplicates.
P(S_ij | Mij) --> P(S_ij | Mij, Dij)
where
Dij is a random variable indicating if the image is a near duplicate.
        We can model this as an independent variable
P(Dij) = {True: .5, False: .5}
or as depending on if the names match.
P(Dij | Mij) = {'same': {True: .5, False: .5} diff: {True: 0, False 1}}
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_annot_idependence_overlap --verbose --present --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = demo_annot_idependence_overlap()
>>> ut.show_if_requested()
"""
# We will end up making annots a and b fred and c and d sue
constkw = dict(
num_annots=4, num_names=4,
name_evidence=[{0: '+eps'}, {1: '+eps'}, {2: '+eps'}, {3: '+eps'}],
#name_evidence=[{0: .9}, None, None, {1: .9}]
#name_evidence=[0, None, None, None]
#name_evidence=[0, None, None, None]
)
temp_model(score_evidence=[1, 1, 1, None, None, None], **constkw)
temp_model(score_evidence=[1, 1, 0, None, None, None], **constkw)
temp_model(score_evidence=[1, 0, 0, None, None, None], **constkw)
def demo_modes():
"""
    Look at the last result of the different names demo under different modes
"""
constkw = dict(
num_annots=4, num_names=8,
score_evidence=[1, 0, 0, 0, 0, 1],
#name_evidence=[{0: .9}, None, None, {1: .9}],
#name_evidence=[0, None, None, 1],
name_evidence=[0, None, None, None],
#other_evidence={
# 'Sad': 0,
# 'Sab': 1,
# 'Scd': 1,
# 'Sac': 0,
# 'Sbc': 0,
# 'Sbd': 0,
#}
)
# The first mode uses a hidden Match layer
temp_model(mode=1, **constkw)
# The second mode directly maps names to scores
temp_model(mode=2, **constkw)
temp_model(mode=3, noquery=True, **constkw)
temp_model(mode=4, noquery=True, **constkw)
def demo_name_annot_complexity():
"""
This demo is meant to show the structure of the graph as more annotations
and names are added.
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_name_annot_complexity --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> demo_name_annot_complexity()
>>> ut.show_if_requested()
"""
constkw = dict(score_evidence=[], name_evidence=[], mode=1)
# Initially there are 2 annots and 4 names
model, = temp_model(num_annots=2, num_names=4, **constkw)
draw_tree_model(model)
# Adding a name causes the probability of the other names to go down
model, = temp_model(num_annots=2, num_names=5, **constkw)
draw_tree_model(model)
    # Adding an annotation without matches does not affect the probabilities
    # of names
model, = temp_model(num_annots=3, num_names=5, **constkw)
draw_tree_model(model)
model, = temp_model(num_annots=4, num_names=10, **constkw)
draw_tree_model(model)
# Given A annots, the number of score nodes is (A ** 2 - A) / 2
model, = temp_model(num_annots=5, num_names=5, **constkw)
draw_tree_model(model)
#model, = temp_model(num_annots=6, num_names=5, score_evidence=[], name_evidence=[], mode=1)
#draw_tree_model(model)
def demo_model_idependencies():
"""
Independences of the 3 annot 3 name model
CommandLine:
python -m ibeis.unstable.demobayes --exec-demo_model_idependencies --mode=1 --num-names=2 --show
python -m ibeis.unstable.demobayes --exec-demo_model_idependencies --mode=2
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.unstable.demobayes import * # NOQA
>>> result = demo_model_idependencies()
>>> print(result)
>>> ut.show_if_requested()
"""
num_names = ut.get_argval('--num-names', default=3)
model = temp_model(num_annots=num_names, num_names=num_names, score_evidence=[], name_evidence=[])[0]
    # This model has the following independencies
idens = model.get_independencies()
iden_strs = [', '.join(sorted(iden.event1)) +
' _L ' +
','.join(sorted(iden.event2)) +
' | ' +
', '.join(sorted(iden.event3))
for iden in idens.independencies]
print('general idependencies')
print(ut.align(ut.align('\n'.join(sorted(iden_strs)), '_'), '|'))
#ut.embed()
#model.is_active_trail('Na', 'Nb', 'Sab')
# Might not be valid, try and collapse S and M
#xs = list(map(str, idens.independencies))
#import re
#xs = [re.sub(', M..', '', x) for x in xs]
#xs = [re.sub('M..,?', '', x) for x in xs]
#xs = [x for x in xs if not x.startswith('( _')]
#xs = [x for x in xs if not x.endswith('| )')]
#print('\n'.join(sorted(list(set(xs)))))
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.unstable.demobayes
python -m ibeis.unstable.demobayes --allexamples
"""
if ut.VERBOSE:
print('[hs] demobayes')
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
seanli9jan/tensorflow | tensorflow/contrib/eager/python/examples/l2hmc/main.py | 19 | 7805 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC on simple Gaussian mixture model with TensorFlow eager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
tfe = tf.contrib.eager
def main(_):
tf.enable_eager_execution()
global_step = tf.train.get_or_create_global_step()
global_step.assign(1)
energy_fn, mean, covar = {
"scg": l2hmc.get_scg_energy_fn(),
"rw": l2hmc.get_rw_energy_fn()
}[FLAGS.energy_fn]
x_dim = 2
train_iters = 5000
eval_iters = 2000
eps = 0.1
n_steps = 10 # Chain length
n_samples = 200
record_loss_every = 100
dynamics = l2hmc.Dynamics(
x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
learning_rate = tf.train.exponential_decay(
1e-3, global_step, 1000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, dynamics=dynamics, global_step=global_step)
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
checkpointer.restore(latest_path)
print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
sys.stdout.flush()
if not FLAGS.restore:
# Training
if FLAGS.use_defun:
      # Use `tfe.defun` to boost performance when there are lots of small ops
loss_fn = tfe.function(l2hmc.compute_loss)
else:
loss_fn = l2hmc.compute_loss
samples = tf.random_normal(shape=[n_samples, x_dim])
for i in range(1, train_iters + 1):
loss, samples, accept_prob = train_one_iter(
dynamics,
samples,
optimizer,
loss_fn=loss_fn,
global_step=global_step)
if i % record_loss_every == 0:
print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
i, loss.numpy(),
accept_prob.numpy().mean()))
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Training loss", loss, step=global_step)
print("Training complete.")
sys.stdout.flush()
if FLAGS.train_dir:
saved_path = checkpointer.save(
file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
print("Saved checkpoint at path: \"{}\" ".format(saved_path))
sys.stdout.flush()
# Evaluation
if FLAGS.use_defun:
    # Use tfe.defun to boost performance when there are lots of small ops
apply_transition = tfe.function(dynamics.apply_transition)
else:
apply_transition = dynamics.apply_transition
samples = tf.random_normal(shape=[n_samples, x_dim])
samples_history = []
for i in range(eval_iters):
samples_history.append(samples.numpy())
_, _, _, samples = apply_transition(samples)
samples_history = np.array(samples_history)
print("Sampling complete.")
sys.stdout.flush()
# Mean and covariance of target distribution
mean = mean.numpy()
covar = covar.numpy()
ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
print("First 25 entries of the auto-correlation spectrum: {}".format(
ac_spectrum[:25]))
ess = compute_ess(ac_spectrum)
print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
sys.stdout.flush()
if FLAGS.train_dir:
# Plot autocorrelation spectrum in tensorboard
plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)
for ac in ac_spectrum:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("Autocorrelation", ac, step=plot_step)
plot_step.assign(plot_step + n_steps)
  if HAS_MATPLOTLIB and FLAGS.train_dir:
# Choose a single chain and plot the trajectory
single_chain = samples_history[:, 0, :]
xs = single_chain[:100, 0]
ys = single_chain[:100, 1]
plt.figure()
plt.plot(xs, ys, color="orange", marker="o", alpha=0.6) # Trained chain
plt.savefig(os.path.join(FLAGS.train_dir, "single_chain.png"))
def train_one_iter(dynamics,
x,
optimizer,
loss_fn=l2hmc.compute_loss,
global_step=None):
"""Train the sampler for one iteration."""
loss, grads, out, accept_prob = l2hmc.loss_and_grads(
dynamics, x, loss_fn=loss_fn)
optimizer.apply_gradients(
zip(grads, dynamics.trainable_variables), global_step=global_step)
return loss, out, accept_prob
def compute_ac_spectrum(samples_history, target_mean, target_covar):
"""Compute autocorrelation spectrum.
Follows equation 15 from the L2HMC paper.
Args:
samples_history: Numpy array of shape [T, B, D], where T is the total
number of time steps, B is the batch size, and D is the dimensionality
of sample space.
target_mean: 1D Numpy array of the mean of target(true) distribution.
target_covar: 2D Numpy array representing a symmetric matrix for variance.
Returns:
Autocorrelation spectrum, Numpy array of shape [T-1].
"""
# Using numpy here since eager is a bit slow due to the loop
time_steps = samples_history.shape[0]
trace = np.trace(target_covar)
rhos = []
for t in range(time_steps - 1):
rho_t = 0.
for tau in range(time_steps - t):
v_tau = samples_history[tau, :, :] - target_mean
v_tau_plus_t = samples_history[tau + t, :, :] - target_mean
# Take dot product over observation dims and take mean over batch dims
rho_t += np.mean(np.sum(v_tau * v_tau_plus_t, axis=1))
rho_t /= trace * (time_steps - t)
rhos.append(rho_t)
return np.array(rhos)
def compute_ess(ac_spectrum):
"""Compute the effective sample size based on autocorrelation spectrum.
This follows equation 16 from the L2HMC paper.
Args:
ac_spectrum: Autocorrelation spectrum
Returns:
The effective sample size
"""
# Cutoff from the first value less than 0.05
cutoff = np.argmax(ac_spectrum[1:] < .05)
if cutoff == 0:
cutoff = len(ac_spectrum)
ess = 1. / (1. + 2. * np.sum(ac_spectrum[1:cutoff]))
return ess
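# Illustrative check of compute_ess (added comment, not part of the original
# example): for an uncorrelated chain the normalized spectrum is roughly
# [1, 0, 0, ...]; every lag beyond zero is below the 0.05 cutoff, the
# correction sum vanishes and the ESS per step is ~1, e.g.
#   compute_ess(np.array([1.0, 0.0, 0.0]))  # -> 1.0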
if __name__ == "__main__":
flags.DEFINE_string(
"train_dir",
default=None,
help="[Optional] Directory to store the training information")
flags.DEFINE_boolean(
"restore",
default=False,
help="[Optional] Restore the latest checkpoint from `train_dir` if True")
flags.DEFINE_boolean(
"use_defun",
default=False,
help="[Optional] Use `tfe.defun` to boost performance")
flags.DEFINE_string(
"energy_fn",
default="scg",
help="[Optional] The energy function used for experimentation"
"Other options include `rw`")
FLAGS = flags.FLAGS
tf.app.run(main)
| apache-2.0 |
lhyqie/rtree | python/trajectory_test.py | 1 | 1931 | # input is a trajectory Q and a couple of trajectory S to query on
from matplotlib.pyplot import figure
# each trajectory is a sequence of points
# each point is (x,y)
q = [(1,1),(2,2),(3,3)]
S = []
S.append([(1,0),(2, 3),(2,3)])
S.append([(1,3),(2, 4),(5, 3.8)])
S.append([(1,1.5),(2, 6),(3, 7)])
print q
for t in S:  # for each trajectory in S
print t
# Given multiple query points, a k-BCT query finds the k closest trajectories to the set of query points.
# The distance Dist_q between a query location q_i and a trajectory R = {p1, p2, ..., pl} is defined as:
# (2.2)
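# For reference (hedged reconstruction of the cited equations, matching the
# implementation in point_traj_distance and traj_sim below):
#   (2.2)  Dist_q(q_i, R) = min_{p in R} ||q_i - p||_2
#   (2.3)  Sim(q, R)      = sum_i exp(-Dist_q(q_i, R))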
def point_distance(p1, p2):
"""
euclidean distance between two points
"""
from math import sqrt
return sqrt((p1[0] - p2[0]) **2 + (p1[1] - p2[1]) **2)
def point_traj_distance(p1, t):
"""
distance between one point p1 and a trajectory t
is the minimum distance of p1 to each point p2 in t
refer to Equation (2.2)
"""
min_dist = 99999
for p2 in t:
dist = point_distance(p1, p2)
if dist < min_dist:
min_dist = dist
return min_dist
def traj_sim(t1, t2):
"""
similarity of two trajectories is defined as (2.3)
"""
from math import exp
sum = 0
for p1 in t1:
dist = point_traj_distance(p1, t2)
sum += exp(-dist)
return sum
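# Quick sanity check (added for illustration, not part of the original script):
# comparing a trajectory with itself gives one exp(0) = 1 term per query point,
# so the similarity equals the number of points in q.
assert abs(traj_sim(q, q) - len(q)) < 1e-12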
def t_query(q, S):
    # score each trajectory in S against the query q (the k-closest selection itself is not implemented here)
for t in S:
        print 'similarity between', q, 'and', t, 'is', traj_sim(q, t)
t_query(q, S)
import matplotlib.pyplot as plt
zipq = zip(*q)
plt.figure(figsize=(10,5))
plt.plot(zipq[0], zipq[1], '-bo')
plt.text(zipq[0][-1], zipq[1][-1] + 0.2, s="q")
for i, t in enumerate(S):
zipt = zip(*t)
plt.plot(zipt[0], zipt[1], '-rx')
plt.text(zipt[0][-1], zipt[1][-1] + 0.2, s="trajectory"+str(i))
plt.xlim([0,8])
plt.ylim([-1,8])
plt.show()
| apache-2.0 |
Sharmarajnish/Allstate-Claims-Severity | allstate_data_analysis.py | 1 | 2458 | import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score as AUC
from sklearn.metrics import mean_absolute_error
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.cross_validation import cross_val_score
from copy import deepcopy
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
print train.shape
print test.shape
print train.describe()
print pd.isnull(train).values.any()
# print train.info()
cat_features = list(train.select_dtypes(include=['object']).columns)
print("Number of categorical variables are:", len(cat_features))
print cat_features
cont_features = [cont for cont in list(train.select_dtypes(
include=['float64', 'int64']).columns) if cont not in ['loss', 'id']]
print "Continuous: {} features".format(len(cont_features))
id_col = list(train.select_dtypes(include=['int64']).columns)
print "A column of int64: {}".format(id_col)
cat_uniques = []
for cat in cat_features:
cat_uniques.append(len(train[cat].unique()))
print cat_uniques
unique_val_categories = pd.DataFrame.from_items([('cat_names',cat_features),('unique_values',cat_uniques)])
print unique_val_categories.head()
fig,(ax1,ax2)= plt.subplots(1,2)
fig.set_size_inches(16,5)
ax1.hist(unique_val_categories.unique_values,bins=50)
ax1.set_title('Amount of categorical features with X distinct values')
ax1.set_xlabel('Distinct values in a feature')
ax1.set_ylabel('Features')
plt.figure(figsize=(16,8))
plt.plot(train['id'], train['loss'])
plt.title('Loss values per id')
plt.xlabel('id')
plt.ylabel('loss')
plt.legend()
print stats.mstats.skew(train['loss']).data
print stats.mstats.skew(np.log(train['loss'])).data
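# The raw loss is expected to be strongly right-skewed, while log(loss) should
# come out close to symmetric; this is why the log-transformed target is
# usually modelled for this competition.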
fig, (ax1, ax2) = plt.subplots(1,2)
fig.set_size_inches(16,5)
ax1.hist(train['loss'], bins=50)
ax1.set_title('Train Loss target histogram')
ax1.grid(True)
ax2.hist(np.log(train['loss']), bins=50, color='g')
ax2.set_title('Train Log Loss target histogram')
ax2.grid(True)
# plt.show()
train[cont_features].hist(bins=50,figsize=(16,12))
# plt.show()
for con in cont_features:
print stats.mstats.skew(train[con]).data
plt.subplots(figsize=(16,9))
correlation_mat = train[cont_features].corr()
sns.heatmap(correlation_mat, annot=True)
plt.show()
| mit |
ajylee/gpaw-rtxs | gpaw/transport/jstm.py | 1 | 66345 | from ase.units import Bohr, Hartree
from gpaw import GPAW
from gpaw.fd_operators import Laplace
from gpaw.utilities.tools import tri2full
from gpaw.lcao.projected_wannier import dots
from gpaw.grid_descriptor import GridDescriptor
from gpaw.lfc import NewLocalizedFunctionsCollection as LFC
from gpaw.lcao.tools import remove_pbc, get_lcao_hamiltonian, \
get_lead_lcao_hamiltonian
from gpaw.mpi import world
from gpaw.mpi import world as w
from gpaw import mpi
import time
import numpy as np
import numpy.linalg as la
from math import cos, sin, pi
import cPickle as pickle
class LocalizedFunctions:
def __init__(self, gd, f_iG, corner_c, index=None, vt_G=None):
self.gd = gd
self.size_c = np.array(f_iG.shape[1:4])
self.f_iG = f_iG
self.corner_c = corner_c
self.index = index
self.vt_G = vt_G
self.restricted = False
self.phase = 1
self.sdisp_c = np.array([0, 0])
def __len__(self):
return len(self.f_iG)
def set_phase_factor(self, k_c):
self.phase = np.exp(-2.j * pi * np.inner(k_c, self.sdisp_c))
def apply_t(self):
"""Apply kinetic energy operator and return new object."""
p = 2 # padding
newsize_c = self.size_c + 2 * p
assert self.gd.orthogonal
gd = GridDescriptor(N_c=newsize_c + 1,
cell_cv=self.gd.h_cv.diagonal() * (newsize_c + 1),
pbc_c=False,
comm=mpi.serial_comm)
T = Laplace(gd, scale=-1/2., n=p)
f_ig = np.zeros((len(self.f_iG),) + tuple(newsize_c))
f_ig[:, p:-p, p:-p, p:-p] = self.f_iG
Tf_iG = np.empty_like(f_ig)
T.apply(f_ig, Tf_iG)
return LocalizedFunctions(self.gd, Tf_iG, self.corner_c - p,
self.index)
def overlap(self, other):
"""Calculate the overlap between two Localized functions objects"""
start_c = np.maximum(self.corner_c, other.corner_c)
stop_c = np.minimum(self.corner_c + self.size_c,
other.corner_c + other.size_c)
if (start_c < stop_c).all():
astart_c = start_c - self.corner_c
astop_c = stop_c - self.corner_c
a_iG = self.f_iG[:,
astart_c[0]:astop_c[0],
astart_c[1]:astop_c[1],
astart_c[2]:astop_c[2]].reshape((len(self.f_iG), -1))
bstart_c = start_c - other.corner_c
bstop_c = stop_c - other.corner_c
b_iG = other.f_iG[:,
bstart_c[0]:bstop_c[0],
bstart_c[1]:bstop_c[1],
bstart_c[2]:bstop_c[2]].reshape((len(other.f_iG), -1))
a_iG1 = a_iG.copy()
if self.vt_G is not None:
a_iG1 *= self.vt_G[start_c[0]:stop_c[0],
start_c[1]:stop_c[1],
start_c[2]:stop_c[2]].reshape((-1,))
return (self.gd.dv * np.inner(a_iG1, b_iG) * self.phase *
np.conj(other.phase))
else:
return None
def restrict(self):
"""Restricts the box of the object to the current grid"""
start_c = np.maximum(self.corner_c, np.zeros(3))
stop_c = np.minimum(self.corner_c + self.size_c, self.gd.N_c)
if (start_c < stop_c).all():
astart_c = start_c - self.corner_c
astop_c = stop_c -self.corner_c
a_iG = self.f_iG[:,
astart_c[0]:astop_c[0],
astart_c[1]:astop_c[1],
astart_c[2]:astop_c[2]]
new_corner_c = self.corner_c
for i in np.where(self.corner_c<0):
new_corner_c[i] = 0
if (self.f_iG.shape != a_iG.shape):
self.restricted = True
self.corner_c = new_corner_c
self.f_iG = a_iG
self.size_c=np.asarray(a_iG.shape[1:])
def __or__(self, other):
if isinstance(other, LocalizedFunctions):
return self.overlap(other)
# other is a potential:
vt_G = other
newf = LocalizedFunctions(self.gd, self.f_iG, self.corner_c,
self.index, vt_G)
newf.sdisp_c = self.sdisp_c
newf.phase = self.phase
return newf
class WannierFunction(LocalizedFunctions):
def __init__(self, gd, wanf_G, corner_c, index=None):
LocalizedFunctions.__init__(self, gd, wanf_G[np.newaxis, :, :, :],
corner_c, index)
class AtomCenteredFunctions(LocalizedFunctions):
def __init__(self, gd, spline_j, spos_c, index=None):
rcut = max([spline.get_cutoff() for spline in spline_j])
cell = gd.cell_cv.copy()
diagonal = cell[0]+cell[1]
diagonal = diagonal/np.linalg.norm(diagonal)
a = np.zeros_like(diagonal).astype(float)
a[0]=cell[0][1]
a[1]=-cell[0][0]
a=-a/np.linalg.norm(a)
c = rcut/np.dot(diagonal,a)
# Determine corner
A = cell.T / gd.cell_cv.diagonal() # Basis change matrix
pos = np.dot(np.linalg.inv(cell.T), diagonal * c)
pos[2] = rcut / gd.cell_cv[2, 2]
h_c = gd.h_cv.diagonal()
cell_c = gd.cell_cv.diagonal()
assert gd.orthogonal
corner_c = np.ceil(spos_c * gd.N_c - pos * cell_c / h_c).astype(int)
self.center = pos * cell_c / h_c - corner_c
size_c = np.ceil(spos_c * gd.N_c +
pos * cell_c / h_c).astype(int) - corner_c
smallgd = GridDescriptor(N_c=size_c + 1,
cell_cv=(np.dot(A, np.diag(h_c *
(size_c + 1))).T),
pbc_c=False,
comm=mpi.serial_comm)
self.test = (np.dot(A,np.diag(h_c * (size_c +1))).T)
self.smallgd=smallgd
lfc = LFC(smallgd, [spline_j])
lfc.set_positions((spos_c[np.newaxis, :] * gd.N_c - corner_c + 1) /
smallgd.N_c)
ni = lfc.Mmax
f_iG = smallgd.zeros(ni)
lfc.add(f_iG, {0: np.eye(ni)})
LocalizedFunctions.__init__(self, gd, f_iG, corner_c,
index=index)
class STM:
"""Simulate STM-images using Green's function methods"""
def __init__(self, tip=None, surface=None, lead1=None, lead2=None,
**kwargs):
"""Create the STM-calculators.
Parameters:
tip_atom_index: int
index of the atom that defines the tip apex
dmin: float
minimal distance between tip and surface
hs1: {None, (h1, s1) tuple}
            here h1 and s1 are the Hamiltonian and overlap
matrix for the tip region.
hs10: {None, (h10, s10) tuple}
where h10 and s10 are the Hamiltonian and
overlap matrix for tip lead.
        hs2: {None, (h2, s2) tuple}
            here h2 and s2 are the Hamiltonian and overlap
            matrix for the surface region.
        hs20: {None, (h20, s20) tuple}
            here h20 and s20 are the Hamiltonian and
            overlap matrix for the surface lead.
align_bf: {1, Int}
Use align_bf=m to shift the surface region
by a constant potential so that the m'th onsite element in
the surface is aligned with the m'th onsite element in the
surface lead. Further the tip lead is shifted so that the
-m'th onsite element of the tip is aligned with the -m'th
onsite element of the tip lead.
bias: {1, float}
sets the bias value across the tunneling junction
de: {0.01, float}
            spacing of the energy grid on which the transmission function
            should be calculated.
w: {0.0, [0:1], float}
symmetry of the applied bias:
w=0 surface potential is fixed,
w=1 tip potential is fixed.
k_c: {(0, 0), array}
array of a k-point of the irreducible transverse
Brillouin zone.
energies: {None, array}
A custom energy grid, on which the transmission
function should be calculated.
eta1/eta2: {1e-4, float}
Infinitesimal for the calculation of the tip/surface lead
self-energies
cpu_grid: {None, (N,N) ndarray, dtype=Int}
define the cpu grid that is used for the calculation. The
first index refers to a parallelisation over tip
positions, while the second index refers to a
                parallelisation over basis functions. If 'None' is used,
                the parallelisation is over tip positions only.
logfile: {None , str}
Write a logfile in the local directory with name given by 'str'
"""
self.input_parameters = {'tip_atom_index': 0,
'dmin': 6.0,
'hs1': None,
'hs10': None,
'hs2': None,
'hs20': None,
'align_bf': 1,
'bias': 1.0,
'de': 0.01,
'k_c': (0, 0),
'energies': None,
'w': 0.0,
'eta1': 1e-4,
'eta2': 1e-4,
'cpu_grid': None,
'logfile': None}
self.tip = tip
self.srf = surface
self.lead1 = lead1
self.lead2 = lead2
self.stm_calc = None
self.scans = {}
self.potential_shift = 0
#initialize communicators
if 'cpu_grid' in kwargs:
self.input_parameters['cpu_grid'] = kwargs['cpu_grid']
if self.input_parameters['cpu_grid'] == None: # parallelization over domains only
self.input_parameters['cpu_grid'] = (world.size, 1)
n, m = self.input_parameters['cpu_grid']
assert n * m == world.size
ranks = np.arange(world.rank % m, world.size, m)
domain_comm = world.new_communicator(ranks) # comm for tip positions
r = world.rank // m * m
bfs_comm = world.new_communicator(np.arange(r, r + m)) # comm for bfs
self.world = world
self.domain_comm = domain_comm
self.bfs_comm = bfs_comm
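        # Illustrative layout (added comment, not from the original source):
        # with cpu_grid=(2, 3) and world.size == 6, ranks sharing the same
        # rank % m, i.e. {0, 3}, {1, 4} and {2, 5}, form the domain
        # communicators that split the tip positions, while the consecutive
        # blocks {0, 1, 2} and {3, 4, 5} form the bfs communicators that
        # split the surface basis functions for each tip position.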
self.initialized = False
self.transport_uptodate = False
self.hs_aligned = False
self.set(**kwargs)
def set(self, **kwargs):
for key in kwargs:
if key in ['hs1', 'hs10', 'hs2', 'hs20',
'cvl1', 'cvl2', 'bias',
'de', 'energies', 'w',
'align_bf', 'eta1', 'eta2']:
self.transport_uptodate = False
break
elif key in ['tip_atom_index', 'dmin']:
self.initialized = False
elif key in ['k_c']:
self.transport_uptodate = False
self.initialized = False
elif key not in self.input_parameters:
raise KeyError, '\'%s\' not a valid keyword' % key
self.input_parameters.update(kwargs)
log = self.input_parameters['logfile']
if log is None:
class Trash:
def write(self,s):
pass
def flush(self):
pass
self.log = Trash()
elif log == '-':
from sys import stdout
self.log = stdout
elif 'logfile' in kwargs:
self.log = open(log + str(world.rank), 'w')
def initialize(self):
"""Initialize the STM-calculator:
1. Preselect tip and surface basis functions with
overlapping values in the z-direction.
2. Initialize the tip region
3. Initialize the surface region
4. Initialize the transport calculator. This includes
a pre-calculation of the Green's functions.
"""
if self.initialized and self.transport_uptodate:
return
elif not self.transport_uptodate and self.initialized:
self.initialize_transport()
return
if world.rank == 0:
T = time.localtime()
self.log.write('#%d:%02d:%02d' % (T[3], T[4], T[5]) + ' Initializing\n')
self.log.flush()
p = self.input_parameters
self.dmin = p['dmin'] / Bohr
tip_atom_index = p['tip_atom_index']
# preselect tip and surface functions
tip_pos_av = self.tip.atoms.get_positions() / Bohr
srf_pos_av = self.srf.atoms.get_positions() / Bohr
tip_zmin = tip_pos_av[tip_atom_index, 2]
srf_zmax = srf_pos_av[:, 2].max()
tip_zmin_a = np.empty(len(tip_pos_av))
for a, setup in enumerate(self.tip.wfs.setups):
rcutmax = max([phit.get_cutoff() for phit in setup.phit_j])
tip_zmin_a[a] = tip_pos_av[a, 2] - rcutmax - tip_zmin
srf_zmax_a = np.empty(len(srf_pos_av))
for a, setup in enumerate(self.srf.wfs.setups):
rcutmax = max([phit.get_cutoff() for phit in setup.phit_j])
srf_zmax_a[a] = srf_pos_av[a, 2] + rcutmax - srf_zmax
tip_indices = np.where(tip_zmin_a < srf_zmax_a.max() - self.dmin)[0]
srf_indices = np.where(srf_zmax_a > tip_zmin_a.min() + self.dmin)[0]
srf_indices = np.arange(srf_indices.min(), len(srf_zmax_a)).astype(int)
# tip initialization
self.tip_cell = TipCell(self.tip, self.srf)
self.tip_cell.initialize(tip_indices, tip_atom_index)
self.ni = self.tip_cell.ni # number tip basis functions
# distribution of surface basis functions over CPUs in bfs-communicator
bcomm = self.bfs_comm
bfs_indices = []
j = 0
for a in srf_indices:
setup = self.srf.wfs.setups[a]
spos_c = self.srf.atoms.get_scaled_positions()[a]
for phit in setup.phit_j:
f = AtomCenteredFunctions(self.srf.wfs.gd, [phit], spos_c, j)
bfs_indices.append(j)
j += len(f.f_iG)
assert len(bfs_indices) >= bcomm.size
l = len(bfs_indices) // bcomm.size
rest = len(bfs_indices) % bcomm.size
if bcomm.rank < rest:
start = (l + 1) * bcomm.rank
stop = (l + 1) * (bcomm.rank + 1)
else:
start = l * bcomm.rank + rest
stop = l * (bcomm.rank + 1) + rest
bfs_indices = bfs_indices[start:stop] # surface bfs on this CPU
# surface initialization
self.srf_cell = SrfCell(self.srf)
self.srf_cell.initialize(self.tip_cell, srf_indices, bfs_indices, p['k_c'])
self.nj = self.srf_cell.nj
self.set_tip_position([0, 0])
if world.rank == 0:
self.log.write(' dmin = %.3f\n' % (self.dmin * Bohr) +
' tip atoms: %i to %i, tip functions: %i\n'
% (tip_indices.min(), tip_indices.max(),
len(self.tip_cell.functions))
+' surface atoms: %i to %i, srf functions %i\n'
%(srf_indices.min(), srf_indices.max(),
len(self.srf_cell.functions))
)
self.log.flush()
if not self.transport_uptodate:
self.initialize_transport()
# Shift the potential of the tip cell so that the fermi level
# of the combined stm
# is in the middle of the original tip and surface fermi level.
srf_efermi = self.srf.get_fermi_level() / Hartree
tip_efermi = self.tip.get_fermi_level() / Hartree
self.tip_cell.shift_potential(-self.potential_shift
-(srf_efermi + tip_efermi) / 2)
self.initialized = True
def initialize_transport(self, restart = False):
"""Initialize the transport calculator that is used to
calculate the Green's function matrices."""
p = self.input_parameters
h1, s1 = p['hs1']
h10, s10 = p['hs10']
h2, s2 = p['hs2']
h20, s20 = p['hs20']
align_bf = p['align_bf']
de = p['de']
bias = p['bias']
w = p['w']
eta1 = p['eta1']
eta2 = p['eta2']
if not self.hs_aligned:
tip_efermi = self.tip.get_fermi_level() / Hartree
srf_efermi = self.srf.get_fermi_level() / Hartree
fermi_diff = tip_efermi - srf_efermi
# Align bfs with the surface lead as a reference
diff = (h2[align_bf, align_bf] - h20[align_bf, align_bf]) \
/ s2[align_bf, align_bf]
self.potential_shift = diff / Hartree
h2 -= diff * s2
h1 -= diff * s1
diff1 = (h10[-1, -1]
- h1[-1, -1]) / s1[-1, -1]
h10 -= diff1 * s10
self.hs_aligned = True
if not self.transport_uptodate:
from ase.transport.stm import STM as STMCalc
if world.rank == 0:
T = time.localtime()
self.log.write('\n %d:%02d:%02d' % (T[3], T[4], T[5]) +
' Precalculating Green\'s functions\n')
self.log.flush()
# Determine the energy grid on which the transmission function
# should be evaluated.
if p['energies'] == None:
energies = np.sign(bias) * \
np.arange(-abs(bias) * w, -abs(bias) * (w - 1) + de, de)
energies.sort()
else:
energies = p['energies']
# distribute energy grid over all cpu's
self.energies = energies # global energy grid
l = len(energies) // world.size # minimum number of enpts per cpu
rest = len(energies) % world.size # first #rest cpus get +1 enpt
if world.rank < rest:
start = (l + 1) * world.rank
stop = (l + 1) * (world.rank + 1)
else:
start = l * world.rank + rest
stop = l * (world.rank + 1) + rest
energies = energies[start:stop] # energy grid on this cpu
# set up current calculator
stm_calc = STMCalc(h2, s2,
h1, s1,
h20, s20,
h10, s10,
eta1, eta2,
w=w, logfile=self.log)
# precalculate Green's functions for tip and surface.
if not restart:
stm_calc.initialize(energies,
bias=bias,)
self.stm_calc = stm_calc
self.transport_uptodate = True
if world.rank == 0:
T = time.localtime()
self.log.write(' %d:%02d:%02d' % (T[3], T[4], T[5]) +
' Done\n')
self.log.flush()
self.world.barrier()
def set_tip_position(self, position_c):
"""Set tip positions.
Positions tip atom as close as possible above the surface at
the grid point given by positions_c and sums the tip and
surface potentials"""
position_c = np.resize(position_c,3)
assert self.srf_cell.gd.orthogonal
h_c = self.srf_cell.gd.h_cv.diagonal()
tip_cell = self.tip_cell
tip_atom_index = tip_cell.tip_atom_index
tip_pos_av = tip_cell.atoms.get_positions() / Bohr
tip_zmin = tip_pos_av[tip_atom_index, 2]
tip_pos_av_grpt = self.tip_cell.gd.N_c\
* self.tip_cell.atoms.get_scaled_positions()
srf_pos_av = self.srf_cell.atoms.get_positions() / Bohr
srf_zmax = srf_pos_av[:, 2].max()
extension_c = np.resize(self.srf_cell.ext1,3)
extension_c[-1] = 0
#corner of the tip unit cell in the extended grid
cell_corner_c = position_c + extension_c\
- tip_pos_av_grpt[tip_atom_index]
cell_corner_c[2] = (srf_zmax + self.dmin - tip_zmin) / h_c[2]
cell_corner_c = np.round(cell_corner_c).astype(int)
self.tip_position = cell_corner_c + \
tip_pos_av_grpt[tip_atom_index] - extension_c
self.dmin = self.tip_position[2] * h_c[2] - srf_zmax
self.tip_cell.set_position(cell_corner_c)
# sum potentials
size_c = self.tip_cell.gd.n_c
current_Vt = self.srf_cell.vt_G.copy()
current_Vt[cell_corner_c[0] + 1:cell_corner_c[0] + 1 + size_c[0],
cell_corner_c[1] + 1:cell_corner_c[1] + 1 + size_c[1],
cell_corner_c[2] + 1:cell_corner_c[2] + 1 + size_c[2]]\
+= self.tip_cell.vt_G # +1 since grid starts at (1,1,1), pbc = 0
self.current_v = current_Vt
def get_V(self, position_c):
"""Returns the overlap hamiltonian at position_c"""
if not self.initialized:
self.initialize()
dtype = 'float'
if np.any(self.input_parameters['k_c']):
dtype = 'complex'
f_iGs = self.srf_cell.f_iGs
self.set_tip_position(position_c)
nj = self.nj
ni = self.ni
V_ij = np.zeros((nj, ni), dtype=dtype )
vt_G = self.current_v
for s in self.srf_cell.functions:
j1 = s.index
s.f_iG = f_iGs[j1]
j2 = j1 + len(s)
for t, t_kin in zip(self.tip_cell.functions,
self.tip_cell.functions_kin):
i1 = t.index
i2 = i1 + len(t)
V = (s | vt_G | t)
if V is None:
V = 0
kin = (s | t_kin)
if kin is None:
kin = 0
vk = np.asarray(V + kin)
assert abs(vk.imag).max() < 1e-14
V_ij[j1:j2, i1:i2] += vk.real
s.f_iG = None
self.bfs_comm.sum(V_ij)
return V_ij * Hartree
def get_transmission(self, position_c):
"""Calculates the transmission function for a given tip postion_c"""
energies = self.energies # global energy grid
l = len(energies) / world.size # minimum number of enpts per cpu
rest = len(energies) % world.size # first #rest cpus get +1 enpt
if world.rank < rest:
start = (l + 1) * world.rank
stop = (l + 1) * (world.rank + 1)
else:
start = l * world.rank + rest
stop = l * (world.rank + 1) + rest
T_glob = np.zeros_like(energies)
V_ts = self.get_V(position_c)
T_glob[start:stop] = self.stm_calc.get_transmission(V_ts)
world.sum(T_glob)
return T_glob
def get_current(self, position_c, bias=None):
self.initialize()
energies=self.energies
if bias == None:
bias = self.stm_calc.bias
T_e = self.get_transmission(position_c)
bias = self.stm_calc.bias
w = self.stm_calc.w
bias_window = -np.array([bias * w, bias * (w - 1)])
bias_window.sort()
i1 = sum(energies < bias_window[0])
i2 = sum(energies < bias_window[1])
step = 1
if i2 < i1:
step = -1
I = np.sign(bias)*np.trapz(x=energies[i1:i2:step],
y=T_e[i1:i2:step])
return I * 77466.1509 #units: nA
def get_s(self, position_c):
"""Returns the overlap matrix for a given tip postion position_c"""
dtype = 'float'
if np.any(self.input_parameters['k_c']):
dtype = 'complex'
self.set_tip_position(position_c)
S_ij = np.zeros((self.nj, self.ni), dtype=dtype )
f_iGs = self.srf_cell.f_iGs
for s in self.srf_cell.functions:
j1 = s.index
s.f_iG = self.srf_cell.f_iGs[j1]
j2 = j1 + len(s)
for t in self.tip_cell.functions:
i1 = t.index
i2 = i1 + len(t)
overlap = (s | t)
if overlap is not None:
S_ij[j1:j2, i1:i2] += overlap
return S_ij
def reset(self):
self.scans = {}
def scan(self):
"""Performs a scan at constant height.
Parallel run:
The calculation of the Green's functions is done in
parallel by all processors. Hence the energy grid is, at this point,
distributed over all processors.
Further it is possible to parallize over
1. tip-positions (domain_comm)
2. basis functions (bfs_comm)
Assume a processor grid of NxM CPU's. The first axis corresponds
        to a parallelization over tip positions and the second axis
corresponds to a parallelization over basis functions:
1. The tip positions are distributed among the N rows
of the cpu grid.
2. For each tip position the basis functions are distributed over
the M colums of the processor grid.
        First, the overlap Hamiltonian at each tip position is
        calculated. Second, the transmission function is
        evaluated for each tip position. Since the energy grid is
        distributed over all processors, the total transmission has
        to be calculated in successive steps by sending the Green's
function matrices along the N-axis of the processor grid.
"""
dtype = 'float'
if np.any(self.input_parameters['k_c']):
dtype = 'complex'
if world.rank == 0:
T = time.localtime()
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'Fullscan\n')
self.log.flush()
#distribute grid points over cpu's
dcomm = self.domain_comm
N_c = self.srf.wfs.gd.N_c[:2]
gpts_i = np.arange(N_c[0] * N_c[1])
l = len(gpts_i) // dcomm.size
rest = len(gpts_i) % dcomm.size
if dcomm.rank < rest:
start = (l + 1) * dcomm.rank
stop = (l + 1) * (dcomm.rank + 1)
else:
start = l * dcomm.rank +rest
stop = l * (dcomm.rank + 1) + rest
gpts_i = gpts_i[start:stop] # gridpoints on this cpu
V_g = np.zeros((len(gpts_i), self.nj, self.ni),
dtype=dtype) # V_ij's on this cpu
for i, gpt in enumerate(gpts_i):
x = gpt / N_c[1]
y = gpt % N_c[1]
V_g[i] = self.get_V((x, y))
#get the distribution of the energy grid over CPUs
el = len(self.energies) // world.size # minimum number of enpts per cpu
erest = len(self.energies) % world.size # first #rest cpus get +1 enpt
if world.rank < erest:
estart = (el + 1) * world.rank
else:
estart = el * world.rank + erest
bias = self.stm_calc.bias
if world.rank == 0:
T = time.localtime()
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'Done VS, starting T\n')
self.log.flush()
nepts = len(self.stm_calc.energies) # number of e-points on this cpu
T_pe = np.zeros((len(V_g), len(self.energies))) # Transmission function
self.log.write(str(T_pe.shape))
for j, V in enumerate(V_g):
T_pe[j, estart:estart + nepts] = self.stm_calc.get_transmission(V)
self.log.flush()
if world.rank == 0:
T = time.localtime()
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'T done\n')
self.log.flush()
world.barrier()
#send Green's functions if a parallel run
self.stm_calc.energies_req = self.stm_calc.energies.copy()
for i in range(dcomm.size - 1): # parallel run over tip positions
# send Green functions along the domain_comm axis
# tip and surface Green's functions have to be send separately,
# since in general they do not have the same shapes
rank_send = (dcomm.rank + 1) % dcomm.size
rank_receive = (dcomm.rank - 1) % dcomm.size
# send shape of gft, send also the initial index of the
# local energy list
gft1 = self.stm_calc.gft1_emm
request = dcomm.send(np.asarray((estart,) + gft1.shape), rank_send,
block=False)
data = np.array((0, 0, 0, 0), dtype=int)
dcomm.receive(data, rank_receive)
dcomm.wait(request)
estart, nepts = data[:2]
shape = data[1:]
# send Green function of the tip
gft1_receive = np.empty(tuple(shape), dtype = complex)
request = dcomm.send(gft1, rank_send, block=False)
dcomm.receive(gft1_receive, rank_receive)
dcomm.wait(request)
            # send shape of the surface Green's functions
gft2 = self.stm_calc.gft2_emm
request = dcomm.send(np.asarray(gft2.shape), rank_send,
block=False)
shape = np.array((0, 0, 0), dtype=int)
dcomm.receive(shape, rank_receive)
dcomm.wait(request)
#send surface green function
gft2_receive = np.empty(tuple(shape), dtype=complex)
request = dcomm.send(gft2, rank_send, block=False)
dcomm.receive(gft2_receive, rank_receive)
dcomm.wait(request)
self.stm_calc.gft1_emm = gft1_receive
self.stm_calc.gft2_emm = gft2_receive
self.stm_calc.energies = self.energies[estart:estart + nepts]
T = time.localtime()
if world.rank == 0:
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'Received another gft, start T\n')
self.log.flush()
for j, V in enumerate(V_g):
T_pe[j, estart:estart + nepts] = \
self.stm_calc.get_transmission(V)
T = time.localtime()
if world.rank == 0:
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'Done\n')
self.log.flush()
self.bfs_comm.sum(T_pe)
# next calculate the current. Parallelize over bfs_comm
# distribute energy grid over all cpu's
bcomm = self.bfs_comm
energies = self.energies # global energy grid
l = len(energies) // bcomm.size
rest = len(energies) % bcomm.size
if bcomm.rank < rest:
start = (l + 1) * bcomm.rank
stop = (l + 1) * (bcomm.rank + 1) + 1 # +1 is important
else:
start = l * bcomm.rank + rest
stop = l * (bcomm.rank + 1) + rest + 1
T = time.localtime()
if world.rank == 0:
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'start Current\n')
self.log.flush()
energies = energies[start:stop] # energy grid on this CPU
T_pe = T_pe[:, start:stop]
ngpts = len(T_pe)
bias = self.stm_calc.bias
w = self.stm_calc.w
bias_window = -np.array([bias * w, bias * (w - 1)])
bias_window.sort()
i1 = sum(energies < bias_window[0])
i2 = sum(energies < bias_window[1])
step = 1
if i2 < i1:
step = -1
I_g = np.sign(bias) * np.trapz(x=energies[i1:i2:step],
y=T_pe[:, i1:i2:step])
bcomm.sum(I_g)
I_g *= 77466.1509 # units are nA
T = time.localtime()
if world.rank == 0:
self.log.write(' %d:%02d:%02d ' % (T[3], T[4], T[5])
+ 'stop current\n')
self.log.flush()
# next gather the domains
scan = np.zeros(N_c)
for i, gpt in enumerate(gpts_i):
x = gpt / N_c[1]
y = gpt % N_c[1]
scan[x, y] = I_g[i]
self.domain_comm.sum(scan) # gather image
sgd = self.srf.wfs.gd
data = (bias, sgd.N_c, sgd.h_cv.diagonal(), sgd.cell_cv,
sgd.cell_cv.diagonal())
dmin = self.get_dmin()
fullscan = (data, scan)
if world.rank == 0:
fd = open('scan_' + str(np.round(self.get_dmin(), 2)) + '_bias_'\
+ str(bias) + '_.pckl', 'wb')
pickle.dump((dmin,fullscan[0], fullscan[1]), fd, 2)
fd.close()
world.barrier()
self.scans['fullscan'] = fullscan
T = time.localtime()
self.log.write(' %d:%02d:%02d' % (T[3], T[4], T[5]) +
'Fullscan done\n')
def scan3d(self, zmin, zmax, filename = 'scan3d.pckl'):
"""Map the current between the minumum tip height zmin and the
maximum tip height zmax. The result is dumped to a file in the local
direcotry"""
sgd = self.srf.wfs.gd
bias = self.input_parameters['bias']
data = (bias, sgd.N_c, sgd.h_cv.diagonal(), sgd.cell_cv,
sgd.cell_cv.diagonal())
self.scans['scan3d'] = (data, {})
hz = self.srf.wfs.gd.h_cv[2, 2] * Bohr
dmins = -np.arange(zmin, zmax + hz, hz)
dmins.sort()
dmins = -dmins
for dmin in dmins:
world.barrier()
self.set(dmin=dmin)
self.initialize()
self.scan()
dmin = self.get_dmin()
self.scans['scan3d'][1][dmin] = self.scans['fullscan'][1].copy()
world.barrier()
if world.rank == 0:
fd = open(filename, 'wb')
pickle.dump(self.scans['scan3d'], fd, 2)
fd.close()
world.barrier()
def get_constant_current_image(self, I):
"""Calculate the constant current image by interpolation between
constant height scans.
"""
assert self.scans.has_key('scan3d')
data, scans = self.scans['scan3d']
hz = data[2][2] * Bohr
dmins = []
for dmin in scans.keys():
dmins.append(dmin)
dmins.sort()
scans3d = np.zeros(tuple(scans.values()[0].shape) + (len(dmins),))
for i, dmin in enumerate(dmins):
scans3d[:, :, i] = scans[dmin]
scans = scans3d.copy()
shape = tuple(scans.shape[:2])
cons = np.zeros(shape)
for x in range(shape[0]):
for y in range(shape[1]):
x_I = abs(scans[x, y, :])
i1 = np.where(x_I <= I)[0].min()
i2 = i1 - 1
I1 = x_I[i1]
I2 = x_I[i2]
h = I2 - I1
Ih = (I - I1) / h
result = i1 * (1 - Ih) + i2 * Ih
if i2 < 0:
result = 0
cons[x, y] = result * hz + dmins[0]
self.scans['fullscan'] = (data, cons)
def get_constant_height_image(self, index):
assert self.scans.has_key('scan3d')
data, scans = self.scans['scan3d']
dmins = []
for dmin in scans.keys():
dmins.append(dmin)
dmins.sort()
key = dmins[index]
print key
self.scans['fullscan'] = (data, abs(scans[key]))
def linescan(self, startstop=None):
if 'fullscan' in self.scans:
data, scan = self.scans['fullscan']
cell_cv = data[3] #XXX
cell_c = data[4] #XXX
h_c = data[2] #XXX
N_c = data[1] #XXX
else:
sgd = self.srf.wfs.gd
cell_cv = sgd.cell_cv
cell_c = sgd.cell_cv.diagonal()
h_c = sgd.h_cv.diagonal()
N_c = sgd.N_c
if startstop == None:
start = np.array([0, 0])
stop = N_c[:2] - 1
else:
start = np.asarray(startstop[0])
stop = np.asarray(startstop[1])
assert ((N_c[:2] - stop)>=0).all()
v = (stop - start) / np.linalg.norm(stop - start)
n_c = N_c[:2]
h_c = h_c[:2]
h = np.linalg.norm(v*h_c)
n = np.floor(np.linalg.norm((stop - start) * h_c) / h).astype(int) + 1
linescan_n = np.zeros((n, ))
line = np.arange(n)*Bohr*h
for i in range(n):
grpt = start + v * i
            if np.round((grpt % 1), 5).any(): # Interpolate if necessary
C = np.empty((2, 2, 2)) # find four nearest neighbours
C[0,0] = np.floor(grpt)
C[1,0] = C[0, 0] + np.array([1, 0])
C[0,1] = C[0, 0] + np.array([0, 1])
C[1,1] = C[0, 0] + np.array([1, 1])
xd = (grpt % 1)[0]
yd = (grpt % 1)[1]
if not 'fullscan' in self.scans:
I1 = self.get_current(C[0, 0]) * (1 - xd) \
+ self.get_current(C[1, 0]) * xd
I2 = self.get_current(C[0, 1]) * (1 - xd) \
+ self.get_current(C[1, 1]) * xd
I = I1 * (1 - yd) + I2 * yd
else:
fullscan = scan
I1 = fullscan[tuple(C[0, 0])] * (1 - xd) \
+ fullscan[tuple(C[1, 0])] * xd
I2 = fullscan[tuple(C[0, 1])] * (1 - xd) \
+ fullscan[tuple(C[1, 1])] * xd
I = I1 * (1 - yd) + I2 * yd
else:
if not 'fullscan' in self.scans:
I = self.get_current(grpt.astype(int))
else:
I = scan[tuple(grpt.astype(int))]
linescan_n[i] = I
self.scans['linescan'] = ([start, stop], line, linescan_n)
def get_dmin(self):
return self.dmin * Bohr
def read_scans_from_file(self, filename):
scan3d = pickle.load(open(filename))
self.scans['scan3d'] = scan3d
def plot(self, repeat=(1, 1), vmin=None, vmax = None, show = True,
label=None):
import matplotlib
import pylab
from pylab import ogrid, imshow, cm, colorbar
repeat = np.asarray(repeat)
if self.scans.has_key('fullscan'):
data, scan0_iG = self.scans['fullscan']
cell_cv = data[3] #XXX
cell_c = data[4] #XXX
h_c = data[2] #XXX
gdN_C = data[1] #XXX
shape0 = np.asarray(scan0_iG.shape)
scan1_iG = np.zeros(shape0 * repeat)
for i in range(repeat[0]):
for j in range(repeat[1]):
start = np.array([i,j]) * shape0
stop = start + shape0
scan1_iG[start[0]:stop[0], start[1]:stop[1]] = scan0_iG
is_orthogonal = np.round(np.trace(cell_cv)-np.sum(cell_c), 5) == 0
if not is_orthogonal:
h = [0.2, 0.2]
else:
h = h_c[:2]
scan0_iG = scan1_iG
shape = scan0_iG.shape
scan_iG = np.zeros(tuple(np.asarray(shape)+1))
scan_iG[:shape[0],:shape[1]] = scan0_iG
scan_iG[-1,:shape[1]] = scan0_iG[0,:]
scan_iG[:,-1] = scan_iG[:,0]
N_c = np.floor((cell_cv[0,:2] * repeat[0]\
+ cell_cv[1, :2] * repeat[1]) / h[0]).astype(int)
ortho_cell_c = np.array(N_c * h_c[:2])
plot = np.zeros(tuple(N_c))
# is srf_cell orthogonal ?
if not is_orthogonal:
h = [0.2, 0.2]
# Basis change matrix
# e -> usual basis {(1,0),(0,1)}
                # o -> basis describing the original cell
# n -> basis describing the new cell
eMo = (cell_cv.T / cell_c * h_c)[:2,:2]
eMn = np.eye(2) * h
oMn = np.dot(np.linalg.inv(eMo), eMn)
for i in range(N_c[0]):
for j in range(N_c[1]):
grpt = np.dot(oMn, [i,j])
if (grpt<0).any() or (np.ceil(grpt)>shape).any():
plot[i,j] = plot.min() - 1000
else: # interpolate
C00 = np.floor(grpt).astype(int)
C01 = C00.copy()
C01[0] += 1
C10 = C00.copy()
C10[1] += 1
C11 = C10.copy()
C11[0] += 1
x0 = grpt[0] - C00[0]
y0 = grpt[1] - C00[1]
P0 = scan_iG[tuple(C00)] * (1 - x0)\
+ scan_iG[tuple(C01)] * x0
P1 = scan_iG[tuple(C10)] * (1 - x0)\
+ scan_iG[tuple(C11)] * x0
plot[i,j] = P0 * (1 - y0) + P1 * y0
else:
plot = scan_iG.copy()
plot = plot.T # origin to the lower left corner
self.scans['interpolated_plot'] = plot
if vmin == None:
vmin = scan0_iG.min()
if vmax == None:
vmax=scan0_iG.max()
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
self.pylab = pylab
f0 = pylab.figure()
self.fig0 = f0
p0 = f0.add_subplot(111)
x,y = ogrid[0:plot.shape[0]:1, 0:plot.shape[1]:1]
extent=[0, (plot.shape[1] - 1) * h[1] * Bohr,
0, (plot.shape[0] - 1) * h[0] * Bohr]
p0.set_ylabel('\xc5')
p0.set_xlabel('\xc5')
imshow(plot,
norm=norm,
interpolation='bicubic',
origin='lower',
cmap=cm.hot,
extent=extent)
fontsize=100
cb = colorbar()
ax = cb.ax
if label is not None:
cb.set_label(label)
if self.scans.has_key('linescan'):
startstop, line, linescan_n = self.scans['linescan']
start = startstop[0]
stop = startstop[1]
f1 = pylab.figure()
self.fig1 = f1
p1 = f1.add_subplot(111)
self.p1 = p1
if label is not None:
p1.set_ylabel(label)
p1.plot(line, linescan_n)
eMo = (cell_cv.T / cell_c)[:2,:2]
start = np.dot(eMo, start * h_c[:2]) * Bohr
stop = np.dot(eMo, stop * h_c[:2]) * Bohr
if self.scans.has_key('fullscan'): #Add a line
p0.plot([start[0], stop[0]], [start[1], stop[1]],'-b')
p0.set_xlim(tuple(extent[:2]))
p0.set_ylim(tuple(extent[-2:]))
if world.rank == 0:
if show == True:
pylab.show()
else:
return None
class TipCell:
def __init__(self, tip, srf):
self.tip = tip
self.srf = srf
self.gd = None
self.vt_G = None
self.tip_atom_index = None
self.functions = []
self.functions_kin = []
self.energy_shift = 0
def initialize(self, tip_indices, tip_atom_index, debug=False):
self.tip_indices = tip_indices
self.tip_atom_index = tip_atom_index
assert tip_atom_index in tip_indices
tgd = self.tip.wfs.gd
sgd = self.srf.wfs.gd
tip_atoms = self.tip.atoms.copy()[tip_indices]
tip_atoms.pbc = 0
tip_pos_av = tip_atoms.get_positions().copy() / Bohr
tip_cell_cv = tgd.cell_cv
srf_cell_cv = sgd.cell_cv
tip_zmin = tip_pos_av[tip_atom_index, 2]
tip_zmin_a = np.zeros(len(tip_indices))
# size of the simulation cell in the z-direction
m = 0
for a, setup in enumerate(self.tip.wfs.setups):
if a in tip_indices:
rcutmax = max([phit.get_cutoff() for phit in setup.phit_j])
tip_zmin_a[m] = tip_pos_av[m, 2] - rcutmax - tip_zmin
m+=1
p=2
zmax_index = np.where(tip_pos_av[:, 2] == tip_pos_av[:, 2].max())[0][0]
cell_zmin = tip_zmin + tip_zmin_a.min()
cell_zmax = 2 * tip_pos_av[zmax_index, 2]\
- tip_zmin - tip_zmin_a[zmax_index]
self.cell_zmin = cell_zmin
self.cell_zmax = cell_zmax
if cell_zmax > tgd.cell_cv[2, 2] - tgd.h_cv[2, 2]:
cell_zmax = tgd.cell_cv[2, 2] - tgd.h_cv[2, 2]
cell_zmin_grpt = np.floor(cell_zmin / tgd.h_cv[2, 2] - p).astype(int)
if cell_zmin_grpt < 0:
cell_zmin_grpt = 0
cell_zmax_grpt = np.floor(cell_zmax / tgd.h_cv[2, 2]).astype(int)
new_sizez = cell_zmax_grpt - cell_zmin_grpt
self.cell_zmax_grpt = cell_zmax_grpt
self.cell_zmin_grpt = cell_zmin_grpt
# If tip and surface cells differ in the xy-plane,
# determine the 2d-cell with the smallest area, having lattice vectors
# along those vectors describing the 2d-cell belonging to the surface.
        # This part is messy and can be disregarded if tip and surface are calculated
# in equal unit cells.
srf_basis = srf_cell_cv.T / sgd.cell_cv.diagonal()
tip_basis = tip_cell_cv.T / tgd.cell_cv.diagonal()
if (srf_basis - tip_basis).any(): # different unit cells
dointerpolate = True
            steps = 500 # XXX coarse fixed angular sampling
thetas = np.arange(0, pi, pi/steps)
areas = np.zeros_like(thetas).astype(float)
for i, theta in enumerate(thetas):
cell = smallestbox(tip_cell_cv, srf_cell_cv, theta)[0]
area = np.cross(cell[0, :2], cell[1, :2])
areas[i] = abs(area)
area_min_index = np.where(areas == areas.min())[0].min()
theta_min = thetas[area_min_index]
newcell_cv, origo_c = smallestbox(tip_cell_cv, srf_cell_cv,
theta_min)
tip_pos_av = np.dot(rotate(theta_min), tip_pos_av.T).T + origo_c
newcell_c = np.array([la.norm(cell_cv[x]) for x in range(3)])
newsize2_c = np.around(newcell_c / sgd.h_cv.diagonal()).astype(int)
elif (sgd.h_cv - tgd.h_cv).any(): # different grid spacings
dointerpolate = True
newcell_cv = tip_cell_cv
newcell_c = tgd.cell_cv.diagonal()
newsize2_c = np.around(newcell_c / sgd.h_cv.diagonal()).astype(int)
theta_min = 0.0
origo_c = np.array([0,0,0])
else:
dointerpolate = False
newsize2_c = tgd.N_c.copy()
vt_sG = self.tip.hamiltonian.vt_sG.copy()
vt_sG = self.tip.wfs.gd.collect(vt_sG, broadcast=True)
vt_G = vt_sG[0]
vt_G = vt_G[:, :, cell_zmin_grpt:cell_zmax_grpt]
theta_min = 0.0
origo_c = np.array([0,0,0])
self.vt_G = vt_G
N_c_bak = self.tip.wfs.gd.N_c.copy()
tip_pos_av[:,2] -= cell_zmin_grpt * tgd.h_cv[2, 2]
newsize2_c[2] = new_sizez.copy()
newcell_c = (newsize2_c + 1) * sgd.h_cv.diagonal()
newcell_cv = srf_basis * newcell_c
newgd = GridDescriptor(N_c=newsize2_c+1,
cell_cv=newcell_cv,
pbc_c=False,
comm=mpi.serial_comm)
new_basis = newgd.cell_cv.T / newgd.cell_cv.diagonal()
origo_c += np.dot(new_basis, newgd.h_cv.diagonal())
tip_pos_av += np.dot(new_basis, newgd.h_cv.diagonal())
tip_atoms.set_positions(tip_pos_av * Bohr)
tip_atoms.set_cell(newcell_cv * Bohr)
self.atoms = tip_atoms
self.gd = newgd
# quick check
assert not (np.around(new_basis - srf_basis, 5)).all()
assert not (np.around(newgd.h_cv - sgd.h_cv, 5)).all()
# add functions
functions = []
i=0
for k, a in enumerate(tip_indices):
setup = self.tip.wfs.setups[a]
spos_c = self.atoms.get_scaled_positions()[k]
for phit in setup.phit_j:
f = AtomCenteredFunctions(self.gd, [phit], spos_c, i)
functions.append(f)
i += len(f.f_iG)
self.ni = i
# Apply kinetic energy:
functions_kin = []
for f in functions:
functions_kin.append(f.apply_t())
for f, f_kin in zip(functions, functions_kin):
f.restrict()
f_kin.restrict()
self.attach(functions,functions_kin)
if dointerpolate:
self.interpolate_vt_G(theta_min, origo_c)
def attach(self, functions, functions_kin):
self.functions = functions
self.functions_kin = functions_kin
p0 = {}
p0_kin = {}
for f, f_kin in zip(self.functions, self.functions_kin):
            p0[f] = f.corner_c.copy()
            p0_kin[f_kin] = f_kin.corner_c.copy()
self.p0 = p0
self.p0_kin = p0_kin
def set_position(self, position_c):
self.position = position_c
for f, f_kin in zip(self.functions, self.functions_kin):
f.corner_c = position_c + self.p0[f]
f_kin.corner_c = position_c + self.p0_kin[f_kin]
def shift_potential(self, shift):
self.vt_G -= self.energy_shift
self.vt_G += shift
self.energy_shift = shift
def interpolate_vt_G(self, theta_min, origo_c):
"""Interpolates the effective potential of the tip calculation onto
the grid of the simulation cell for the tip.
The transformation iMj maps a point from grid 'j' to
a point on grid 'i', j --> i.
Definitions:
e - 'natural' grid, {(1, 0, 0), (0, 1, 0), (0, 0, 1)}
o - grid of the original tip calculation
r - rotated grid of the original tip calculation
n - grid of the tip simulation cell.
Outside the unitcell of the original tip calculation
the effective potential is set to zero"""
vt_sG0 = self.tip.hamiltonian.vt_sG.copy()
vt_sG0 = self.tip.wfs.gd.collect(vt_sG0, broadcast = True)
vt_G0 = vt_sG0[0]
vt_G0 = vt_G0[:, :, self.cell_zmin_grpt:self.cell_zmax_grpt]
tgd = self.tip.wfs.gd
newgd = self.gd
shape0 = vt_G0.shape
tip_basis = tgd.cell_cv.T / tgd.cell_cv.diagonal()
new_basis = newgd.cell_cv.T / newgd.cell_cv.diagonal()
eMo = tip_basis * tgd.h_cv.diagonal()
eMr = np.dot(rotate(theta_min), eMo)
eMn = new_basis * newgd.h_cv.diagonal()
rMn = np.dot(la.inv(eMr), eMn)
vt_G = newgd.zeros()
shape = vt_G.shape
self.shape2 = shape
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
gpt_n = [i, j, k]
gpt_r = np.dot(rMn, gpt_n) - np.dot(la.inv(eMr), origo_c)
if (gpt_r < 0).any() or (np.ceil(gpt_r) > tgd.n_c).any():
vt_G[i,j,k] = 0
else: # trilinear interpolation
C000 = np.floor(gpt_r).astype(int)
z00 = gpt_r[2] - C000[2]
C001 = C000.copy()
C001[2] += 1
C100 = C000.copy()
C100[0] += 1
C101 = C100.copy()
C101[2] += 1
C010 = C000.copy()
C010[1] += 1
C011 = C010.copy()
C011[2] += 1
C110 = C000.copy()
C110[:2] += 1
C111 = C110.copy()
C111[2] += 1
x0 = gpt_r[0] - C000[0]
y0 = gpt_r[1] - C000[1]
C = np.zeros((4,2))
C1 = np.array([[vt_G0[tuple(C000%shape0)],
vt_G0[tuple(C001 % shape0)]],
[vt_G0[tuple(C010 % shape0)],
vt_G0[tuple(C011 % shape0)]]])
C2 = np.array([[vt_G0[tuple(C100 % shape0)],
vt_G0[tuple(C101 % shape0)]],
[vt_G0[tuple(C110 % shape0)],
vt_G0[tuple(C111 % shape0)]]])
Z = np.array([1 - z00, z00])
X = np.array([1 - x0, x0])
Y = np.array([1 - y0, y0])
Q = np.zeros((2, 2))
Q[:,0]=np.dot(C1, Z)
Q[:,1]=np.dot(C2, Z)
F2 = dots(Y, Q, X)
vt_G[i, j, k] = F2
self.vt_G = vt_G
class SrfCell:
def __init__(self, srf):
self.srf = srf
self.functions = []
self.energy_shift = 0.0
def initialize(self, tip_cell, srf_indices, bfs_indices, k_c):
self.srf_indices = srf_indices
# determine the extended unitcell
        # The code is really messy. In short, this part determines the number
# of times the
# original unit cell has to be repeated so that the tip cell
# fits in the simulation cell for all tip positions. Needs to be
# re-written badly.
srf_vt_sG = self.srf.hamiltonian.vt_sG.copy()
srf_vt_sG = self.srf.wfs.gd.collect(srf_vt_sG, broadcast = True)
srf_vt_G = srf_vt_sG[0]
tip = tip_cell
tip_atom_index = tip.tip_atom_index
spos_ac = tip.atoms.get_scaled_positions()
tip_atom_spos = spos_ac[tip_atom_index][:2]
tgd = tip.gd
sgd = self.srf.wfs.gd
tip_cell_cv = tgd.cell_cv[:2, :2]
tip_cell_c = tgd.cell_cv.diagonal()[:2]
tip_basis = tip_cell_cv.T / tip_cell_c
srf_cell_cv = sgd.cell_cv[:2, :2]
srf_cell_c = sgd.cell_cv.diagonal()[:2]
srf_basis = tip_cell_cv.T / tip_cell_c
assert not (np.round(tgd.h_cv - sgd.h_cv, 5)).all()
assert not (np.round(tip_basis - srf_basis, 5)).all()
extension1_c = tip_atom_spos * tip_cell_c / srf_cell_c
extension2_c = (1 - tip_atom_spos) * tip_cell_c / srf_cell_c
ext1_c = np.ceil(extension1_c * sgd.N_c[:2]).astype(int)
ext2_c = np.ceil(extension2_c * sgd.N_c[:2]).astype(int)
srf_shape = sgd.N_c[:2]
extension1 = ext1_c / srf_shape.astype(float)
extension2 = ext2_c / srf_shape.astype(float)
# Size of the extended grid in the transverse directions.
newsize_c = ext1_c + ext2_c + sgd.N_c[:2]
sizez = srf_vt_G.shape[2]
# New size of the extended grid in the z direction.
newsizez = sizez + 10.0 / Bohr / sgd.h_cv[2, 2]
# The extended potential
vt_G = np.zeros(tuple(newsize_c) + (newsizez,))
# Add the potential to the grid
intexa = ext1_c / srf_shape[:2]
rest1 = ext1_c % srf_shape[:2]
intexb = ext2_c / srf_shape[:2]
rest2 = ext2_c % srf_shape[:2]
for n in range(intexa[0]+intexb[0] + 1 ):
for m in range(intexa[1] + intexb[1] + 1):
vt_G[rest1[0] + n * srf_shape[0]:\
rest1[0] + (n + 1) * srf_shape[0],\
rest1[1] + m * srf_shape[1]:\
rest1[1] + (m + 1) * srf_shape[1], :sizez] = srf_vt_G
if rest2[1] == 0:
rest2[1] += 1
if rest2[0] == 0:
rest2[0] += 1
vt_G[:rest1[0], rest1[1]: -rest2[1]]\
= vt_G[-rest1[0] - rest2[0]:-rest2[0], rest1[1]:-rest2[1]]
vt_G[-rest2[0]:, rest1[1]:-rest2[1]]\
= vt_G[rest1[0]:rest1[0] + rest2[0], rest1[1]:-rest2[1]]
vt_G[:, :rest1[1]] = vt_G[:, -rest2[1] - rest1[1]:-rest2[1]]
vt_G[:, -rest2[1]:] = vt_G[:, rest1[1]:rest1[1]+rest2[1]]
# Grid descriptor of the extended unit cell
self.vt_G = vt_G
newsize_c = np.resize(newsize_c, 3)
newsize_c[2] = sgd.N_c[2]
newcell_cv = (newsize_c + 1) * (sgd.cell_cv.T /
sgd.cell_cv.diagonal() *
sgd.h_cv.diagonal())
newgd = GridDescriptor(N_c=newsize_c + 1,
cell_cv=newcell_cv,
pbc_c=False,
comm=mpi.serial_comm)
self.gd = newgd
srf_atoms = self.srf.atoms.copy()[srf_indices]
self.atoms = srf_atoms
# add functions
j = 0
for a in srf_indices:
setup = self.srf.wfs.setups[a]
spos_c = self.srf.atoms.get_scaled_positions()[a]
for phit in setup.phit_j:
f = AtomCenteredFunctions(self.srf.wfs.gd, [phit], spos_c, j)
if j in bfs_indices:
self.functions.append(f)
j += len(f.f_iG)
self.nj = j
# shift corners so that the origin now is the extended surface
for f in self.functions:
f.corner_c[:2] += ext1_c
self.ext1 = ext1_c
# Add an appropriate number of periodic images. The function values are
# only saved in the memory, f_iGs, for those functions belonging to
# the original unit cell. In order to be included a periodic image has
# to have a finite overlap with the extended surface cell.
origo = np.array([0, 0])
list = []
f_iGs = {}
for f in self.functions:
f_iGs[f.index] = f.f_iG
f.f_iG = None
list.append(f)
for n in range(-100, 100, 1):
for m in range(-100, 100, 1):
# Translation vector of the periodic image:
R = np.array((n, m))
newcorner_c = f.corner_c[:2] + R * sgd.N_c[:2]
start_c = np.maximum(newcorner_c, origo)
stop_c = np.minimum(newcorner_c + f.size_c[:2],
newgd.n_c[:2])
# Check if the periodic image has an overlap with the
# extended surface cell.
if (start_c < stop_c).all():
newcorner_c = np.resize(newcorner_c, 3)
newcorner_c[2] = f.corner_c[2]
newf = LocalizedFunctions(f.gd, f_iGs[f.index],
corner_c=newcorner_c,
index=f.index,
vt_G=f.vt_G)
newf.f_iG = None
newf.sdisp_c = R
newf.set_phase_factor(k_c)
if np.any(R):
list.append(newf)
self.functions = list
self.f_iGs = f_iGs
self.atoms = srf_atoms
def shift_potential(self, shift):
self.vt_G -= self.energy_shift
self.vt_G += shift
self.energy_shift = shift
def dump_hs(calc, filename, region, cvl=0, direction= 'z', return_hs=False):
"""Pickle H and S.
Pickle LCAO - Hamiltonian and overlap matrix for a tip or surface
calculation.
region: {['tip', 'surface', 'None'] str}
has to be set.
cvl: {0, Int}
Number of basis functions in the convergence layer, that
has to be 'cut' away in order to assure a smooth matching
of the potential at the lead-surface/tip interface.
"""
assert region in ['tip', 'surface', 'None']
if calc.wfs.S_qMM is None:
calc.initialize(calc.atoms)
calc.initialize_positions(calc.atoms)
dir = 'xyz'.index(direction)
h_skmm, s_kmm = get_lcao_hamiltonian(calc)
atoms = calc.atoms.copy()
atoms.set_calculator(calc)
ibzk2d_kc = calc.get_ibz_k_points()[:, :2]
weight2d_k = calc.get_k_point_weights()
if w.rank == 0:
efermi = calc.get_fermi_level()
h_kmm = h_skmm[0] - s_kmm * efermi
for i in range(len(h_kmm)):
remove_pbc(atoms, h_kmm[i], s_kmm[i], dir)
if region == 'tip':
if cvl!=0:
h_kmm = h_kmm[:, :-cvl, :-cvl]
s_kmm = s_kmm[:, :-cvl, :-cvl]
if region == 'surface':
h_kmm = h_kmm[:, cvl:, cvl:]
s_kmm = s_kmm[:, cvl:, cvl:]
fd = open(filename + '_hs.pckl', 'wb')
pickle.dump((h_kmm, s_kmm), fd, 2)
fd.close()
fd = open(filename + '_data.pckl', 'wb')
pickle.dump((ibzk2d_kc, weight2d_k), fd, 2)
fd.close()
if return_hs:
return ibzk2d_kc, weight2d_k, h_kmm, s_kmm
def dump_lead_hs(calc, filename, direction='z', return_hs=False):
"""Pickle real space LCAO - Hamiltonian and overlap matrix for a
periodic lead calculation.
"""
efermi = calc.get_fermi_level()
ibzk2d_c, weight2d_k, h_skmm, s_kmm\
= get_lead_lcao_hamiltonian(calc, direction=direction)
if w.rank == 0:
h_kmm = h_skmm[0] - efermi * s_kmm
fd = open(filename + '_hs.pckl', 'wb')
pickle.dump((h_kmm, s_kmm), fd, 2)
fd.close()
fd = open(filename + '_data.pckl', 'wb')
pickle.dump((ibzk2d_c, weight2d_k), fd, 2)
fd.close()
if return_hs:
return ibzk2d_c, weight2d_k, h_kmm, s_kmm
else:
return None, None, None, None
def intersection(l1, l2):
"""Intersection (x, y, t) between two lines.
Two points on each line have to be specified.
"""
a1 = l1[0]
b1 = l1[1]
a2 = l2[0]
b2 = l2[1]
A = np.zeros((2,2))
A[:,0] = b1-a1
A[:,1] = a2-b2
if np.round(np.linalg.det(A),5) == 0: #parallel lines
return None
r = a2 - a1
t = np.dot(la.inv(A), r.T)
xy = a2 + t[1] * (b2 - a2)
return (list(xy), t)
def rotate(theta):
return np.array([[cos(theta), sin(theta), 0],
[-sin(theta), cos(theta), 0],
[0, 0, 1]])
def unravel2d(data, shape):
pass
def smallestbox(cell1, cell2, theta, plot=False):
"""Determines the smallest 2d unit cell which encloses cell1 rotated at
an angle theta around the z-axis and which has lattice vectors parallel
to those of cell2."""
ct = cell1[:2,:2] * Bohr
cs = cell2[:2,:2] * Bohr
v3 = [cs[0],cs[1]]
lsrf = [np.array([0.,0.]),v3[1],v3[0]+v3[1],v3[0],np.array([0,0])]
cs = cs/np.array([la.norm(cs[0]),la.norm(cs[1])])
v4 = [ct[0],ct[1]]
lct = [np.array([0.,0.]),v4[1],v4[0]+v4[1],v4[0],np.array([0,0])]
    new_ct = np.dot(rotate(theta)[:2, :2], ct.T).T
v1 = [new_ct[0],new_ct[1]]
v2 = [cs[0],cs[1]]
l1 = [np.array([0.,0.]),v1[1],v1[0]+v1[1],v1[0],np.array([0,0])]
sides1 = []
for j in range(4):
intersections = 0
for i in range(4):
line = (l1[j], l1[j]+v2[0])
side = (l1[i], l1[i+1])
test = intersection(line,side)
if test is not None:
t = test[1]
if np.round(t[1],5)<=1 and np.round(t[1],5)>=0:
intersections += 1
if intersections == 2:
sides1.append(line)
sides2 = []
for j in range(4):
intersections = 0
for i in range(4):
line = (l1[j],l1[j]+v2[1])
side = (l1[i], l1[i+1])
test = intersection(line,side)
if test is not None:
t = test[1]
if np.round(t[1],5)<=1 and np.round(t[1],5)>=0:
intersections += 1
if intersections == 2:
sides2.append(line)
corners = []
for side1 in sides1:
for side2 in sides2:
corner=np.round(intersection(side1,side2),5)
if corner is not None:
corners.append(list(corner[0]))
for i in range(len(corners)):
for j in range(2):
if np.round(corners[i][j],5) == 0:
corners[i][j] = abs(corners[i][j])
corners.sort()
if len(corners)==8:
corners = [corners[0],corners[2],corners[4],corners[6]]
if len(corners)==16:
corners = [corners[0],corners[4],corners[8],corners[12]]
origo = np.array(corners.pop(0))
end =np.array(corners.pop(-1))
pa = corners[0]
pb = corners[1]
if pa[1]/pa[0] < pb[1]/pb[0]:
lat1 = pa-origo
lat2 = pb-origo
else:
lat1 = pb-origo
lat2 = pa-origo
l1-=origo
area = np.cross(lat1,lat2)
| gpl-3.0 |
davidam/python-examples | matplotlib/boxplot-example2.py | 1 | 1353 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import numpy as np
import matplotlib.pyplot as plt
# fake data
np.random.seed(937)
data = np.random.lognormal(size=(37, 4), mean=1.5, sigma=1.75)
labels = list('ABCD')
fs = 10 # fontsize
# demonstrate how to toggle the display of different elements:
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6, 6), sharey=True)
axes[0, 0].boxplot(data, labels=labels)
axes[0, 0].set_title('Default', fontsize=fs)
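# (Added illustration, not in the original file: the remaining axes could be
# used to toggle individual boxplot elements, e.g.)
axes[0, 1].boxplot(data, labels=labels, showmeans=True)
axes[0, 1].set_title('showmeans=True', fontsize=fs)
axes[0, 2].boxplot(data, labels=labels, notch=True)
axes[0, 2].set_title('notch=True', fontsize=fs)
axes[1, 0].boxplot(data, labels=labels, showcaps=False)
axes[1, 0].set_title('showcaps=False', fontsize=fs)
axes[1, 1].boxplot(data, labels=labels, showfliers=False)
axes[1, 1].set_title('showfliers=False', fontsize=fs)
axes[1, 2].boxplot(data, labels=labels, showbox=False)
axes[1, 2].set_title('showbox=False', fontsize=fs)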
fig.subplots_adjust(hspace=0.4)
plt.show()
| gpl-3.0 |
PeterSchichtel/hepstore | hepstore/core/school/books/regression/mlp.py | 1 | 3334 | #!/usr/bin/env python
import sklearn.neural_network
import sklearn.svm
import sklearn.discriminant_analysis
import numpy as np
import time
import os
from hepstore.core.utility import *
from hepstore.core.statistic.distribution import Log10Flat
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
import sklearn.model_selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
import scipy
import tuning
class MLPRegressor(sklearn.neural_network.MLPRegressor):
def __init__( self, hidden_layer_sizes=(100, ), activation='relu', solver='adam',
alpha=0.0001, batch_size='auto', learning_rate='constant',
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=0.0001, verbose=False,
warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9,
beta_2=0.999, epsilon=1e-08,
path=os.getcwd(), jobs=1 ):
sklearn.neural_network.MLPRegressor.__init__( self, hidden_layer_sizes=hidden_layer_sizes,
activation=activation, solver=solver,
alpha=alpha, batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
                                                      max_iter=max_iter, shuffle=shuffle, random_state=random_state,
tol=tol, verbose=verbose, warm_start=warm_start,
momentum=momentum, nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping, validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon )
self.path = path
self.jobs = jobs
pass
def explore( self, X, y ):
path = os.path.join(self.path,self.solver,self.activation)
print '--MLP: explore'
# specify parameters for exploration
if self.solver == 'lbfgs':
param_dist = {
'alpha' : Log10Flat(-10,-0.001),
'tol' : Log10Flat(-10,-0.001),
}
pass
elif self.solver == 'adam':
param_dist = {
'alpha' : Log10Flat(-10,-0.001),
'tol' : Log10Flat(-10,-0.001),
'beta_1' : Log10Flat(-10,-0.001),
'beta_2' : Log10Flat(-10,-0.001),
}
pass
else:
            raise KeyError("explore solver '%s' not implemented" % self.solver)
# tune regressor
tuning.tune( self, X, y, param_dist,
path = path,
jobs = 1, ## bug in MLPRegressor!!
random_state = self.random_state
)
pass
def predict_proba( self, data ):
return self.predict( data )
pass
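# Hedged usage sketch (added for illustration, not part of the original module;
# the path and data names below are placeholders):
#
#   reg = MLPRegressor(solver='adam', path='/tmp/mlp_scan', jobs=1)
#   reg.explore(X_train, y_train)        # randomized search driven by tuning.tune
#   y_pred = reg.predict_proba(X_test)   # alias for predict() on this regressor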
| gpl-3.0 |
dharmasam9/moose-core | tests/python/test_kkit.py | 1 | 2609 | # -*- coding: utf-8 -*-
import matplotlib
# Tests may be run over ssh without -X e.g. on travis.
matplotlib.use( 'Agg' )
import matplotlib.pyplot as plt
import numpy
import sys
import os
import moose
scriptdir = os.path.dirname( os.path.realpath( __file__ ) )
print( 'Script dir %s' % scriptdir )
def main():
""" This example illustrates loading, running, and saving a kinetic model
defined in kkit format. It uses a default kkit model but you can specify another using the command line ``python filename runtime solver``. We use the gsl solver here. The model already defines a couple of plots and sets the runtime to 20 seconds.
"""
solver = "gsl" # Pick any of gsl, gssa, ee..
mfile = os.path.join( scriptdir, 'genesis/kkit_objects_example.g' )
runtime = 20.0
if ( len( sys.argv ) >= 3 ):
if sys.argv[1][0] == '/':
mfile = sys.argv[1]
else:
mfile = './genesis/' + sys.argv[1]
runtime = float( sys.argv[2] )
if ( len( sys.argv ) == 4 ):
solver = sys.argv[3]
modelId = moose.loadModel( mfile, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
#compt = moose.element( '/model/kinetics' )
#compt.volume = 1e-19
moose.reinit()
moose.start( runtime )
# Report parameters
'''
for x in moose.wildcardFind( '/model/kinetics/##[ISA=PoolBase]' ):
print x.name, x.nInit, x.concInit
for x in moose.wildcardFind( '/model/kinetics/##[ISA=ReacBase]' ):
print x.name, 'num: (', x.numKf, ', ', x.numKb, '), conc: (', x.Kf, ', ', x.Kb, ')'
for x in moose.wildcardFind('/model/kinetics/##[ISA=EnzBase]'):
print x.name, '(', x.Km, ', ', x.numKm, ', ', x.kcat, ')'
'''
# Display all plots.
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt
plt.plot( t, x.vector, label=x.name )
vals = x.vector
stats = [ vals.min(), vals.max( ), vals.mean(), vals.std( ) ]
expected = [ 0.0, 0.00040464 , 0.0001444 , 0.00013177 ]
assert numpy.allclose(stats, expected, rtol=1e-4) , 'Got %s expected %s' % (stats, expected )
plt.legend()
plt.savefig( '%s.png' % sys.argv[0] )
print( 'Wrote results to %s.png' % sys.argv[0] )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-3.0 |
rafin/Spotify-Visualizations | spot/analysis.py | 1 | 4129 | import pl as spotify
import pandas as pd
import numpy as np
import scipy.stats
from sklearn.manifold import TSNE
from sklearn.preprocessing import scale
def dict_to_frame(playlist):
pl_frame = pd.DataFrame(playlist)
raw_data = pl_frame[['energy', 'speechiness', 'acousticness',
'danceability', 'loudness', 'valence',
'instrumentalness']]
return raw_data
def simple_stats(playlist):
    ''' Output basic statistics for the given playlist,
        such as the average value of each feature.
    '''
pl_frame = dict_to_frame(playlist)
means = pl_frame.mean().to_dict()
means_list = []
for key, value in means.iteritems():
means_list.append([key, round(value, 1)])
return means_list
def confidence_interval(songs, confidence=.9999):
''' experimental bound generator for sifter
'''
pl_frame = dict_to_frame(songs)
n = len(pl_frame)
m, se = np.mean(pl_frame), scipy.stats.sem(pl_frame)
h = se * scipy.stats.t._ppf((1+confidence)/2., n-1)
uppers = (m+h).to_dict()
lowers = (m-h).to_dict()
keys = ['energy', 'speechiness', 'acousticness',
'danceability', 'loudness', 'valence',
'instrumentalness']
set = []
for key in keys:
if key == 'popularity':
set.append([0, 100])
continue
set.append([round(lowers[key], 0),round(uppers[key], 0)])
return set
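# (Hedged note, added for clarity: the half-width above is the usual Student-t
# interval, h = t_{(1+confidence)/2, n-1} * s / sqrt(n), computed per feature;
# with confidence=.9999 the bounds are deliberately loose. A rough standalone
# check using made-up numbers and the public scipy API:
#
#   import numpy as np, scipy.stats
#   x = np.array([10., 12., 11., 13., 9.])
#   se = scipy.stats.sem(x)                              # s / sqrt(n)
#   h = se * scipy.stats.t.ppf((1 + 0.95) / 2., len(x) - 1)
#   lower, upper = x.mean() - h, x.mean() + h
# )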
def pca(playlist):
    ''' Principal Component Analysis implementation
'''
pl_frame = pd.DataFrame(playlist)
features = ['energy', 'speechiness', 'acousticness',
'danceability', 'loudness', 'valence',
'instrumentalness']
data = pl_frame[features].T.as_matrix()
## computing d-dimensional mean vector
mean = []
for row in data:
mean.append(np.mean(row))
mean_vector = np.array([mean]).T
size = len(mean_vector)
## computing the scatter matrix
scatter_matrix = np.zeros((size,size))
for i in range(data.shape[1]):
scatter_matrix += (data[:,i].reshape(size,1) - mean_vector).dot((data[:,i].reshape(size,1) - mean_vector).T)
    ## computing eigenvectors and corresponding eigenvalues of the scatter matrix
eig_val_sc, eig_vec_sc = np.linalg.eig(scatter_matrix)
for i in range(len(eig_val_sc)):
eigvec_sc = eig_vec_sc[:,i].reshape(1,size).T
    ## Sorting eigenvectors by decreasing eigenvalues
eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))]
eig_pairs.sort(key = lambda x: x[0], reverse=True)
print eig_pairs
## store two largest eigenvectors for display
vector1 = [round(n, 2) for n in eig_pairs[0][1].tolist()]
vector2 = [round(n, 2) for n in eig_pairs[1][1].tolist()]
weights = map(list, zip(features, vector1, vector2))
## Choosing k eigenvectors with the largest eigenvalues
matrix_w = np.hstack((eig_pairs[0][1].reshape(size,1), eig_pairs[1][1].reshape(size,1)))
## Transforming the samples onto the new subspace
transformed = matrix_w.T.dot(data)
    transformed = scale(transformed)
coords = pd.DataFrame(transformed.T)
return {"coords": coords, "weights": weights}
def merge_pca(songs, pca):
for index, row in pca.iterrows():
songs[index]['pca1'] = round(row[0], 2)
songs[index]['pca2'] = round(row[1], 2)
return songs
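# Hedged usage sketch (added for illustration, not part of the original module;
# 'songs' is assumed to be a list of dicts carrying the audio-feature keys used
# above):
#
#   result = pca(songs)
#   songs = merge_pca(songs, result['coords'])   # adds 'pca1'/'pca2' to each song
#   weights = result['weights']                  # rows of [feature, vec1, vec2]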
def tSNE(playlist):
''' t-distributed stochastic neighbor embedding implementation
        (a heavier alternative to PCA)
'''
pl_frame = pd.DataFrame(playlist)
features = ['energy', 'speechiness', 'acousticness',
'danceability', 'loudness', 'valence',
'instrumentalness']
data = pl_frame[features].T.as_matrix()
data = scale(data)
data = data.T
data_tsne = TSNE(learning_rate=100, init='pca').fit_transform(data)
data_tsne = scale(data_tsne)
return pd.DataFrame(data_tsne)
def merge_tsne(songs, tsne):
for index, row in tsne.iterrows():
songs[index]['tSNE1'] = round(row[0] * 100, 2)
songs[index]['tSNE2'] = round(row[1] * 100, 2)
return songs
| mit |
argriffing/scipy | scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
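# A minimal, hedged usage sketch for NearestNDInterpolator (added for
# illustration; the data below are made up):
#
#   >>> import numpy as np
#   >>> points = np.random.rand(20, 2)
#   >>> values = np.sin(points[:, 0])
#   >>> interp = NearestNDInterpolator(points, values)
#   >>> interp(np.array([[0.5, 0.5], [0.1, 0.9]]))  # values at nearest data points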
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
        tessellate the input point set into n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
ChinaQuants/zipline | tests/test_perf_tracking.py | 2 | 80474 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import collections
from datetime import (
datetime,
timedelta,
)
import logging
import operator
import unittest
from nose_parameterized import parameterized
import nose.tools as nt
import pytz
import itertools
import pandas as pd
import numpy as np
from six.moves import range, zip
import zipline.utils.factory as factory
import zipline.finance.performance as perf
from zipline.finance.performance import position_tracker
from zipline.finance.slippage import Transaction, create_transaction
import zipline.utils.math_utils as zp_math
from zipline.gens.composites import date_sorted_sources
from zipline.finance.trading import SimulationParameters
from zipline.finance.blotter import Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.trading import TradingEnvironment
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.serialization_utils import (
loads_with_persistent_ids, dumps_with_persistent_ids
)
import zipline.protocol as zp
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources.data_frame_source import DataPanelSource
logger = logging.getLogger('Test Perf Tracking')
onesec = timedelta(seconds=1)
oneday = timedelta(days=1)
tradingday = timedelta(hours=6, minutes=30)
# nose.tools changed name in python 3
if not hasattr(nt, 'assert_count_equal'):
nt.assert_count_equal = nt.assert_items_equal
def check_perf_period(pp,
gross_leverage,
net_leverage,
long_exposure,
longs_count,
short_exposure,
shorts_count):
perf_data = pp.to_dict()
np.testing.assert_allclose(
gross_leverage, perf_data['gross_leverage'], rtol=1e-3)
np.testing.assert_allclose(
net_leverage, perf_data['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(
long_exposure, perf_data['long_exposure'], rtol=1e-3)
np.testing.assert_allclose(
longs_count, perf_data['longs_count'], rtol=1e-3)
np.testing.assert_allclose(
short_exposure, perf_data['short_exposure'], rtol=1e-3)
np.testing.assert_allclose(
shorts_count, perf_data['shorts_count'], rtol=1e-3)
def check_account(account,
settled_cash,
equity_with_loan,
total_positions_value,
regt_equity,
available_funds,
excess_liquidity,
cushion,
leverage,
net_leverage,
net_liquidation):
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(settled_cash,
account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(equity_with_loan,
account['equity_with_loan'], rtol=1e-3)
np.testing.assert_allclose(total_positions_value,
account['total_positions_value'], rtol=1e-3)
np.testing.assert_allclose(regt_equity,
account['regt_equity'], rtol=1e-3)
np.testing.assert_allclose(available_funds,
account['available_funds'], rtol=1e-3)
np.testing.assert_allclose(excess_liquidity,
account['excess_liquidity'], rtol=1e-3)
np.testing.assert_allclose(cushion,
account['cushion'], rtol=1e-3)
np.testing.assert_allclose(leverage, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(net_leverage,
account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(net_liquidation,
account['net_liquidation'], rtol=1e-3)
def create_txn(trade_event, price, amount):
"""
Create a fake transaction to be filled and processed prior to the execution
of a given trade event.
"""
mock_order = Order(trade_event.dt, trade_event.sid, amount, id=None)
return create_transaction(trade_event, mock_order, price, amount)
def benchmark_events_in_range(sim_params, env):
return [
Event({'dt': dt,
'returns': ret,
'type': zp.DATASOURCE_TYPE.BENCHMARK,
# We explicitly rely on the behavior that benchmarks sort before
# any other events.
'source_id': '1Abenchmarks'})
for dt, ret in env.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
def calculate_results(sim_params,
env,
benchmark_events,
trade_events,
dividend_events=None,
splits=None,
txns=None):
"""
Run the given events through a stripped down version of the loop in
AlgorithmSimulator.transform.
IMPORTANT NOTE FOR TEST WRITERS/READERS:
This loop has some wonky logic for the order of event processing for
    datasource types. This exists mostly to accommodate legacy tests that were
    making assumptions about how events would be
sorted.
In particular:
- Dividends passed for a given date are processed PRIOR to any events
for that date.
    - Splits passed for a given date are processed AFTER any events for that
date.
Tests that use this helper should not be considered useful guarantees of
the behavior of AlgorithmSimulator on a stream containing the same events
unless the subgroups have been explicitly re-sorted in this way.
"""
txns = txns or []
splits = splits or []
perf_tracker = perf.PerformanceTracker(sim_params, env)
if dividend_events is not None:
dividend_frame = pd.DataFrame(
[
event.to_series(index=zp.DIVIDEND_FIELDS)
for event in dividend_events
],
)
perf_tracker.update_dividends(dividend_frame)
# Raw trades
trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id))
# Add a benchmark event for each date.
trades_plus_bm = date_sorted_sources(trade_events, benchmark_events)
# Filter out benchmark events that are later than the last trade date.
filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm
if filt_event.dt <= trade_events[-1].dt)
grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm,
lambda x: x.dt)
results = []
bm_updated = False
for date, group in grouped_trades_plus_bm:
for txn in filter(lambda txn: txn.dt == date, txns):
# Process txns for this date.
perf_tracker.process_transaction(txn)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.DIVIDEND:
perf_tracker.process_dividend(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
bm_updated = True
elif event.type == zp.DATASOURCE_TYPE.COMMISSION:
perf_tracker.process_commission(event)
for split in filter(lambda split: split.dt == date, splits):
# Process splits for this date.
perf_tracker.process_split(split)
if bm_updated:
msg = perf_tracker.handle_market_close_daily()
msg['account'] = perf_tracker.get_account(True)
results.append(msg)
bm_updated = False
return results
def check_perf_tracker_serialization(perf_tracker):
scalar_keys = [
'emission_rate',
'txn_count',
'market_open',
'last_close',
'_dividend_count',
'period_start',
'day_count',
'capital_base',
'market_close',
'saved_dt',
'period_end',
'total_days',
]
p_string = dumps_with_persistent_ids(perf_tracker)
test = loads_with_persistent_ids(p_string, env=perf_tracker.env)
for k in scalar_keys:
nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k)
for period in test.perf_periods:
nt.assert_true(hasattr(period, '_position_tracker'))
class TestSplitPerformance(unittest.TestCase):
def setUp(self):
self.env = TradingEnvironment()
self.env.write_data(equities_identifiers=[1])
self.sim_params = create_simulation_parameters(num_days=2)
# start with $10,000
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_split_long_position(self):
events = factory.create_trade_history(
1,
[20, 20],
[100, 100],
oneday,
self.sim_params,
env=self.env
)
# set up a long position in sid 1
# 100 shares at $20 apiece = $2000 position
txns = [create_txn(events[0], 20, 100)]
# set up a split with ratio 3 occurring at the start of the second
# day.
splits = [
factory.create_split(
1,
3,
events[1].dt,
),
]
results = calculate_results(self.sim_params, self.env,
self.benchmark_events,
events, txns=txns, splits=splits)
# should have 33 shares (at $60 apiece) and $20 in cash
self.assertEqual(2, len(results))
latest_positions = results[1]['daily_perf']['positions']
self.assertEqual(1, len(latest_positions))
# check the last position to make sure it's been updated
position = latest_positions[0]
self.assertEqual(1, position['sid'])
self.assertEqual(33, position['amount'])
self.assertEqual(60, position['cost_basis'])
self.assertEqual(60, position['last_sale_price'])
# since we started with $10000, and we spent $2000 on the
# position, but then got $20 back, we should have $8020
# (or close to it) in cash.
# we won't get exactly 8020 because sometimes a split is
# denoted as a ratio like 0.3333, and we lose some digits
# of precision. thus, make sure we're pretty close.
daily_perf = results[1]['daily_perf']
self.assertTrue(
zp_math.tolerant_equals(8020,
daily_perf['ending_cash'], 1))
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(0.198, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(0.198, account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(8020, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(8020, account['available_funds'], rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(10000,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(8020, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(8020, account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(10000, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.802, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(1980, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
for i, result in enumerate(results):
for perf_kind in ('daily_perf', 'cumulative_perf'):
perf_result = result[perf_kind]
# prices aren't changing, so pnl and returns should be 0.0
self.assertEqual(0.0, perf_result['pnl'],
"day %s %s pnl %s instead of 0.0" %
(i, perf_kind, perf_result['pnl']))
self.assertEqual(0.0, perf_result['returns'],
"day %s %s returns %s instead of 0.0" %
(i, perf_kind, perf_result['returns']))
class TestCommissionEvents(unittest.TestCase):
def setUp(self):
self.env = TradingEnvironment()
self.env.write_data(
equities_identifiers=[0, 1, 133]
)
self.sim_params = create_simulation_parameters(num_days=5)
logger.info("sim_params: %s" % self.sim_params)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_commission_event(self):
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Test commission models and validate result
# Expected commission amounts:
# PerShare commission: 1.00, 1.00, 1.50 = $3.50
# PerTrade commission: 5.00, 5.00, 5.00 = $15.00
# PerDollar commission: 1.50, 3.00, 4.50 = $9.00
# Total commission = $3.50 + $15.00 + $9.00 = $27.50
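        # (Worked arithmetic, added for clarity: PerShare at $0.01/share with a
        # $1.00 minimum gives max(0.50, 1.00) + 1.00 + 1.50 = 3.50; PerTrade is
        # a flat $5.00 per fill, 3 * 5.00 = 15.00; PerDollar at 0.0015 on
        # notionals of $1000, $2000 and $3000 gives 1.50 + 3.00 + 4.50 = 9.00.)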
# Create 3 transactions: 50, 100, 150 shares traded @ $20
transactions = [create_txn(events[0], 20, i)
for i in [50, 100, 150]]
# Create commission models and validate that produce expected
# commissions.
models = [PerShare(cost=0.01, min_trade_cost=1.00),
PerTrade(cost=5.00),
PerDollar(cost=0.0015)]
expected_results = [3.50, 15.0, 9.0]
for model, expected in zip(models, expected_results):
total_commission = 0
for trade in transactions:
total_commission += model.calculate(trade)[1]
self.assertEqual(total_commission, expected)
# Verify that commission events are handled correctly by
# PerformanceTracker.
cash_adj_dt = events[0].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
# Insert a purchase order.
txns = [create_txn(events[0], 20, 1)]
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events,
txns=txns)
# Validate that we lost 320 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9680)
# Validate that the cost basis of our position changed.
self.assertEqual(results[-1]['daily_perf']['positions']
[0]['cost_basis'], 320.0)
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
np.testing.assert_allclose(0.001, account['leverage'], rtol=1e-3,
atol=1e-4)
np.testing.assert_allclose(9680, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(9680, account['available_funds'],
rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(9690,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(9680, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(9680, account['settled_cash'],
rtol=1e-3)
np.testing.assert_allclose(9690, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.999, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(10, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
def test_commission_zero_position(self):
"""
Ensure no div-by-zero errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Buy and sell the same sid so that we have a zero position by the
# time of events[3].
txns = [
create_txn(events[0], 20, 1),
create_txn(events[1], 20, -1),
]
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events,
txns=txns)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
def test_commission_no_position(self):
"""
Ensure no position-not-found or sid-not-found errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
class TestDividendPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2])
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
self.sim_params = create_simulation_parameters(num_days=6)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_market_hours_calculations(self):
# DST in US/Eastern began on Sunday March 14, 2010
before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)
after = factory.get_next_trading_dt(
before,
timedelta(days=1),
self.env,
)
self.assertEqual(after.hour, 13)
def test_long_position_receives_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0])
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000])
def test_long_position_receives_stock_dividend(self):
# post some trades in the market
events = []
for sid in (1, 2):
events.extend(
factory.create_trade_history(
sid,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env)
)
dividend = factory.create_stock_dividend(
1,
payment_sid=2,
ratio=2,
# declared date, when the algorithm finds out about
# the dividend
declared_date=events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
ex_date=events[1].dt,
# pay date, when the algorithm receives the dividend.
pay_date=events[2].dt
)
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000] * 5)
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000] * 5)
def test_long_position_purchased_on_ex_date_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[2].dt # Pay date
)
# Simulate a transaction being filled on the ex_date.
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000])
def test_selling_before_dividend_payment_still_gets_paid(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[3].dt # Pay date
)
buy_txn = create_txn(events[0], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 1000, 1000])
def test_buy_and_sell_before_ex(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[3].dt,
events[4].dt,
events[5].dt
)
buy_txn = create_txn(events[1], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])
def test_ending_before_pay_date(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
pay_date = self.sim_params.first_open
# find pay date that is much later.
for i in range(30):
pay_date = factory.get_next_trading_dt(pay_date, oneday, self.env)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[0].dt,
pay_date
)
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(
cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000]
)
def test_short_position_pays_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declare at open of test
events[0].dt,
# ex_date same as trade 2
events[2].dt,
events[3].dt
)
txns = [create_txn(events[1], 10.0, -100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0])
def test_no_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[2].dt
)
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0])
def test_no_dividend_at_simulation_end(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[-3].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[-2].dt,
# pay date, when the algorithm receives the dividend.
# This pays out on the day after the last event
self.env.next_trading_day(events[-1].dt)
)
# Set the last day to be the last event
self.sim_params.period_end = events[-1].dt
self.sim_params.update_internal_from_env(self.env)
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[-1000, -1000, -1000, -1000, -1000])
class TestDividendPerformanceHolidayStyle(TestDividendPerformance):
    # The holiday tests begin the simulation on the day
# before Thanksgiving, so that the next trading day is
# two days ahead. Any tests that hard code events
# to be start + oneday will fail, since those events will
# be skipped by the simulation.
def setUp(self):
self.dt = datetime(2003, 11, 30, tzinfo=pytz.utc)
self.end_dt = datetime(2004, 11, 25, tzinfo=pytz.utc)
self.sim_params = SimulationParameters(
self.dt,
self.end_dt,
env=self.env)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
class TestPositionPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2])
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
self.sim_params = create_simulation_parameters(num_days=4)
self.finder = self.env.asset_finder
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_long_short_positions(self):
"""
start with $1000
buy 100 stock1 shares at $10
sell short 100 stock2 shares at $10
stock1 then goes down to $9
stock2 goes to $11
"""
trades_1 = factory.create_trade_history(
1,
[10, 10, 10, 9],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
trades_2 = factory.create_trade_history(
2,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn1 = create_txn(trades_1[1], 10.0, 100)
txn2 = create_txn(trades_2[1], 10.0, -100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn1)
pp.handle_execution(txn1)
pt.execute_transaction(txn2)
pp.handle_execution(txn2)
for trade in itertools.chain(trades_1[:-2], trades_2[:-2]):
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=2.0,
net_leverage=0.0,
long_exposure=1000.0,
longs_count=1,
short_exposure=-1000.0,
shorts_count=1)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=1000.0,
equity_with_loan=1000.0,
total_positions_value=0.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.0,
leverage=2.0,
net_leverage=0.0,
net_liquidation=1000.0)
# now simulate stock1 going to $9
pt.update_last_sale(trades_1[-1])
# and stock2 going to $11
pt.update_last_sale(trades_2[-1])
pp.calculate_performance()
# Validate that the account attributes were updated.
account = pp.as_account()
check_perf_period(
pp,
gross_leverage=2.5,
net_leverage=-0.25,
long_exposure=900.0,
longs_count=1,
short_exposure=-1100.0,
shorts_count=1)
check_account(account,
settled_cash=1000.0,
equity_with_loan=800.0,
total_positions_value=-200.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.25,
leverage=2.5,
net_leverage=-0.25,
net_liquidation=800.0)
def test_levered_long_position(self):
"""
start with $1,000, then buy 1000 shares at $10.
price goes to $11
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn = create_txn(trades[1], 10.0, 1000)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades[:-2]:
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=10.0,
net_leverage=10.0,
long_exposure=10000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=1000.0,
total_positions_value=10000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-9.0,
leverage=10.0,
net_leverage=10.0,
net_liquidation=1000.0)
# now simulate a price jump to $11
pt.update_last_sale(trades[-1])
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=5.5,
net_leverage=5.5,
long_exposure=11000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=2000.0,
total_positions_value=11000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-4.5,
leverage=5.5,
net_leverage=5.5,
net_liquidation=2000.0)
def test_long_position(self):
"""
verify that the performance period calculates properly for a
single buy transaction
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn = create_txn(trades[1], 10.0, 100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
# This verifies that the last sale price is being correctly
# set in the positions. If this is not the case then returns can
# incorrectly show as sharply dipping if a transaction arrives
# before a trade. This is caused by returns being based on holding
# stocks with a last sale price of 0.
self.assertEqual(pp.positions[1].last_sale_price, 10.0)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security with id 1")
self.assertEqual(
pp.positions[1].amount,
txn.amount,
"should have a position of {sharecount} shares".format(
sharecount=txn.amount
)
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1]['price'],
"last sale should be same as last trade. \
expected {exp} actual {act}".format(
exp=trades[-1]['price'],
act=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.ending_value,
1100,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100")
check_perf_period(
pp,
gross_leverage=1.0,
net_leverage=1.0,
long_exposure=1100.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=0.0,
equity_with_loan=1100.0,
total_positions_value=1100.0,
regt_equity=0.0,
available_funds=0.0,
excess_liquidity=0.0,
cushion=0.0,
leverage=1.0,
net_leverage=1.0,
net_liquidation=1100.0)
def test_short_position(self):
"""verify that the performance period calculates properly for a \
single short-sale transaction"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 10, 9],
[100, 100, 100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
trades_1 = trades[:-2]
txn = create_txn(trades[1], 10.0, -100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades_1:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction\
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_1[-1]['price'],
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-1100,
"ending value should be price of last trade times number of \
shares in position"
)
        self.assertEqual(pp.pnl, -100,
                         "loss of 1 on 100 short shares should be -100")
# simulate additional trades, and ensure that the position value
# reflects the new price
trades_2 = trades[-2:]
# simulate a rollover to a new period
pp.rollover()
for trade in trades_2:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
0,
"capital used should be zero, there were no transactions in \
performance period"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
pp.pnl,
200,
"drop of 2 on -100 shares should be 200"
)
# now run a performance period encompassing the entire trade sample.
ptTotal = perf.PositionTracker(self.env.asset_finder)
ppTotal = perf.PerformancePeriod(1000.0, self.env.asset_finder)
ppTotal.position_tracker = pt
for trade in trades_1:
ptTotal.update_last_sale(trade)
ptTotal.execute_transaction(txn)
ppTotal.handle_execution(txn)
for trade in trades_2:
ptTotal.update_last_sale(trade)
ppTotal.calculate_performance()
self.assertEqual(
ppTotal.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(ppTotal.positions),
1,
"should be just one position"
)
self.assertEqual(
ppTotal.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
ppTotal.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
ppTotal.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
ppTotal.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
ppTotal.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
ppTotal.pnl,
100,
"drop of 1 on -100 shares should be 100"
)
check_perf_period(
pp,
gross_leverage=0.8181,
net_leverage=-0.8181,
long_exposure=0.0,
longs_count=0,
short_exposure=-900.0,
shorts_count=1)
# Validate that the account attributes.
account = ppTotal.as_account()
check_account(account,
settled_cash=2000.0,
equity_with_loan=1100.0,
total_positions_value=-900.0,
regt_equity=2000.0,
available_funds=2000.0,
excess_liquidity=2000.0,
cushion=1.8181,
leverage=0.8181,
net_leverage=-0.8181,
net_liquidation=1100.0)
def test_covering_short(self):
"""verify performance where short is bought and covered, and shares \
trade after cover"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 9, 8, 7, 8, 9, 10],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
short_txn = create_txn(
trades[1],
10.0,
-100,
)
cover_txn = create_txn(trades[6], 7.0, 100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(short_txn)
pp.handle_execution(short_txn)
pt.execute_transaction(cover_txn)
pp.handle_execution(cover_txn)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
short_txn_cost = short_txn.price * short_txn.amount
cover_txn_cost = cover_txn.price * cover_txn.amount
self.assertEqual(
pp.period_cash_flow,
-1 * short_txn_cost - cover_txn_cost,
"capital used should be equal to the net transaction costs"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
short_txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
0,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
0,
"a covered position should have a cost basis of 0"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
0,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(
pp.pnl,
300,
"gain of 1 on 100 shares should be 300"
)
check_perf_period(
pp,
gross_leverage=0.0,
net_leverage=0.0,
long_exposure=0.0,
longs_count=0,
short_exposure=0.0,
shorts_count=0)
account = pp.as_account()
check_account(account,
settled_cash=1300.0,
equity_with_loan=1300.0,
total_positions_value=0.0,
regt_equity=1300.0,
available_funds=1300.0,
excess_liquidity=1300.0,
cushion=1.0,
leverage=0.0,
net_leverage=0.0,
net_liquidation=1300.0)
def test_cost_basis_calc(self):
history_args = (
1,
[10, 11, 11, 12],
[100, 100, 100, 100],
onesec,
self.sim_params,
self.env
)
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
average_cost = 0
for i, txn in enumerate(transactions):
pt.execute_transaction(txn)
pp.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp.positions[1].cost_basis, average_cost)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"should have a last sale of 12, got {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp.pnl,
400
)
down_tick = factory.create_trade(
1,
10.0,
100,
trades[-1].dt + onesec)
sale_txn = create_txn(
down_tick,
10.0,
-100)
pp.rollover()
pt.execute_transaction(sale_txn)
pp.handle_execution(sale_txn)
pt.update_last_sale(down_tick)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
10,
"should have a last sale of 10, was {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(pp.pnl, -800, "this period goes from +400 to -400")
pt3 = perf.PositionTracker(self.env.asset_finder)
pp3 = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp3.position_tracker = pt3
average_cost = 0
for i, txn in enumerate(transactions):
pt3.execute_transaction(txn)
pp3.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp3.positions[1].cost_basis, average_cost)
pt3.execute_transaction(sale_txn)
pp3.handle_execution(sale_txn)
trades.append(down_tick)
for trade in trades:
pt3.update_last_sale(trade)
pp3.calculate_performance()
self.assertEqual(
pp3.positions[1].last_sale_price,
10,
"should have a last sale of 10"
)
self.assertEqual(
pp3.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp3.pnl,
-400,
"should be -400 for all trades and transactions in period"
)
def test_cost_basis_calc_close_pos(self):
history_args = (
1,
[10, 9, 11, 8, 9, 12, 13, 14],
[200, -100, -100, 100, -300, 100, 500, 400],
onesec,
self.sim_params,
self.env
)
cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
for txn, cb in zip(transactions, cost_bases):
pt.execute_transaction(txn)
pp.handle_execution(txn)
self.assertEqual(pp.positions[1].cost_basis, cb)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])
class TestPerformanceTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2, 133, 134])
@classmethod
def tearDownClass(cls):
del cls.env
NumDaysToDelete = collections.namedtuple(
'NumDaysToDelete', ('start', 'middle', 'end'))
@parameterized.expand([
("Don't delete any events",
NumDaysToDelete(start=0, middle=0, end=0)),
("Delete first day of events",
NumDaysToDelete(start=1, middle=0, end=0)),
("Delete first two days of events",
NumDaysToDelete(start=2, middle=0, end=0)),
("Delete one day of events from the middle",
NumDaysToDelete(start=0, middle=1, end=0)),
("Delete two events from the middle",
NumDaysToDelete(start=0, middle=2, end=0)),
("Delete last day of events",
NumDaysToDelete(start=0, middle=0, end=1)),
("Delete last two days of events",
NumDaysToDelete(start=0, middle=0, end=2)),
("Delete all but one event.",
NumDaysToDelete(start=2, middle=1, end=2)),
])
def test_tracker(self, parameter_comment, days_to_delete):
"""
@days_to_delete - configures which days in the data set we should
remove, used for ensuring that we still return performance messages
even when there is no data.
"""
# This date range covers Columbus day,
# however Columbus day is not a market holiday
#
# October 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
trade_count = 6
sid = 133
price = 10.1
price_list = [price] * trade_count
volume = [100] * trade_count
trade_time_increment = timedelta(days=1)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
env=self.env,
)
benchmark_events = benchmark_events_in_range(sim_params, self.env)
trade_history = factory.create_trade_history(
sid,
price_list,
volume,
trade_time_increment,
sim_params,
source_id="factory1",
env=self.env
)
sid2 = 134
price2 = 12.12
price2_list = [price2] * trade_count
trade_history2 = factory.create_trade_history(
sid2,
price2_list,
volume,
trade_time_increment,
sim_params,
source_id="factory2",
env=self.env
)
# 'middle' start of 3 depends on number of days == 7
middle = 3
# First delete from middle
if days_to_delete.middle:
del trade_history[middle:(middle + days_to_delete.middle)]
del trade_history2[middle:(middle + days_to_delete.middle)]
# Delete start
if days_to_delete.start:
del trade_history[:days_to_delete.start]
del trade_history2[:days_to_delete.start]
# Delete from end
if days_to_delete.end:
del trade_history[-days_to_delete.end:]
del trade_history2[-days_to_delete.end:]
sim_params.capital_base = 1000.0
sim_params.frame_index = [
'sid',
'volume',
'dt',
'price',
'changed']
perf_tracker = perf.PerformanceTracker(
sim_params, self.env
)
events = date_sorted_sources(trade_history, trade_history2)
events = [event for event in
self.trades_with_txns(events, trade_history[0].dt)]
# Extract events with transactions to use for verification.
txns = [event for event in
events if event.type == zp.DATASOURCE_TYPE.TRANSACTION]
orders = [event for event in
events if event.type == zp.DATASOURCE_TYPE.ORDER]
all_events = date_sorted_sources(events, benchmark_events)
filtered_events = [filt_event for filt_event
in all_events if filt_event.dt <= end_dt]
filtered_events.sort(key=lambda x: x.dt)
grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
perf_messages = []
for date, group in grouped_events:
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
perf_tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
perf_tracker.process_transaction(event)
msg = perf_tracker.handle_market_close_daily()
perf_messages.append(msg)
self.assertEqual(perf_tracker.txn_count, len(txns))
self.assertEqual(perf_tracker.txn_count, len(orders))
positions = perf_tracker.cumulative_performance.positions
if len(txns) == 0:
self.assertNotIn(sid, positions)
else:
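            # Each sid gets a -25 share transaction for every event except
            # its first one, and txns spans both sids, so the per-sid
            # position is (len(txns) / 2) * -25.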
expected_size = len(txns) / 2 * -25
cumulative_pos = positions[sid]
self.assertEqual(cumulative_pos.amount, expected_size)
self.assertEqual(len(perf_messages),
sim_params.days_in_period)
check_perf_tracker_serialization(perf_tracker)
def trades_with_txns(self, events, no_txn_dt):
for event in events:
# create a transaction for all but
# first trade in each sid, to simulate None transaction
if event.dt != no_txn_dt:
order = Order(
sid=event.sid,
amount=-25,
dt=event.dt
)
order.source_id = 'MockOrderSource'
yield order
yield event
txn = Transaction(
sid=event.sid,
amount=-25,
dt=event.dt,
price=10.0,
commission=0.50,
order_id=order.id
)
txn.source_id = 'MockTransactionSource'
yield txn
else:
yield event
def test_minute_tracker(self):
""" Tests minute performance tracking."""
start_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 9, 31))
end_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 16, 0))
foosid = 1
barsid = 2
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
emission_rate='minute',
env=self.env,
)
tracker = perf.PerformanceTracker(sim_params, env=self.env)
foo_event_1 = factory.create_trade(foosid, 10.0, 20, start_dt)
order_event_1 = Order(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt)
bar_event_1 = factory.create_trade(barsid, 100.0, 200, start_dt)
txn_event_1 = Transaction(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt,
price=10.0,
commission=0.50,
order_id=order_event_1.id)
benchmark_event_1 = Event({
'dt': start_dt,
'returns': 0.01,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
foo_event_2 = factory.create_trade(
foosid, 11.0, 20, start_dt + timedelta(minutes=1))
bar_event_2 = factory.create_trade(
barsid, 11.0, 20, start_dt + timedelta(minutes=1))
benchmark_event_2 = Event({
'dt': start_dt + timedelta(minutes=1),
'returns': 0.02,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
events = [
foo_event_1,
order_event_1,
benchmark_event_1,
txn_event_1,
bar_event_1,
foo_event_2,
benchmark_event_2,
bar_event_2,
]
grouped_events = itertools.groupby(
events, operator.attrgetter('dt'))
messages = {}
for date, group in grouped_events:
tracker.set_date(date)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
tracker.process_transaction(event)
msg, _ = tracker.handle_minute_close(date)
messages[date] = msg
self.assertEquals(2, len(messages))
msg_1 = messages[foo_event_1.dt]
msg_2 = messages[foo_event_2.dt]
self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
"The first message should contain one "
"transaction.")
# Check that transactions aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
"The second message should have no "
"transactions.")
self.assertEquals(1, len(msg_1['minute_perf']['orders']),
"The first message should contain one orders.")
# Check that orders aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['orders']),
"The second message should have no orders.")
# Ensure that period_close moves through time.
# Also, ensure that the period_closes are the expected dts.
self.assertEquals(foo_event_1.dt,
msg_1['minute_perf']['period_close'])
self.assertEquals(foo_event_2.dt,
msg_2['minute_perf']['period_close'])
# In this test event1 transactions arrive on the first bar.
# This leads to no returns as the price is constant.
# Sharpe ratio cannot be computed and is None.
# In the second bar we can start establishing a sharpe ratio.
self.assertIsNone(msg_1['cumulative_risk_metrics']['sharpe'])
self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
check_perf_tracker_serialization(tracker)
def test_close_position_event(self):
pt = perf.PositionTracker(asset_finder=self.env.asset_finder)
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(120.0),
last_sale_date=dt, last_sale_price=3.4)
pos2 = perf.Position(2, amount=np.float64(-100.0),
last_sale_date=dt, last_sale_price=3.4)
pt.update_positions({1: pos1, 2: pos2})
event_type = DATASOURCE_TYPE.CLOSE_POSITION
index = [dt + timedelta(days=1)]
pan = pd.Panel({1: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
2: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
3: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index)})
source = DataPanelSource(pan)
for i, event in enumerate(source):
txn = pt.maybe_create_close_position_transaction(event)
if event.sid == 1:
# Test owned long
self.assertEqual(-120, txn.amount)
elif event.sid == 2:
# Test owned short
self.assertEqual(100, txn.amount)
elif event.sid == 3:
# Test not-owned SID
self.assertIsNone(txn)
def test_handle_sid_removed_from_universe(self):
# post some trades in the market
sim_params = create_simulation_parameters(num_days=5)
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
sim_params,
env=self.env
)
# Create a tracker and a dividend
perf_tracker = perf.PerformanceTracker(sim_params, env=self.env)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
dividend_frame = pd.DataFrame(
[dividend.to_series(index=zp.DIVIDEND_FIELDS)],
)
perf_tracker.update_dividends(dividend_frame)
# Ensure that the dividend is in the tracker
self.assertIn(1, perf_tracker.dividend_frame['sid'].values)
# Inform the tracker that sid 1 has been removed from the universe
perf_tracker.handle_sid_removed_from_universe(1)
# Ensure that the dividend for sid 1 has been removed from dividend
# frame
self.assertNotIn(1, perf_tracker.dividend_frame['sid'].values)
def test_serialization(self):
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
env=self.env,
)
perf_tracker = perf.PerformanceTracker(
sim_params, env=self.env
)
check_perf_tracker_serialization(perf_tracker)
class TestPosition(unittest.TestCase):
def setUp(self):
pass
def test_serialization(self):
dt = pd.Timestamp("1984/03/06 3:00PM")
pos = perf.Position(10, amount=np.float64(120.0), last_sale_date=dt,
last_sale_price=3.4)
p_string = dumps_with_persistent_ids(pos)
test = loads_with_persistent_ids(p_string, env=None)
nt.assert_dict_equal(test.__dict__, pos.__dict__)
class TestPositionTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
equities_metadata = {1: {'asset_type': 'equity'},
2: {'asset_type': 'equity'}}
futures_metadata = {3: {'asset_type': 'future',
'contract_multiplier': 1000},
4: {'asset_type': 'future',
'contract_multiplier': 1000}}
cls.env.write_data(equities_data=equities_metadata,
futures_data=futures_metadata)
@classmethod
def tearDownClass(cls):
del cls.env
def test_empty_positions(self):
"""
make sure all the empty position stats return a numeric 0
Originally this bug was due to np.dot([], []) returning
np.bool_(False)
"""
pt = perf.PositionTracker(self.env.asset_finder)
pos_stats = position_tracker.calc_position_stats(pt)
stats = [
'net_value',
'net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
]
for name in stats:
val = getattr(pos_stats, name)
self.assertEquals(val, 0)
self.assertNotIsInstance(val, (bool, np.bool_))
def test_update_last_sale(self):
pt = perf.PositionTracker(self.env.asset_finder)
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=10)
pos3 = perf.Position(3, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=10)
pt.update_positions({1: pos1, 3: pos3})
event1 = Event({'sid': 1,
'price': 11,
'dt': dt})
event3 = Event({'sid': 3,
'price': 11,
'dt': dt})
# Check cash-adjustment return value
self.assertEqual(0, pt.update_last_sale(event1))
self.assertEqual(100000, pt.update_last_sale(event3))
def test_position_values_and_exposures(self):
pt = perf.PositionTracker(self.env.asset_finder)
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(10.0),
last_sale_date=dt, last_sale_price=10)
pos2 = perf.Position(2, amount=np.float64(-20.0),
last_sale_date=dt, last_sale_price=10)
pos3 = perf.Position(3, amount=np.float64(30.0),
last_sale_date=dt, last_sale_price=10)
pos4 = perf.Position(4, amount=np.float64(-40.0),
last_sale_date=dt, last_sale_price=10)
pt.update_positions({1: pos1, 2: pos2, 3: pos3, 4: pos4})
# Test long-only methods
pos_stats = position_tracker.calc_position_stats(pt)
self.assertEqual(100, pos_stats.long_value)
self.assertEqual(100 + 300000, pos_stats.long_exposure)
self.assertEqual(2, pos_stats.longs_count)
# Test short-only methods
self.assertEqual(-200, pos_stats.short_value)
self.assertEqual(-200 - 400000, pos_stats.short_exposure)
self.assertEqual(2, pos_stats.shorts_count)
# Test gross and net values
self.assertEqual(100 + 200, pos_stats.gross_value)
self.assertEqual(100 - 200, pos_stats.net_value)
# Test gross and net exposures
self.assertEqual(100 + 200 + 300000 + 400000, pos_stats.gross_exposure)
self.assertEqual(100 - 200 + 300000 - 400000, pos_stats.net_exposure)
def test_serialization(self):
pt = perf.PositionTracker(self.env.asset_finder)
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(120.0),
last_sale_date=dt, last_sale_price=3.4)
pos3 = perf.Position(3, amount=np.float64(100.0),
last_sale_date=dt, last_sale_price=3.4)
pt.update_positions({1: pos1, 3: pos3})
p_string = dumps_with_persistent_ids(pt)
test = loads_with_persistent_ids(p_string, env=self.env)
nt.assert_count_equal(test.positions.keys(), pt.positions.keys())
for sid in pt.positions:
nt.assert_dict_equal(test.positions[sid].__dict__,
pt.positions[sid].__dict__)
class TestPerformancePeriod(unittest.TestCase):
def test_serialization(self):
env = TradingEnvironment()
pt = perf.PositionTracker(env.asset_finder)
pp = perf.PerformancePeriod(100, env.asset_finder)
pp.position_tracker = pt
p_string = dumps_with_persistent_ids(pp)
test = loads_with_persistent_ids(p_string, env=env)
correct = pp.__dict__.copy()
del correct['_position_tracker']
nt.assert_count_equal(test.__dict__.keys(), correct.keys())
equal_keys = list(correct.keys())
equal_keys.remove('_account_store')
equal_keys.remove('_portfolio_store')
for k in equal_keys:
nt.assert_equal(test.__dict__[k], correct[k])
| apache-2.0 |
Schallaven/ffeimg | bin/rendertrajectories.py | 1 | 13909 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 by Sven Kochmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Description:
# Renders trajectories in an input-file (console program)
#
# This program opens a FFE.png file and renders the trajectories in this file to a plot. This program
# uses minimal logging.
#
# Import modules
import cv2 # OpenCV
import numpy as np # Numpy - You always need this.
import ffe # frequently-used function script
import sys # Sys functions
import logging # Logging functions
import getopt # Get and parse command-line arguments
import matplotlib.pyplot as plt # For plotting intermediate graphs (debug)
import matplotlib.collections
import scipy
import scipy.interpolate
# Function: Prints help page
# --------------------------------------------------------------------------------------------------------------------
def printHelpPage():
print("USAGE: script.py [options] --input-file <file>")
print("")
print("Order of options is not important. The input file is mandatory.")
print("")
print("Switches:")
print("\t--help:\t\t\t\tShows this help page.")
print("\t--debug:\t\t\tDebug mode.")
print("\t--silent:\t\t\tSilent mode.")
print("\t--output-file <file>\t\tSave output to a file rather than screen.")
print("\t--output-style <style>\t\tUse render style <style>. Possible values: linear, spline, skeleton. "
"Default: linear.")
print("\t--useinlet #\t\t\tUse inlet # (zero based!) as origin point (if present). Default: Uses inlet nearest "
"to left/middle point.")
print("")
# Step 0: Parse command-line arguments
# --------------------------------------------------------------------------------------------------------------------
# Input/Outputfile
inputfile = ""
outputfile = ""
# Debugmode
debugmode = False
# Silent mode
silentmode = False
# How to render trajectories?
outputstyle = "linear"
# Use inlet
useinlet = -1 # Use inlet # (zero based) as origin point; default (-1): use the inlet nearest to the left/middle point.
# No parameters given?
if len(sys.argv) == 1:
printHelpPage()
sys.exit(1)
# Try to find all the arguments
try:
# All the options to recognize
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "debug", "silent", "input-file=",
"output-file=", "output-style=", "useinlet="])
# Found something unexpected? Display help page!
except getopt.GetoptError as parametererror:
print("ERROR: %s." % parametererror.msg)
printHelpPage()
sys.exit(2)
# Otherwise, collect the user input
for opt, arg in opts:
if opt == '--help':
printHelpPage()
sys.exit(0)
elif opt == "--input-file":
inputfile = arg
elif opt == "--output-file":
outputfile = arg
elif opt == "--debug":
debugmode = True
elif opt == "--silent":
silentmode = True
elif opt == "--output-style":
if str(arg) in ["linear", "spline", "skeleton"]:
outputstyle = arg
else:
print("Outputstyle '%s' not recognized." % str(arg))
printHelpPage()
sys.exit(3)
elif opt == "--useinlet":
if 0 <= int(arg):
useinlet = int(arg)
else:
print("Useinlet should be a positive integer. '%s' was given." % str(arg))
# No input file given?
if len(inputfile) == 0:
print("No input file given.")
printHelpPage()
sys.exit(1)
# Step 1: Setup logging
# --------------------------------------------------------------------------------------------------------------------
# Create logger object for this script
logger = logging.getLogger('FFE')
# Set level of information
logger.setLevel(logging.DEBUG)
# Create log file handler which records everything; append the new information
fh = logging.FileHandler('evaluating.log', mode='a')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s\t%(filename)s\t%(levelname)s\t%(lineno)d\t%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
# Create console handler which shows INFO and above (WARNING, ERRORS, CRITICALS, ...)
if not silentmode:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Start the program
logger.info(u"####### Render trajectories #######")
logger.info(u"Render trajectories on '%s'", inputfile)
# Debug mode?
if debugmode:
ffe.enableDebugMode()
# Step 2: Open file and read data
# --------------------------------------------------------------------------------------------------------------------
# Read source image
inputimage = cv2.imread(inputfile, cv2.IMREAD_COLOR)
# Error?
if inputimage is None:
logger.error(u"Input file could not be read.")
sys.exit(3)
# Shape of image
imageheight, imagewidth, imagebits = inputimage.shape
# Read data
ffedata = ffe.loadDictionaryFromPng(inputfile)
# Are there trajectories in file's data?
if "Inner separation zone" not in ffedata:
logger.error(u"No inner separation zone in file.")
sys.exit(-1)
# Extracts the separation zone from the image
zoneimage, transmatrix = ffe.extractZoneFromImage(inputimage, ffedata["Inner separation zone"], 10)
# Shape of zone image
zoneheight, zonewidth, zonebits = zoneimage.shape
# Are there trajectories in file's data?
if "Trajectories" not in ffedata:
logger.error(u"No trajectories in file.")
sys.exit(-1)
# Read trajectories
trajectories = ffedata["Trajectories"]
# Sort trajectories
trajectories = ffe.sortTrajectoriesByWeightedCoordinates(trajectories)
# This is the resolution for both axes in mm per pixel
resolution = (0.15, 0.15)
# Given in input file?
if "Physical zone dimensions" in ffedata:
resolution = (float(ffedata["Physical zone dimensions"][0]) / float(zonewidth),
float(ffedata["Physical zone dimensions"][1]) / float(zoneheight))
# Step 3: Rendering the trajectories
# --------------------------------------------------------------------------------------------------------------------
# Let the origin be the first inlet if exists (origin is in real dimensions, i.e. mm!)
origin = (0, 0)
if "Inlets" in ffedata:
# Inlets
inlets = np.array([(item[0], item[1]) for item in ffedata["Inlets"]])
# Useinlet given and in the range of given inlets? Well, then only use this one start point
if 0 <= useinlet < len(inlets):
logger.info(u"Use inlet %d for origin point.", useinlet)
origin = (inlets[useinlet][0], inlets[useinlet][1])
# or just select the point nearest to center of the left side
else:
# First sort the points by distance from the center of the left side (0, zoneheight/2)
inlets = ffe.sortCoordinatesByDistanceToPoint(inlets, (0, int(ffedata["Physical zone dimensions"][1]/2)))
# Calculate new origin from nearest point (first point in array)
origin = (inlets[0][0], inlets[0][1])
# These are the "Tableau 10" colors as RGB. Changed the order a little bit.
# Source: http://tableaufriction.blogspot.ca/2012/11/finally-you-can-use-tableau-data-colors.html
tableau10 = [(31, 119, 180), (214, 39, 40), (44, 160, 44), (148, 103, 189), (255, 127, 14),
(140, 86, 75), (227, 119, 194), (127, 127, 127), (188, 189, 34), (23, 190, 207)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau10)):
r, g, b = tableau10[i]
tableau10[i] = (r / 255., g / 255., b / 255.)
# Get axes
ax = plt.axes()
# Set axes ticks
ax.set_xticks(np.arange(-50, 50, 5))
ax.set_xticks(np.arange(-50, 50, 1), minor=True)
ax.set_yticks(np.arange(-50, 50, 5))
ax.set_yticks(np.arange(-50, 50, 1), minor=True)
# Set axes limits
ax.set_xlim([0-origin[0], float(ffedata["Physical zone dimensions"][0])-origin[0]])
ax.set_ylim([0-origin[1], float(ffedata["Physical zone dimensions"][1])-origin[1]])
# Set ticks
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# Set aspect ratio to 1:1
ax.set_aspect('equal')
# Invert y-axis
ax.invert_yaxis()
# Add grid
plt.grid()
# Draw axis lines
plt.axhline(y=0, color='black', ls='dashed', linewidth=3, zorder=0)
plt.axvline(x=0, color='black', ls='dashed', linewidth=3, zorder=1)
# Handles for legend
handles = []
# Process each trajectory
for index, trajectory in enumerate(trajectories):
# Get the skin of the trajectory
skin = ffe.getSkinOfTrajectory(trajectory)
# Add handle
handles.append(matplotlib.lines.Line2D([0, 1], [0, 1], color=tableau10[index % len(tableau10)],
linewidth=5, alpha=0.5))
# How to draw the trajectories... skeleton?
if outputstyle == 'skeleton':
logger.info(u"Skeleton rendering of trajectory %d...", index+1)
# x-values for width
xvalues = [float(t[0] * resolution[0]) - origin[0] for t in skin]
# y-values for width
yvalues = [float(t[1] * resolution[1]) - origin[1] for t in skin]
        # Number of xvalues should be number of yvalues and it should be a multiple of 2 for drawing the lines properly
if len(xvalues) == len(yvalues) and (len(xvalues) % 2) == 0:
            # Number of values divided by two
halflen = int(len(xvalues)/2)
for l in xrange(halflen):
plt.plot([xvalues[l], xvalues[halflen*2-l-1]], [yvalues[l], yvalues[halflen*2-l-1]],
lw=1, color=tableau10[index % len(tableau10)])
# Plot all width points (small)
plt.scatter(xvalues, yvalues, s=1, color=tableau10[index % len(tableau10)], zorder=100 + index)
# x-values for trajectory
xvalues = [float(t[0] * resolution[0]) - origin[0] for t in trajectory]
# y-values for trajectory
yvalues = [float(t[1] * resolution[1]) - origin[1] for t in trajectory]
# Plot trajectory points itself
plt.plot(xvalues, yvalues, color=tableau10[index % len(tableau10)], zorder=100 + index)
plt.scatter(xvalues, yvalues, color=tableau10[index % len(tableau10)], zorder=100 + index)
# or splines?
elif outputstyle == 'spline':
logger.info(u"Spline rendering of trajectory %d...", index+1)
# Convert to real-world dimensions
data = np.array([(float(t[0] * resolution[0]) - origin[0],
float(t[1] * resolution[1]) - origin[1]) for t in skin])
# Resolution for interpolation
newres = np.arange(0, 1.01, 0.01)
# Interpolate as spline (cubic, k=3)
spline, _ = scipy.interpolate.splprep(data.transpose(), s=0)
# Get spline points of spline
splinepoints = scipy.interpolate.splev(newres, spline)
# Convert to vertices
vertices = [zip(splinepoints[0], splinepoints[1])]
# Patch collection
pc = matplotlib.collections.PolyCollection(vertices, color=tableau10[index % len(tableau10)],
zorder=100 + index,
alpha=0.5)
# Add the collection
ax.add_collection(pc)
# Draw the weighted-y lines
weightedy = ffe.calculateWeightedYOfTrajectory(trajectory) * resolution[1] - origin[1]
plt.axhline(y=weightedy, color=tableau10[index % len(tableau10)], ls='dashed', linewidth=3, zorder=50 + index)
# otherwise just use linear drawing (skin)
elif outputstyle == 'linear':
logger.info(u"Linear rendering of trajectory %d...", index+1)
# x-values
xvalues = [float(t[0] * resolution[0]) - origin[0] for t in skin]
# y-values
yvalues = [float(t[1] * resolution[1]) - origin[1] for t in skin]
# Convert the skin to vertices
vertices = [zip(xvalues, yvalues)]
# Patch collection
pc = matplotlib.collections.PolyCollection(vertices, color=tableau10[index % len(tableau10)], zorder=100+index,
alpha=0.5)
# Add the collection
ax.add_collection(pc)
# Draw the weighted-y lines
weightedy = ffe.calculateWeightedYOfTrajectory(trajectory) * resolution[1] - origin[1]
plt.axhline(y=weightedy, color=tableau10[index % len(tableau10)], ls='dashed', linewidth=3, zorder=50+index)
# Set title
ax.set_title("Trajectories of %s" % inputfile, fontsize=18)
ax.set_xlabel('x [mm]', fontsize=16, fontweight='bold')
ax.set_ylabel('y [mm]', fontsize=16, fontweight='bold')
# Set legend for trajectories
ax.legend(handles, ["Trajectory %d" % (i+1) for i in xrange(len(trajectories))], loc='lower left').set_zorder(200)
# Show the plot
if len(outputfile) > 0:
plt.savefig(outputfile)
logger.info(u"Saved plot to file %s.", outputfile)
else:
plt.show()
logger.info(u"Plotted.")
# Final logging
logger.info(u"####### Render trajectories end #######")
| gpl-3.0 |
beni55/sympy | sympy/mpmath/visualization.py | 18 | 9232 | """
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from .libmp import NoConvergence
from .libmp.backend import xrange
class VisualizationMethods(object):
plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
singularities=[], axes=None):
r"""
Shows a simple 2D plot of a function `f(x)` or list of functions
`[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
specified by *xlim*. Some examples::
plot(lambda x: exp(x)*li(x), [1, 4])
plot([cos, sin], [-4, 4])
plot([fresnels, fresnelc], [-4, 4])
plot([sqrt, cbrt], [-4, 4])
plot(lambda t: zeta(0.5+t*j), [-20, 20])
plot([floor, ceil, abs, sign], [-5, 5])
Points where the function raises a numerical exception or
returns an infinite value are removed from the graph.
Singularities can also be excluded explicitly
as follows (useful for removing erroneous vertical lines)::
plot(cot, ylim=[-5, 5]) # bad
plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good
For parts where the function assumes complex values, the
real part is plotted with dashes and the imaginary part
is plotted with dots.
.. note :: This function requires matplotlib (pylab).
"""
if file:
axes = None
fig = None
if not axes:
import pylab
fig = pylab.figure()
axes = fig.add_subplot(111)
if not isinstance(f, (tuple, list)):
f = [f]
a, b = xlim
colors = ['b', 'r', 'g', 'm', 'k']
for n, func in enumerate(f):
x = ctx.arange(a, b, (b-a)/float(points))
segments = []
segment = []
in_complex = False
for i in xrange(len(x)):
try:
if i != 0:
for sing in singularities:
if x[i-1] <= sing and x[i] >= sing:
raise ValueError
v = func(x[i])
if ctx.isnan(v) or abs(v) > 1e300:
raise ValueError
if hasattr(v, "imag") and v.imag:
re = float(v.real)
im = float(v.imag)
if not in_complex:
in_complex = True
segments.append(segment)
segment = []
segment.append((float(x[i]), re, im))
else:
if in_complex:
in_complex = False
segments.append(segment)
segment = []
if hasattr(v, "real"):
v = v.real
segment.append((float(x[i]), v))
except ctx.plot_ignore:
if segment:
segments.append(segment)
segment = []
if segment:
segments.append(segment)
for segment in segments:
x = [s[0] for s in segment]
y = [s[1] for s in segment]
if not x:
continue
c = colors[n % len(colors)]
if len(segment[0]) == 3:
z = [s[2] for s in segment]
axes.plot(x, y, '--'+c, linewidth=3)
axes.plot(x, z, ':'+c, linewidth=3)
else:
axes.plot(x, y, c, linewidth=3)
axes.set_xlim([float(_) for _ in xlim])
if ylim:
axes.set_ylim([float(_) for _ in ylim])
axes.set_xlabel('x')
axes.set_ylabel('f(x)')
axes.grid(True)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def default_color_function(ctx, z):
if ctx.isinf(z):
return (1.0, 1.0, 1.0)
if ctx.isnan(z):
return (0.5, 0.5, 0.5)
pi = 3.1415926535898
a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
a = (a + 0.5) % 1.0
b = 1.0 - float(1/(1.0+abs(z)**0.3))
return hls_to_rgb(a, b, 0.8)
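# A custom color function for cplot() only has to map a function value (a
# complex number) to an RGB triple of floats in [0, 1].  A minimal sketch
# using only the standard library; the call through the mpmath namespace is
# illustrative, not part of this module:
#
#   import cmath, math
#   def phase_color(w):
#       # hue from the phase, fixed lightness and saturation
#       h = (cmath.phase(complex(w)) + math.pi) / (2*math.pi)
#       return hls_to_rgb(h, 0.5, 1.0)
#
#   mpmath.cplot(mpmath.gamma, [-4, 4], [-4, 4], color=phase_color)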
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
verbose=False, file=None, dpi=None, axes=None):
"""
Plots the given complex-valued function *f* over a rectangular part
of the complex plane specified by the pairs of intervals *re* and *im*.
For example::
cplot(lambda z: z, [-2, 2], [-10, 10])
cplot(exp)
cplot(zeta, [0, 1], [0, 50])
By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is shown as brightness. You can also supply a
custom color function (*color*). This function should take a
complex number as input and return an RGB 3-tuple containing
floats in the range 0.0-1.0.
To obtain a sharp image, the number of points may need to be
increased to 100,000 or thereabout. Since evaluating the
function that many times is likely to be slow, the 'verbose'
option is useful to display progress.
.. note :: This function requires matplotlib (pylab).
"""
if color is None:
color = ctx.default_color_function
import pylab
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = fig.add_subplot(111)
rea, reb = re
ima, imb = im
dre = reb - rea
dim = imb - ima
M = int(ctx.sqrt(points*dre/dim)+1)
N = int(ctx.sqrt(points*dim/dre)+1)
x = pylab.linspace(rea, reb, M)
y = pylab.linspace(ima, imb, N)
# Note: we have to be careful to get the right rotation.
# Test with these plots:
# cplot(lambda z: z if z.real < 0 else 0)
# cplot(lambda z: z if z.imag < 0 else 0)
w = pylab.zeros((N, M, 3))
for n in xrange(N):
for m in xrange(M):
z = ctx.mpc(x[m], y[n])
try:
v = color(f(z))
except ctx.plot_ignore:
v = (0.5, 0.5, 0.5)
w[n,m] = v
if verbose:
print(str(n) + ' of ' + str(N))
rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]]
axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
axes.set_xlabel('Re(z)')
axes.set_ylabel('Im(z)')
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
wireframe=False, file=None, dpi=None, axes=None):
"""
Plots the surface defined by `f`.
If `f` returns a single component, then this plots the surface
defined by `z = f(x,y)` over the rectangular domain with
`x = u` and `y = v`.
If `f` returns three components, then this plots the parametric
surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.
For example, to plot a simple function::
>>> from sympy.mpmath import *
>>> f = lambda x, y: sin(x+y)*cos(y)
>>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP
Plotting a donut::
>>> r, R = 1, 2.5
>>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
>>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP
.. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
"""
import pylab
import mpl_toolkits.mplot3d as mplot3d
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = mplot3d.axes3d.Axes3D(fig)
ua, ub = u
va, vb = v
du = ub - ua
dv = vb - va
if not isinstance(points, (list, tuple)):
points = [points, points]
M, N = points
u = pylab.linspace(ua, ub, M)
v = pylab.linspace(va, vb, N)
x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
xab, yab, zab = [[0, 0] for i in xrange(3)]
for n in xrange(N):
for m in xrange(M):
fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
try:
x[m,n], y[m,n], z[m,n] = fdata
except TypeError:
x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
if c < cab[0]:
cab[0] = c
if c > cab[1]:
cab[1] = c
if wireframe:
axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
else:
axes.plot_surface(x, y, z, rstride=4, cstride=4)
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
if keep_aspect:
dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
maxd = max(dx, dy, dz)
if dx < maxd:
delta = maxd - dx
axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
if dy < maxd:
delta = maxd - dy
axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
if dz < maxd:
delta = maxd - dz
axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
| bsd-3-clause |
rrohan/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
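# For reference, the helper can also be called on its own; with the default
# n_runs=5 and, say, three cluster counts (illustrative values), it returns a
# (3, 5) array with one row per cluster count and one column per random run:
#
#   scores = uniform_labelings_scores(metrics.v_measure_score, 100, [2, 10, 50])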
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
deisi/SFG2D | sfg2d/models.py | 1 | 53293 | """Fitting Models to Fit data with."""
import numpy as np
from scipy.integrate import odeint
from scipy.special import erf, erfc
from scipy.stats import norm, skewnorm
from iminuit import Minuit, describe
import sys
import yaml
import logging
from .utils.static import sfgn
thismodule = sys.modules[__name__]
logger = logging.getLogger(__name__)
flatten = lambda l: [item for sublist in l for item in sublist]
#logger.setLevel(logging.DEBUG)
def read_fit_results(fname):
with open(fname) as ifile:
fit_results = yaml.load(ifile)
return fit_results
def model_fit_record(
model,
record,
kwargs_select_y,
kwargs_select_x,
kwargs_select_yerr=None,
kwargs_model=None,
run=False,
):
"""Make a model using selected data from SfgRecrod.
**Parameters**:
- **model**: String, Class name of the model to use
- **record**: SfgRecord obj to select data from
    - **kwargs_select_y**: kwargs to select y data with
    - **kwargs_select_x**: kwargs to select x data with
    - **kwargs_select_yerr**: kwargs to select yerr with
    - **kwargs_model**: kwargs to pass to the model
**Keywords:**
- **run**: Actually run the fit
**Returns:**
A model obj for the fit.
"""
if not kwargs_model:
kwargs_model = {}
#if not kwargs_select_yerr:
# raise NotImplementedError('Models without errorbar not implemented yet')
logger.debug('Selecting model data from record with:')
logger.debug('ydata :{}'.format(kwargs_select_y))
xdata = record.select(**kwargs_select_x).squeeze()
ydata = record.select(**kwargs_select_y).squeeze()
if not kwargs_select_yerr:
yerr = np.ones_like(ydata)
else:
yerr = record.sem(**kwargs_select_yerr).squeeze()
logger.debug('Setting model with:')
logger.debug('xdata: {}'.format(xdata))
logger.debug('ydata: {}'.format(ydata))
logger.debug('yerr: {}'.format(yerr))
logger.debug('kwargs_module: {}'.format(kwargs_model))
model = getattr(thismodule, model)(xdata, ydata, yerr, **kwargs_model)
if run:
fit_model(
model, # print_matrix=print_matrix
)
return model
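# A minimal usage sketch of model_fit_record. The selection kwargs below are
# hypothetical and depend on how the SfgRecord stores its data; only the call
# signature is taken from the function above:
#
#   model = model_fit_record(
#       'FourLevelMolKinM', record,
#       kwargs_select_y={'prop': 'bleach'},      # assumed selector
#       kwargs_select_x={'prop': 'pp_delays'},   # assumed selector
#       kwargs_model={'fitarg': {'s': 1, 't1': 1, 't2': 0.7, 'c': 1, 'mu': 0}},
#       run=True,
#   )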
def make_model_fit(
model,
xdata,
ydata,
yerr=None,
fit=False,
print_matrix=True,
model_kwargs={}
):
"""Generig interface for model fits.
**Arguments:**
- **model**: String name of model class
- **xdata**: x-data to the model
- **ydata**: y-data to model
**Keywords:**
- **yerr**: yerr to model
    - **fit**: boolean whether to run the fit
- **model_kwargs**: Keywords passed to model during creation
"""
model = getattr(thismodule, model)(xdata, ydata, yerr, **model_kwargs)
if fit:
fit_model(
model, print_matrix=print_matrix
)
return model
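# For direct arrays the generic interface looks like this (a sketch with
# synthetic data, not taken from a measurement):
#
#   x = np.linspace(-5, 5, 50)
#   y = 2 * norm.pdf(x, 0.5, 1.2) + 0.1
#   model = make_model_fit('GaussianModelM', x, y, fit=True, print_matrix=False)
#   model.p  # -> fitted [A, mu, sigma, c]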
def fit_model(model, minos=False, print_matrix=True):
"""Function to run migrad minimizations.
**Arguments:**
- **model**: Model instance to run migrad of.
**Keywords:**
    - **minos**: Boolean, whether errors should be calculated with minos.
Slow but more precise error estimation of the fit parameters.
"""
model.minuit.migrad() # Run the minimization
if minos:
model.minuit.minos()
model.minuit.migrad()
if print_matrix:
try:
model.minuit.print_matrix()
except ValueError:
pass
def normalize_trace(model, shift_mu=False, scale_amp=False, shift_heat=False, scale_x=None):
"""Normalize trace.
model: model to work on
    shift_mu: Shift by mu value of fit
    scale_amp: Scale by relative height between heat and minimum.
    shift_heat: Make heat value equal
    returns shifted data arrays:
"""
mu = 0
if shift_mu:
mu = model.minuit.fitarg['mu']
offset = 0
if shift_heat:
offset = 1-model.yfit_sample[-1]
scale = 1
if scale_amp:
x_mask = np.where((model.xsample-mu>0) & (model.xsample-mu<1000))
scale = 1-offset-model.yfit_sample[x_mask].min()
xdata = model.xdata - mu
ydata = (model.ydata+offset-1)/scale+1
yerr = model.yerr/scale
xsample = model.xsample - mu
yfit_sample = (model.yfit_sample+offset-1)/scale+1
if scale_x:
xdata = scale_x * xdata
xsample = scale_x * xsample
return xdata, ydata, yerr, xsample, yfit_sample
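# Typical use after a successful pump-probe fit might be (a sketch; the
# scale_x factor of 1e-3, e.g. fs -> ps, is an assumption):
#
#   xdata, ydata, yerr, xsample, yfit = normalize_trace(
#       model, shift_mu=True, scale_amp=True, shift_heat=True, scale_x=1e-3)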
class Fitter():
def __init__(
self,
xdata=None,
ydata=None,
sigma=None,
fitarg={},
box_coords=None,
roi=None,
name='',
ignore_errors=False,
**kwargs
):
"""Base Class to fit with Minuit.
        - **ignore_errors**:
            Optional. If given, sigmas are ignored during the fit.
        **fitarg**: Dictionary that gets passed to minuit
            and sets the starting parameters.
        **kwargs:**
            Get passed to minuit.
"""
self.xdata = xdata
self.ydata = ydata
self.sigma = sigma # y-errors.
self.cov = None # The covariance of the fit
# Coordinates of the fit result box in fig coordinates
self._box_coords = box_coords
self._box_str_format = '{:2}: {:8.3g} $\pm$ {:6.1g}\n'
if not roi:
roi = slice(None)
self.roi = roi
self._pnames = None
self._xsample_num = 400
self.name = name
self.ignore_errors=ignore_errors
# Buffer for figures
self.figures = {}
kwargs.setdefault('pedantic', False)
# Minuit is used for fitting. This makes self.chi2 the fit function
#logger.info(self.chi2)
#kwargs['forced_parameters'] = self.parameters
logger.info(fitarg)
logger.info(kwargs)
self.minuit = Minuit(self.chi2, **fitarg, **kwargs)
def _setup_fitter_kwargs(self, fitarg, kwargs=None):
"""Setup initial fitter kwargs
Use this to pass default fitargs and parameter names to Minuit.
        This allows initializing a Model class with only a fit function and no
boilerplate chi2 function.
"""
        # This guarantees correct order and names of fit parameters
# we start at 1 because running value (x or t) must be skipped
self.parameter_names = describe(self.fit_func)[1:]
        # The order of parameters is important
fitarg['forced_parameters'] = self.parameter_names
if not kwargs:
kwargs = {}
if not kwargs.get('fitarg'):
kwargs['fitarg'] = {}
kwargs['fitarg'] = {**fitarg, **kwargs['fitarg']}
        # TODO: add check that fitargs and parameter_names fit together
return kwargs
def chi2(self, *args, **kwargs):
"""Sum of distance of data and fit. Weighted by uncertainty of data."""
return np.sum(
(
(self.ydata - self.fit_func(self.xdata, *args, **kwargs)) /
self.sigma
)**2
)
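    # In formula form the statistic minimized by Minuit is the usual weighted
    # least squares sum: chi^2(p) = sum_i ((y_i - f(x_i; p)) / sigma_i)^2.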
def fit_func(self):
"""Fit function that must be implemented by child classes."""
raise NotImplementedError
@property
def parameters(self):
return describe(self.fit_func)[1:]
@property
def box_coords(self):
"""Coordinades for the fit results box."""
if not self._box_coords:
return self.xdata.mean(), self.ydata.mean()
return self._box_coords
def draw_text_box(self, box_coords=None, **kwargs):
"""Draw a textbox on current axes."""
from matplotlib.pyplot import text
if not box_coords:
box_coords = self.box_coords
text(*box_coords, self.box_str, **kwargs)
@property
def p(self):
"""Parameters of the Fit."""
#return self.minuit.args
return [self.minuit.fitarg[param] for param in self.minuit.parameters]
@property
def box_str(self):
"""String to place on the plot. Showing Fit Statistics."""
text = ''
for name, value in zip(self.minuit.parameters, self.minuit.args):
text += self._box_str_format.format(
name, value, self.minuit.errors[name]
)
return text
@property
def xdata(self):
"""X data for the fit."""
return self._xdata[self.roi]
@xdata.setter
def xdata(self, value):
if len(np.shape(value)) != 1:
            raise IndexError('Shape of xdata is not of dim 1')
self._xdata = value
@property
def ydata(self):
"""Y data for the fit."""
return self._ydata[self.roi]
@ydata.setter
def ydata(self, value):
if len(np.shape(value)) != 1:
            raise IndexError('Shape of ydata is not of dim 1')
self._ydata = value
@property
def sigma(self):
"""Error of the ydata for the fit."""
if isinstance(self._sigma, type(None)):
return np.ones_like(self.ydata)
if self.ignore_errors:
return np.ones_like(self.ydata)
return self._sigma[self.roi]
@sigma.setter
def sigma(self, value):
self._sigma = value
if isinstance(value, type(None)):
self._sigma = np.ones_like(self._ydata)
elif len(np.shape(value)) != 1:
            raise IndexError('Shape of yerr is not of dim 1')
if np.any(value==0):
pos = np.where(value==0)
#replace = np.nanmedian(value)
            logger.warn('Zero value within uncertainty.')
logger.warn('Zero Values @ {}'.format(pos))
#logger.warn('Replacing error with {}'.format(replace))
#logger.warn('Errors passed were {}'.format(value))
#self._sigma = np.ones_like(self._ydata)
#self.ignore_errors = True
@property
def yerr(self):
"""Error of the ydata for the fit."""
return np.array(self.sigma)
def fit_res(self, x):
"""Fit function wit fit result parameters"""
return self.fit_func(x, *self.p)
@property
def x_edges(self):
"""Edges of the x data of the fit."""
return self.xdata[0], self.xdata[-1]
@property
def y_edges(self):
"""Edges of the y data of the fit."""
return self.ydata[0], self.ydata[-1]
@property
def xsample(self):
"""A sampled version of the xdata. `Fitter._xsample_num` is the number
of samples.
`Fitter.xsample` is used to generate a smooth plot of the fitting curve.
"""
return np.linspace(self.xdata[0], self.xdata[-1], self._xsample_num)
@property
def ysample(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.yfit_sample
@property
def y_fit(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.yfit_sample
@property
def yfit_sample(self):
"""Y vales of the fit function sampled with `Fitter.xsample`."""
return self.fit_res(self.xsample)
@property
def fitarg(self):
"""Minuit fitargs."""
return self.minuit.fitarg
def plot(self, kwargs_data=None, kwargs_fit=None):
"""Function to easily look at a plot. Not very flexible. But usefull during
interactive sessions.
"""
import matplotlib.pyplot as plt
if not kwargs_data:
kwargs_data = {}
kwargs_data.setdefault('x', self.xdata)
kwargs_data.setdefault('y', self.ydata)
kwargs_data.setdefault('yerr', self.yerr)
kwargs_data.setdefault('fmt', 'o')
if not kwargs_fit:
kwargs_fit = {}
kwargs_fit.setdefault('x', self.xsample)
kwargs_fit.setdefault('y', self.ysample)
kwargs_fit.setdefault('color', 'r')
plt.errorbar(**kwargs_data)
plt.plot(**kwargs_fit)
    def save(self, fpath):
        """Save the fit arguments to a yaml file."""
        with open(fpath, 'w') as ofile:
            yaml.dump(self.fitarg, ofile)
@property
def kwargs(self):
"""Dict containing the most important kwargs of the Model."""
return {
'xdata' : self.xdata.tolist(),
'ydata' : self.ydata.tolist(),
'sigma': self.sigma.tolist(),
'fitarg' : self.fitarg,
}
@property
def dict(self):
"""Dict containing class name and most important kwargs."""
return {
'name': self.__class__.__name__,
'module': self.__module__,
'kwargs': self.kwargs
}
class GaussianModelM(Fitter):
def __init__(self, *args, **kwargs):
        ''' Fit Gaussian model using Minuit.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
'''
kwargs = self._setup_fitter_kwargs(
{'A': 1, 'mu':0, 'sigma': 1, 'c': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
def fit_func(self, x, A, mu, sigma, c):
"""Guassian function
A: amplitude
mu: position
sigma: std deviation
c : offset
"""
# Minuit passes negative values for sigma
# and these values lead to failures of the fitting
if sigma < 0:
return 0
return A * norm.pdf(x, mu, sigma) + c
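# Direct construction is also possible. Besides the start values, Minuit-style
# entries such as 'error_<name>', 'fix_<name>' and 'limit_<name>' can be put
# into fitarg, as described in the class docstring (values below are only
# illustrative):
#
#   gm = GaussianModelM(
#       xdata=x, ydata=y,
#       fitarg={'A': 1, 'mu': 0, 'sigma': 1, 'c': 0,
#               'limit_sigma': (0, 10), 'fix_c': False},
#   )
#   fit_model(gm, print_matrix=False)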
class GaussianModelN(Fitter):
def __init__(self, *args, parameter_dict=None, **kwargs):
        ''' Fit Gaussian model using Minuit.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **parameter_dict**: Dict of parameters for gaussian fit.
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
'''
self._parameter_names = None
self._parameter_dict_fitarg = None
self._pmn = ['A', 'mu', 'sigma', 'c']
#Numberfy params
if not parameter_dict:
raise NotImplementedError('Must have parameter dict currently')
self.parameter_dict = parameter_dict
if not kwargs:
kwargs = {}
kwargs['forced_parameters'] = self.parameter_names
kwargs['fitarg'] = self.parameter_dict_fitarg
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
@property
def parameter_names(self):
if isinstance(self._parameter_names, type(None)):
ret = []
for name in self._pmn:
pos = 0
for value in self.parameter_dict[name]:
ret.append('%s%d'%(name, pos))
pos += 1
self._parameter_names = ret
return self._parameter_names
@property
def parameter_dict_fitarg(self):
"""Creates a numbered dictionary that can be used as fitargs
dict to create the fit function."""
if isinstance(self._parameter_dict_fitarg, type(None)):
ret = {}
for pm in self._pmn:
values = self.parameter_dict[pm]
pos = 0
for value in values:
ret['%s%d'%(pm,pos)] = value
pos += 1
self._parameter_dict_fitarg = ret
return self._parameter_dict_fitarg
def _params_from_parameter_dict(self):
ret = []
for name in self._parameter_names:
[ret.append(value) for value in self.parameter_dict[name]]
return np.array(ret)
def fit_func(self, x, *params):
"""
Gaussian functions.
Pass parameters as list. Sorting of parameters is:
A0, A1,.. mu0, mu1,... sigma0, sigma1,....c0,c1,....
"""
# Minuit passes negative values for sigma
# and these values lead to failures of the fitting
i = len(params)//4
pparams = np.reshape(params, (4, i)).T
ret = np.zeros_like(x)
for _p in pparams:
ret += self._gaussian(x, *_p)
return ret
def _gaussian(self, x, A, mu, sigma, c):
"""Gaussian function"""
if sigma < 0:
return 0
return A * norm.pdf(x, mu, sigma) + c
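# The parameter_dict is expected to provide one list per base parameter
# ('A', 'mu', 'sigma', 'c'), with one entry per peak.  For two Gaussians a
# sketch could look like this (numbers are placeholders):
#
#   pd = {'A': [1, 0.5], 'mu': [2870, 2930], 'sigma': [8, 10], 'c': [0, 0]}
#   gn = GaussianModelN(x, y, parameter_dict=pd)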
class LorenzianModel(Fitter):
"""
N-Lorenzian Peaks and Non Resonant background to fit SFG
Spectra with.
"""
def __init__(self, *args, n_lorenzians=1, **kwargs):
        # Must define forced_parameters because iminuit's parameter auto
        # discovery fails for sfgn as fit function
self.n_lorenzians = n_lorenzians
_fitarg = {k: 0 for k in flatten([('amp_%i'%i, 'pos_%i'%i, 'width_%i'%i) for i in range(n_lorenzians)])}
_fitarg = {'nr': 0, 'phase': 0, **_fitarg}
self.parameter_names = list(_fitarg.keys())
kwargs['forced_parameters'] = self.parameter_names
# If no fitargs is defined, we define a minimum set and use
# sane parameter defaults
# This has a problem if n_lorenzians is wrong. Currently the user
# has to take care to use it correctly
fitarg = kwargs.get('fitarg')
if not fitarg:
kwargs['fitarg'] = _fitarg
Fitter.__init__(self, *args, **kwargs)
def fit_func(self, x, *args, **kwargs):
return sfgn(x, *args, **kwargs)
@property
def kwargs(self):
"""n_lorenzians is needed for model to work."""
ret = super().kwargs
ret['n_lorenzians'] = self.n_lorenzians
return ret
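# With n_lorenzians=2 the expected fitarg keys are 'nr', 'phase' and the
# numbered peak parameters 'amp_i', 'pos_i', 'width_i'.  A usage sketch
# (start values are placeholders, not calibrated peak positions):
#
#   lm = LorenzianModel(
#       x, y, n_lorenzians=2,
#       fitarg={'nr': 0.1, 'phase': 0,
#               'amp_0': 1, 'pos_0': 2880, 'width_0': 10,
#               'amp_1': 1, 'pos_1': 2940, 'width_1': 12},
#   )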
class SkewedNormal(Fitter):
def __init__(self, *args, **kwargs):
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
    def fit_func(self, x, A, mu, sigma, kurt, c):
return A * skewnorm.pdf(x, kurt, mu, sigma) + c
class FourLevelMolKinM(Fitter):
def __init__(
self,
*args,
gSigma=150,
N0=[1, 0, 0, 0],
rtol=1.09012e-9,
atol=1.49012e-9,
full_output=True,
**kwargs
):
"""4 Level Model Fitter.
To use set following `kwargs`
        `xdata`, `ydata` and `fitarg`. Optionally pass `sigma` for y errors.
**Arguments:**
- **N0**: Boundary condition of the DGL
- **rtol**: Precision parameter of the DGL
- **atol**: Precision parameter of the DGL
        - **full_output**: Whether to get full_output of the DGL Solver.
            Useful for debugging atol and rtol.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
self.gSigma = gSigma # width of the excitation
        self.rtol = rtol # Precision of the numerical integrator.
        self.atol = atol
        # Starting conditions of the populations, not to be confused with
        # the starting conditions of the plot.
self.N0 = N0
self.full_output = full_output
self.infodict = None # Infodict return of the Odeint.
kwargs = self._setup_fitter_kwargs(
{'s': 1, 't1': 1, 't2': 0.7, 'c': 1, 'mu': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
def ext_gaus(self, t, mu, sigma):
"""Gausian excitation function.
Due to historic reasons its not a strict gausian, but something
very cloe to it. The Igor Code is:
1/sqrt(pi)/coeff1*exp(-(coeff0-x)^2/coeff1^2)
The here wanted sigma is sqrt(2)*sigma of a normal gaussian
and then its also normalized. If you have FWHM, then sigma
is sigma = FWHM/(2*sqrt(log(2)))
"""
return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu-t)/sigma)**2)
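    # Illustrative sketch (not part of the original source): converts a FWHM
    # to the sigma convention described in the ext_gaus docstring above.
    # The helper name is hypothetical and nothing else in the class uses it.
    @staticmethod
    def _example_sigma_from_fwhm(fwhm):
        """sigma = FWHM / (2 * sqrt(log(2))) for the ext_gaus convention."""
        return fwhm / (2 * np.sqrt(np.log(2)))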
# The Physical Water model
def dgl(self, N, t, ext_func, s, t1, t2):
"""Dgl of the 4 Level DGL system.
**Arguments:**
- **N**: deg 4 array
Population of the 4 levels respectively
- **t**: float
time
        - **ext_func**: excitation function in time.
          Time profile of the pump laser.
          Function of t. Usually a Gaussian function.
        - **s**: scaling factor of the pump laser.
        - **t1**: Time constant of the first level.
        - **t2**: Time constant of the second level.
        **Returns:**
        Derivatives of the system as a 4-dim array.
"""
# This is the DGL written as a Matrix multiplication.
# dNdt = A x N
# A is the constructing matrix of the DGL
# and N is a 4-level vector with (N0, N1, N2, N3)
# as the population of the states at time t.
# dNdt is the state wise derivative of N
# See https://en.wikipedia.org/wiki/Matrix_differential_equation
A = np.array([
[-s * ext_func(t), s * ext_func(t), 0, 0],
[s * ext_func(t), -s * ext_func(t) - 1/t1, 0, 0],
[0, 1 / t1, -1 / t2, 0],
[0, 0, 1 / t2, 0],
], dtype=np.float64)
dNdt = A.dot(N)
return dNdt
def fit_func(self, t, s, t1, t2, c, mu):
"""
Function we use to fit.
**Arguments:**
- **t**: time
- **s**: Gaussian Amplitude
        - **t1**: Lifetime of the first state
        - **t2**: Lifetime of the second (intermediate) state
        - **c**: Coefficient of the third (heat) state
        - **mu**: Position of the pump pulse, the zero.
        **Returns**
        The bleach of the water model."""
N = self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
).T
        return ((N[0] - N[1] + N[2] + c * N[3])**2) / (self.N0[0]**2)
def population(self, t, *args, **kwargs):
"""Numerical solution to the 4 Level DGL-Water system.
**Arguments:**
- **t**: array of time values
**Args**:
Arguments of the dgl function
- **ext_func**: Function of excitation.
- **s**: scalar factor for the pump
        - **t1**: Lifetime of the first excited state
        - **t2**: Lifetime of the intermediate state.
        **kwargs**:
          Get passed to the differential equation solver odeint.
        **Returns**
        (len(t), 4) shaped array with the 4 entries being the populations
        of the N0 to N3 levels of the system.
"""
ret = odeint(
func=self.dgl, # the DGL of the 4 level water system
y0=self.N0, # Starting conditions of the DGL
t=t, # Time as parameter
            args=args, # Arguments of the dgl
            # Dfun=self.jac, # The Jacobian of the DGL. It's optional.
            # The precision parameters for the numerical DGL solver.
rtol=self.rtol,
atol=self.atol,
full_output=self.full_output,
**kwargs,
)
if self.full_output:
ret, self.infodict = ret
return ret
def jac(self, N, t, ext_func, s, t1, t2):
"""Jacobean matrix of the DGL."""
# In this case the Jacobean Matrix is euqal the
# Consturcting matrix of the DGL.
# So it doesn't help much. It just speeds up the thing
# a little.
A = np.array([
[-s * ext_func(t), s * ext_func(t), 0, 0],
[s * ext_func(t), -s * ext_func(t) - 1/t1, 0, 0],
[0, 1 / t1, -1 / t2, 0],
[0, 0, 1 / t2, 0],
], dtype=np.float64)
return A
def fit_populations(self, t):
s, t1, t2, c1, mu = self.p
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
)
def start_population(self, t):
s, t1, t2, c1, mu = self.p0
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
)
def save(self, fname):
"""Save FourLevelMolKin results."""
parameter_dict = {
'gSigma': self.gSigma,
'rtol': self.rtol,
'atol': self.atol,
'N0': self.N0,
}
super().__save__(fname, parameter_dict)
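# Illustrative sketch (not part of the original module): integrates the same
# 4-level rate-equation matrix used in FourLevelMolKinM.dgl above with a
# Gaussian pump, independent of the Fitter machinery. Parameter values are
# arbitrary and only meant to show the odeint call pattern.
def _example_four_level_populations():
    import numpy as np
    from scipy.integrate import odeint
    def pump(t, mu=0.0, sigma=0.2):
        return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu - t) / sigma) ** 2)
    def dgl(N, t, s=1.0, t1=1.0, t2=0.7):
        A = np.array([
            [-s * pump(t), s * pump(t), 0, 0],
            [s * pump(t), -s * pump(t) - 1 / t1, 0, 0],
            [0, 1 / t1, -1 / t2, 0],
            [0, 0, 1 / t2, 0],
        ])
        return A.dot(N)
    t = np.linspace(-1, 5, 200)
    return odeint(dgl, [1, 0, 0, 0], t)  # shape (200, 4): N0..N3 over time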
class Crosspeak(FourLevelMolKinM):
def __init__(
self,
*args,
N0=[1, 0, 0, 0, 0],
**kwargs
):
"""4 Level Model based crosspeak fitter.
"""
FourLevelMolKinM.__init__(self, *args, N0=N0, **kwargs)
def matrix(self, t, t1, teq, tup, tdown, ext_func, s):
"""Matrix to construct the DGL"""
return np.array([
[-s * ext_func(t), -s * ext_func(t), 0, 0, 0],
[s * ext_func(t), -s * ext_func(t)-1/tup-1/t1, 1/tdown, 0, 0],
[0, 1/tup, -1/tdown, 0, 0],
[0, 1/t1, 0, -1/teq, 0],
[0, 0, 0, 1/teq, 0]
], dtype=np.float64)
def dgl(self, N, *args):
"""Matrix form of the DGL"""
dNdt = self.matrix(*args).dot(N)
return dNdt
def fit_func(self, t, t1, teq, tup, tdown, mu, gSigma, s, c):
"""Function that is used for fitting the data.
"""
N = self.population(
t,
t1,
teq,
tup,
tdown,
lambda t: self.ext_gaus(t, mu, gSigma),
s,
).T
# On Pump vibration
y0 = (N[0] + c * N[3] - N[1])**2 / self.N0[0]**2
# Off Pump vibration
y1 = (N[0] + c * N[3] - N[2])**2 / self.N0[0]**2
# Fit function is two dimensional because input data consist of two
# traces.
return np.array([y0, y1])
@property
def ydata(self):
return self._ydata
@ydata.setter
def ydata(self, value):
self._ydata = np.array(value)
@property
def sigma(self):
"""Error of the ydata for the fit."""
        if self._sigma is None:
return np.ones_like(self.ydata)
if self.ignore_errors:
return np.ones_like(self.ydata)
return self._sigma[self.roi]
@sigma.setter
def sigma(self, value):
        if value is None:
            self._sigma = np.ones_like(self._ydata)
            return
        if np.any(value == 0):
            # A zero uncertainty would break the error weighting,
            # so fall back to ignoring the errorbars altogether.
            from warnings import warn
            warn('Passed uncertainty has a 0 value\nIgnoring errorbars.\n{}'.format(value))
            self.ignore_errors = True
        self._sigma = value
class SingleLifetime(Fitter):
def __init__(
self,
*args,
fit_func_dtype=np.float64,
**kwargs
):
"""Fitting Model with convolution of single exponential and gaussian.
**Arguments**:
        - **xsample**: Optional
          Step size of the convolution. Defaults to the minimal
          difference of xdata, over the range of xdata.
        - **xsample_ext**: Boundary effects of the convolution make it necessary to
          add additional data points to the xsample data. By default 10% are
          added.
        - **fit_func_dtype**: The exponential function in the fit function can become
          very large. To cope with that one can set the dtype of the fit function.
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
self.fit_func_dtype = fit_func_dtype
kwargs = self._setup_fitter_kwargs(
{'A': 1, 't1':1000, 'c': 0, 'mu': 0, 'sigma': 200 },
kwargs
)
Fitter.__init__(self, *args, **kwargs)
def fit_func(self, t, A, t1, c, mu, ofs, sigma):
"""Result of a convolution of Gausian an exponential recovery.
This function is the Analytically solution to the convolution of:
f = (- A*exp(-t/tau) + c)*UnitStep(t)
g = Gausian(t, mu, sigma)
result = Convolve(f, g)
**Arguments:**
- **t**: array of times
- **A**: Amplitude of the recovery
        - **t1**: Lifetime of the recovery
- **c**: Convergence of the recovery
        - **mu**: Temporal position of the pulse
- **ofs**: Global offset factor
- **sigma**: Width of the gaussian
"""
        ## This dtype hack is needed because the exp can become very large.
return 1/2 * (
c + c * erf((t - mu)/(np.sqrt(2) * sigma)) -
A * np.exp(((sigma**2 - 2 * t * t1 + 2 * mu * t1)/(2 * t1**2)),
dtype=self.fit_func_dtype) *
erfc((sigma**2 - t * t1 + mu * t1)/(np.sqrt(2) * sigma * t1))
) + ofs
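# Illustrative sketch (not part of the original module): numerically builds the
# convolution described in SingleLifetime.fit_func above -- an exponential
# recovery multiplied by a unit step, convolved with a Gaussian -- on a
# symmetric time grid. With mu = 0 and ofs = 0 the analytical fit_func should
# agree with this curve away from the grid edges. All numbers are demo values.
def _example_numeric_convolution():
    import numpy as np
    from scipy.stats import norm
    A, t1, c, sigma = 1.0, 500.0, 0.0, 200.0
    t = np.linspace(-3000, 3000, 6001)          # symmetric grid, pulse at t=0
    dt = t[1] - t[0]
    f = (-A * np.exp(-t / t1) + c) * (t >= 0)   # exponential recovery * step
    g = norm.pdf(t, 0.0, sigma)                 # Gaussian pulse shape
    return t, np.convolve(f, g, mode='same') * dt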
class ThreeLevelMolkin(Fitter):
def __init__(
self,
*args,
gSigma=150,
N0=[1, 0, 0],
rtol=1.09012e-9,
atol=1.49012e-9,
full_output=True,
**kwargs
):
"""
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
Fitter.__init__(self, *args, **kwargs)
self.gSigma = gSigma # width of the excitation
        self.rtol = rtol # Precision of the numerical integrator.
        self.atol = atol
        # Starting conditions of the populations, not to be confused with
        # the starting conditions of the plot.
self.N0 = N0
self.full_output = full_output
self.infodict = None # Infodict return of the Odeint.
def ext_gaus(self, t, mu, sigma):
"""Gausian excitation function.
Due to historic reasons its not a strict gausian, but something
very cloe to it. The Igor Code is:
1/sqrt(pi)/coeff1*exp(-(coeff0-x)^2/coeff1^2) """
return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu-t)/sigma)**2)
def dgl(self, N, t, ext_func, s, t1):
"""DGL of the three level system.
Parameters
----------
N: deg 3 Array with initial populations of the levels
typically [1, 0, 0]
t: float
time
ext_func: excitation function of the laser. Typically a gaussian.
Function of t.
s: scaling factor of the pump laser.
        t1: Lifetime of the excited state.
        Returns
        -------
        Derivatives of the system as a 3-dim array.
"""
A = np.array([
[-s*ext_func(t), s*ext_func(t), 0],
[s*ext_func(t), -s*ext_func(t) - 1/t1, 0],
[0, 1/t1, 0]
], dtype=np.float64)
dNdt = A.dot(N)
return dNdt
def population(self, t, ext_func, s, t1, **kwargs):
"""Nummerical solution of the DGL.
Parameters
----------
t: array if times
ext_func: excitation function. Depends on t.
s: scaling factor of the pump.
t1: livetime of the first state.
Returns
-------
Populations of the 3 levels at the times t.
"""
ret = odeint(
func=self.dgl, # the DGL of the 3 level water system
y0=self.N0, # Starting conditions of the DGL
t=t,
args=(ext_func, s, t1),
rtol=self.rtol,
atol=self.atol,
full_output=self.full_output,
**kwargs,
)
if self.full_output:
ret, self.infodict = ret
return ret
def fit_populations(self, t):
s, t1, c1, mu = self.p
return self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
)
def fit_func(self, t, s, t1, c, mu):
"""
Function we use to fit.
parameters
----------
t: time
s: Gaussian Amplitude
        t1: Lifetime of the first state
        c: Coefficient of the third (heat) state
        mu: Position of the pump pulse
        Returns:
        The bleach of the water model."""
N = self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
).T
return ((N[0] + c * N[2] - N[1])**2) / (self.N0[0]**2)
class TwoExponentials(Fitter):
def __init__(self, *args, **kwargs):
"""Two exponentials convoluted with gaussian. Dont use this. Its has
a causality problem.
"""
Fitter.__init__(self, *args, **kwargs)
self.N0 = [1, 0, 0, 0]
def fit_func(self, x, Amp, Aheat, t1, t2, offs, pwidth, mu):
"""Analytical solution to the four level system with gaussian excitation pulse."""
e1 = np.exp((0.5*((t2**-2.)*((pwidth**2)+((-2.*(x*t2))+(2.*(mu*t2)))))))
e2 = np.exp((0.5*((t1**-2.)*((pwidth**2)+((-2.*(x*t1))+(2.*(mu*t1)))))))
er_1 = ((((2.**-0.5)*(((pwidth**2)+(mu*t2))-(x*t2)))/t2)/pwidth)
er_2 = ((((2.**-0.5)*(((pwidth**2)+(mu*t1))-(x*t1)))/t1)/pwidth)
er_3 = (((2.**-0.5)*(x-mu))/pwidth)
aux0=(e1)*(erfc(er_1));
aux1=(e2)*(erfc(er_2));
aux2=Amp*(((offs+(offs*(erf(er_3))))-(Aheat*aux0))-aux1);
output=0.5*aux2+1
# Due to exp overflow, nans can occur.
# However they result in 1 because they happen at negative times.
output[np.isnan(output)] = 1
# +1 to have right population
return output
class FourLevel(Fitter):
"""Analytical Solution to the 4 Level Model.
    The concept for the solution was taken from: (doi:10.1021/jp003158e) Lock,
A. J.; Woutersen, S. & Bakker, H. J.
"""
def __init__(self, *args, **kwargs):
        # Autodiscovery of iminuit doesn't work with implicit
        # variable definitions. Thus we must specify the parameters
        # and their names explicitly. We also define some sane defaults
        # that should be updated by the user.
        # The order of the arguments matters, because it must match the
        # signature of fit_func.
kwargs = self._setup_fitter_kwargs(
{'Amp': 1, 't1': 1, 't2': 0.7, 'c': 1, 'sigma':0.2, 'mu': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
        self.N = 1 # Number of initial oscillators.
def N(self, t, t1, t2, N10, N20=0, N30=0):
"""Populations of the solution to the 4 level model.
This is only true for t>0.
**Parameters:**
- **t**: Time points to calculated population of
- **t1**: Lifetime of first excited state
- **t2**: Lifetime of intermediate (heat) state
        - **N10**: Fraction of initially excited oscillators, 0 < N10 < 1
        - **N20**: Fraction of initially excited oscillators in the heated state
        - **N30**: Fraction of initially excited oscillators in the final state
**Returns:**
Tuple of N0, N1, N2, N3 at times t
"""
N1 = np.exp(((-t)/t1))*N10
aux0=(np.exp((((-t)/t2)-(t/t1))))*(((np.exp((t/t2)))-(np.exp((t/t1))))\
*(N10*t2));
N2=((np.exp(((-t)/t2)))*N20)+(aux0/(t1-t2));
aux0=(((np.exp(((t/t1)+(t/t2))))*t1)+((np.exp((t/t1)))*t2))-((np.exp((\
(t/t1)+(t/t2))))*t2);
aux1=((np.exp((((-t)/t2)-(t/t1))))*(N10*(aux0-((np.exp((t/t2)))*t1))))\
/(t1-t2);
N3=((np.exp(((-t)/t2)))*((-1.+(np.exp((t/t2))))*N20))+(N30+aux1);
N0 = self.N - N1 - N2 - N3
return N0, N1, N2, N3
def fit_func(self, t, Amp, t1, t2, c, mu, sigma):
"""Function for the time dependency of pump-probe sfg data.
        The function is derived by analytically solving the 4 level system and
        subsequently convolving it with a Gaussian excitation function of the
        model. The initial state is N0=1. All other states are empty.
        This exact implementation has a problem when t1 == t2 exactly. Due to
        numerical constraints this must be avoided.
        If the difference instead of the ratio is used, the function stays the
        same, due to the distributivity of the convolution and the fact that a
        Gaussian convolved with -1 gives -1. Therefore only -1 needs to be subtracted.
**Arguments**:
- **t**: Array of Time values. Usually given by experiment.
- **Amp**: Amplitude of the excitation pulse. Determines the fraction
of oscillators excited by the excitation pulse.
- **t1**: Lifetime of the first excited vibrational state in units of
**t**
- **t2**: Lifetime of the second excited vibrational state in units of
**t**
- **c**: Scaling factor of final (heated) state. Used to account for
spectral differences induced by residual heat.
        - **mu**: Temporal position of the pump pulse in units of **t**.
- **sigma**: Temporal width of pump pulse in units of **t**.
**Returns**
Modeled result as deduced from the 4 level system for the given array
of **t** time values.
"""
pi=np.pi;
#a0 = erf((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/sigma))
def mysqrt(x): return np.sqrt(x)
aux0=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux1=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux2=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux3=(((t1-t2)**2))*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))));
aux4=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux5=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux6=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma)))))));
aux7=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux8=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux9=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)/\
sigma))))));
aux10=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux11=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux10))))));
aux12=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux13=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux12)))))));
aux14=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux15=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux14)))))));
aux16=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux17=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux16))))));
aux18=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux19=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux18))))));
aux20=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux21=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux20)))))));
aux22=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux23=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux22)))))));
aux24=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux25=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux24))))));
aux26=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux27=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux26)))))));
aux28=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux29=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux28))))));
aux30=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux31=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux30))))));
aux32=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux33=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux32))))));
aux34=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux35=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux36=(mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf((aux35-(((2.**-0.5)*t)/sigma))))))));
aux37=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux38=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux39=(mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf((aux38-(((2.**-0.5)*t)/sigma)))))));
aux40=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.\
)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux41=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux42=(mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf((aux41-(((2.**-0.5)*t)/sigma)))))));
aux43=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux44=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((2.*pi)))*(sigma*((t2**2)*(-1.+(erf(aux43))))));
aux45=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma))));
aux46=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux45));
aux47=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma))));
aux48=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux47));
aux49=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux50=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux49));
aux51=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma))));
aux52=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux51));
aux53=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma)));
aux54=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux53));
aux55=(3.*(Amp*aux46))+((Amp*(c*aux48))+((-2.*(Amp*aux50))+((Amp*(c*\
aux52))+(Amp*aux54))));
aux56=(-2.*((Amp**2)*(c*((np.exp(((aux40-(t/t2))-(t/t1))))*aux42))))+(\
((Amp**2)*(c*aux44))+aux55);
aux57=((Amp**2)*((c**2)*((np.exp(((aux34-(t/t2))-(t/t1))))*aux36)))+((\
2.*((Amp**2)*((np.exp(((aux37-(t/t2))-(t/t1))))*aux39)))+aux56);
aux58=((Amp**2)*aux29)+((-2.*((Amp**2)*(c*aux31)))+(((Amp**2)*((c**2)*\
aux33))+aux57));
aux59=(2.*((Amp**2)*(c*aux23)))+((-2.*((Amp**2)*aux25))+((2.*((Amp**2)\
*(c*aux27)))+aux58));
aux60=(-2.*((Amp**2)*aux17))+((2.*((Amp**2)*(c*aux19)))+((2.*((Amp**2)\
*aux21))+aux59));
aux61=((Amp**2)*((c**2)*aux11))+((3.*((Amp**2)*aux13))+((-2.*((Amp**2)\
*(c*aux15)))+aux60));
aux62=((Amp**2)*((c**2)*((mysqrt((0.5*pi)))*aux8)))+((Amp*(c*((\
mysqrt((2.*pi)))*aux9)))+aux61);
aux63=(2.*((Amp**2)*(c*((mysqrt((2.*pi)))*aux6))))+(((Amp**2)*((\
mysqrt((0.5*pi)))*aux7))+aux62);
aux64=(2.*(Amp*((mysqrt((2.*pi)))*aux4)))+((-2.*(Amp*(c*((mysqrt((\
2.*pi)))*aux5))))+aux63);
aux65=(Amp*(c*((mysqrt((2.*pi)))*aux2)))+(((mysqrt((0.5*pi)))*(\
sigma*aux3))+aux64);
aux66=((Amp**2)*((mysqrt((0.5*pi)))*aux0))+(((Amp**2)*((c**2)*((\
mysqrt((0.5*pi)))*aux1)))+aux65);
aux67=(t2**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma)));
aux68=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux67));
aux69=t1*(t2*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t2))-(t*t2)))/t2)/\
sigma))));
aux70=(np.exp((0.5*((t2**-2.)*((sigma**2)+((2.*(mu*t2))+(-2.*(t*t2))))\
))))*((mysqrt((2.*pi)))*(sigma*aux69));
aux71=(t1**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux72=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux71));
aux73=(t1**2)*(erfc(((((2.**-0.5)*(((sigma**2)+(mu*t1))-(t*t1)))/t1)/\
sigma)));
aux74=(np.exp((0.5*((t1**-2.)*((sigma**2)+((2.*(mu*t1))+(-2.*(t*t1))))\
))))*((mysqrt((2.*pi)))*(sigma*aux73));
aux75=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux76=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((0.5*pi)))*(sigma*((t2**2)*(-1.+(erf(aux75))))));
aux77=((((aux66-(Amp*(c*aux68)))-(Amp*aux70))-(Amp*(c*aux72)))-(Amp*\
aux74))-((Amp**2)*((c**2)*aux76));
aux78=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t2))-(((2.**-0.5)\
*t)/sigma);
aux79=(np.exp(((2.*((sigma**2)*(t2**-2.)))+(((2.*mu)/t2)+((-2.*t)/t2))\
)))*((mysqrt((0.5*pi)))*(sigma*((t2**2)*(-1.+(erf(aux78))))));
aux80=(0.5*((sigma**2)*(t1**-2.)))+((mu/t1)+((0.5*((sigma**2)*(t2**-2.)))+((mu/t2)+(((sigma**2)/t2)/t1))));
aux81=(((2.**-0.5)*mu)/sigma)+((((2.**-0.5)*sigma)/t1)+(((2.**-0.5)*\
sigma)/t2));
aux82=(mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf((aux81-(((2.**-0.5)*t)/sigma))))))));
aux83=(aux77-((Amp**2)*aux79))-((Amp**2)*((np.exp(((aux80-(t/t2))-(t/\
t1))))*aux82));
aux84=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux85=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux84)))))));
aux86=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t2))-(((2.**-0.5)*\
t)/sigma);
aux87=(np.exp((((0.5*((sigma**2)*(t2**-2.)))+(mu/t2))-(t/t2))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux86)))))));
aux88=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux89=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux88))))));
aux90=((aux83-((Amp**2)*((c**2)*aux85)))-((Amp**2)*aux87))-((Amp**2)*(\
c*aux89));
aux91=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux92=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((0.5*pi)))*(sigma*((t1**2)*(-1.+(erf(aux91))))));
aux93=((((2.**-0.5)*mu)/sigma)+(((mysqrt(2.))*sigma)/t1))-(((2.**-0.5)\
*t)/sigma);
aux94=(np.exp(((2.*((sigma**2)*(t1**-2.)))+(((2.*mu)/t1)+((-2.*t)/t1))\
)))*((mysqrt((0.5*pi)))*(sigma*((t1**2)*(-1.+(erf(aux93))))));
aux95=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux96=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*(t1*(t2*(-1.+(erf(aux95)))))));
aux97=((aux90-((Amp**2)*((c**2)*aux92)))-((Amp**2)*aux94))-((Amp**2)*(\
(c**2)*aux96));
aux98=((((2.**-0.5)*mu)/sigma)+(((2.**-0.5)*sigma)/t1))-(((2.**-0.5)*\
t)/sigma);
aux99=(np.exp((((0.5*((sigma**2)*(t1**-2.)))+(mu/t1))-(t/t1))))*((\
mysqrt((2.*pi)))*(sigma*((t1**2)*(-1.+(erf(aux98))))));
aux100=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux101=sigma*((t2**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux102=((aux97-((Amp**2)*aux99))-((Amp**2)*(c*((mysqrt((2.*pi)))*\
aux100))))-(Amp*((mysqrt((2.*pi)))*aux101));
aux103=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)\
/sigma)))))));
aux104=sigma*(t1*(t2*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*t)\
/sigma)))))));
aux105=(aux102-((Amp**2)*((c**2)*((mysqrt((2.*pi)))*aux103))))-((\
Amp**2)*((mysqrt((2.*pi)))*aux104));
aux106=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux107=sigma*((t1**2)*(1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma))))));
aux108=(aux105-((Amp**2)*(c*((mysqrt((2.*pi)))*aux106))))-(Amp*((\
mysqrt((2.*pi)))*aux107));
aux109=(((t1-t2)**2))*(-1.-(erf(((((2.**-0.5)*mu)/sigma)-(((2.**-0.5)*\
t)/sigma)))));
aux110=((2.*pi)**-0.5)*(((t1-t2)**-2.)*(aux108-((mysqrt((0.5*pi)\
))*(sigma*aux109))));
output=aux110/sigma;
return output
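# Illustrative sketch (not part of the original module): one quick way to look
# at the model curve produced by the closed-form expression above without
# setting up a full fit. Passing None for `self` works only because fit_func
# does not touch self; the parameter values are arbitrary (t1 != t2 on
# purpose, see the docstring).
def _example_four_level_curve():
    import numpy as np
    t = np.linspace(-1.0, 10.0, 300)
    return FourLevel.fit_func(None, t, Amp=0.3, t1=1.0, t2=0.7, c=0.8,
                              mu=0.0, sigma=0.2)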
class FourLevelDifference(FourLevel):
"""This is the fit model for the four level model if difference instead
of ratio is used. The only difference is that we need to subtract -1. This
is due to two things. First, the convolution is distributive, second convolution
of -1 with gaussian is -1. Therefore this is the correct and most simple solution.
"""
def fit_func(self, t, Amp, t1, t2, c, mu, sigma):
return super().fit_func(t, Amp, t1, t2, c, mu, sigma) - 1
| mit |
jmmease/pandas | pandas/tests/io/parser/test_network.py | 4 | 8535 | # -*- coding: utf-8 -*-
"""
Tests parsers ability to read and parse non-local files
and hence require a network connection to be read.
"""
import os
import pytest
import moto
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.io.parsers import read_csv, read_table
from pandas.compat import BytesIO
@pytest.fixture(scope='module')
def tips_file():
return os.path.join(tm.get_data_path(), 'tips.csv')
@pytest.fixture(scope='module')
def salaries_table():
path = os.path.join(tm.get_data_path(), 'salaries.csv')
return read_table(path)
@pytest.fixture(scope='module')
def s3_resource(tips_file):
pytest.importorskip('s3fs')
moto.mock_s3().start()
test_s3_files = [
('tips.csv', tips_file),
('tips.csv.gz', tips_file + '.gz'),
('tips.csv.bz2', tips_file + '.bz2'),
]
def add_tips_files(bucket_name):
for s3_key, file_name in test_s3_files:
with open(file_name, 'rb') as f:
conn.Bucket(bucket_name).put_object(
Key=s3_key,
Body=f)
boto3 = pytest.importorskip('boto3')
# see gh-16135
bucket = 'pandas-test'
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
add_tips_files(bucket)
conn.create_bucket(Bucket='cant_get_it', ACL='private')
add_tips_files('cant_get_it')
yield conn
moto.mock_s3().stop()
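# Illustrative sketch (not part of the original test module): a minimal,
# standalone version of what the s3_resource fixture above wires up -- a
# moto-mocked bucket that pandas can then read through an s3:// URL (this
# still needs s3fs installed, like the tests themselves). Bucket and key
# names are arbitrary.
def _example_mocked_s3_roundtrip():
    import boto3
    import moto
    import pandas as pd
    with moto.mock_s3():
        conn = boto3.resource("s3", region_name="us-east-1")
        conn.create_bucket(Bucket="example-bucket")
        conn.Bucket("example-bucket").put_object(Key="tiny.csv",
                                                 Body=b"a,b\n1,2\n")
        return pd.read_csv("s3://example-bucket/tiny.csv")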
@pytest.mark.network
@pytest.mark.parametrize(
"compression,extension",
[('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
pytest.param('xz', '.xz',
marks=pytest.mark.skipif(not tm._check_if_lzma(),
reason='need backports.lzma '
'to run'))])
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compression, extension, mode, engine):
check_compressed_urls(salaries_table, compression, extension, mode, engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
engine):
# test reading compressed urls with various engines and
# extension inference
base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
'pandas/tests/io/parser/data/salaries.csv')
url = base_url + extension
if mode != 'explicit':
compression = mode
url_table = read_table(url, compression=compression, engine=engine)
tm.assert_frame_equal(url_table, salaries_table)
class TestS3(object):
@tm.network
def test_parse_public_s3_bucket(self):
pytest.importorskip('s3fs')
# more of an integration test due to the not-public contents portion
# can probably mock this though.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(tm.get_data_path('tips.csv')), df)
def test_parse_public_s3n_bucket(self, s3_resource):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
def test_parse_public_s3a_bucket(self, s3_resource):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
def test_parse_public_s3_bucket_nrows(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
def test_parse_public_s3_bucket_chunked(self, s3_resource):
# Read with a chunksize
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_chunked_python(self, s3_resource):
# Read with a chunksize using the Python parser
chunksize = 5
local_tips = read_csv(tm.get_data_path('tips.csv'))
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp,
engine='python')
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = local_tips.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
def test_infer_s3_compression(self, s3_resource):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')), df)
def test_parse_public_s3_bucket_nrows_python(self, s3_resource):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(read_csv(
tm.get_data_path('tips.csv')).iloc[:10], df)
def test_s3_fails(self, s3_resource):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
# Receive a permission error when trying to read a private bucket.
# It's irrelevant here that this isn't actually a table.
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
def test_read_csv_handles_boto_s3_object(self,
s3_resource,
tips_file):
# see gh-16135
s3_object = s3_resource.meta.client.get_object(
Bucket='pandas-test',
Key='tips.csv')
result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
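# Illustrative sketch (not part of the original test module): the pattern the
# sampler tests above rely on -- map the samples through a fitted approximator
# and compare inner products of the features with the exact kernel. With
# n_components equal to n_samples the Nystroem map reproduces the RBF kernel
# essentially exactly; the numbers below are arbitrary demo values.
def _example_kernel_approximation_pattern():
    rnd = np.random.RandomState(0)
    X_demo = rnd.random_sample(size=(30, 5))
    feats = Nystroem(gamma=1., n_components=30, random_state=0).fit_transform(X_demo)
    approx = np.dot(feats, feats.T)
    exact = rbf_kernel(X_demo, gamma=1.)
    return np.max(np.abs(approx - exact))  # should be tiny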
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
richardtran415/pymatgen | pymatgen/io/abinit/pseudos.py | 5 | 65306 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides objects describing the basic parameters of the
pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.
"""
import abc
import collections
import logging
import os
import sys
from collections import OrderedDict, defaultdict, namedtuple
import numpy as np
from monty.collections import AttrDict, Namespace
# from monty.dev import deprecated
from monty.functools import lazy_property
from monty.itertools import iterator_from_slice
from monty.json import MontyDecoder, MSONable
from monty.os.path import find_exts
from monty.string import is_string, list_strings
from tabulate import tabulate
from pymatgen.core.periodic_table import Element
from pymatgen.core.xcfunc import XcFunc
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from pymatgen.util.serialization import pmg_serialize
logger = logging.getLogger(__name__)
__all__ = [
"Pseudo",
"PseudoTable",
]
__author__ = "Matteo Giantomassi"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return "\n".join((traceback.format_exc(), str(sys.exc_info()[0])))
def _read_nlines(filename, nlines):
"""
Read at most nlines lines from file filename.
If nlines is < 0, the entire file is read.
"""
if nlines < 0:
with open(filename, "r") as fh:
return fh.readlines()
lines = []
with open(filename, "r") as fh:
for lineno, line in enumerate(fh):
if lineno == nlines:
break
lines.append(line)
return lines
_l2str = {
0: "s",
1: "p",
2: "d",
3: "f",
4: "g",
5: "h",
6: "i",
}
_str2l = {v: k for k, v in _l2str.items()}
def l2str(l):
"""Convert the angular momentum l (int) to string."""
try:
return _l2str[l]
except KeyError:
return "Unknown angular momentum, received l = %s" % l
def str2l(s):
"""Convert a string to the angular momentum l (int)"""
return _str2l[s]
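# Illustrative example (not part of the original module): round-tripping the
# angular-momentum helpers defined above, e.g. l2str(2) == "d" and
# str2l("d") == 2.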
class Pseudo(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the methods that must be
implemented by the concrete pseudopotential sub-classes.
"""
@classmethod
def as_pseudo(cls, obj):
"""
Convert obj into a pseudo. Accepts:
* Pseudo object.
* string defining a valid path.
"""
return obj if isinstance(obj, cls) else cls.from_file(obj)
@staticmethod
def from_file(filename):
"""
Build an instance of a concrete Pseudo subclass from filename.
Note: the parser knows the concrete class that should be instantiated
Client code should rely on the abstract interface provided by Pseudo.
"""
return PseudoParser().parse(filename)
def __eq__(self, other):
if other is None:
return False
return (
self.md5 == other.md5
and self.__class__ == other.__class__
and self.Z == other.Z
and self.Z_val == other.Z_val
and self.l_max == other.l_max
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
try:
return "<%s at %s>" % (
self.__class__.__name__,
os.path.relpath(self.filepath),
)
except Exception:
            # relpath can fail if the code is executed in daemon mode.
return "<%s at %s>" % (self.__class__.__name__, self.filepath)
def __str__(self):
return self.to_string()
def to_string(self, verbose=0):
"""String representation."""
# pylint: disable=E1101
lines = []
app = lines.append
app("<%s: %s>" % (self.__class__.__name__, self.basename))
app(" summary: " + self.summary.strip())
app(" number of valence electrons: %s" % self.Z_val)
app(" maximum angular momentum: %s" % l2str(self.l_max))
app(" angular momentum for local part: %s" % l2str(self.l_local))
app(" XC correlation: %s" % self.xc)
app(" supports spin-orbit: %s" % self.supports_soc)
if self.isnc:
app(" radius for non-linear core correction: %s" % self.nlcc_radius)
if self.has_hints:
for accuracy in ("low", "normal", "high"):
hint = self.hint_for_accuracy(accuracy=accuracy)
app(" hint for %s accuracy: %s" % (accuracy, str(hint)))
return "\n".join(lines)
@property
@abc.abstractmethod
def summary(self):
"""String summarizing the most important properties."""
@property
def filepath(self):
"""Absolute path to pseudopotential file."""
# pylint: disable=E1101
return os.path.abspath(self.path)
@property
def basename(self):
"""File basename."""
# pylint: disable=E1101
return os.path.basename(self.filepath)
@property
@abc.abstractmethod
def Z(self):
"""The atomic number of the atom."""
@property
@abc.abstractmethod
def Z_val(self):
"""Valence charge."""
@property
def type(self):
"""Type of pseudo."""
return self.__class__.__name__
@property
def element(self):
"""Pymatgen :class:`Element`."""
try:
return Element.from_Z(self.Z)
except (KeyError, IndexError):
return Element.from_Z(int(self.Z))
@property
def symbol(self):
"""Element symbol."""
return self.element.symbol
@property
@abc.abstractmethod
def l_max(self):
"""Maximum angular momentum."""
@property
@abc.abstractmethod
def l_local(self):
"""Angular momentum used for the local part."""
@property
def isnc(self):
"""True if norm-conserving pseudopotential."""
return isinstance(self, NcPseudo)
@property
def ispaw(self):
"""True if PAW pseudopotential."""
return isinstance(self, PawPseudo)
@lazy_property
def md5(self):
"""MD5 hash value."""
# if self.has_dojo_report and "md5" in self.dojo_report: return self.dojo_report["md5"]
return self.compute_md5()
def compute_md5(self):
"""Compute and erturn MD5 hash value."""
# pylint: disable=E1101
import hashlib
with open(self.path, "rt") as fh:
text = fh.read()
m = hashlib.md5(text.encode("utf-8"))
return m.hexdigest()
@property
@abc.abstractmethod
def supports_soc(self):
"""
True if the pseudo can be used in a calculation with spin-orbit coupling.
Base classes should provide a concrete implementation that computes this value.
"""
@pmg_serialize
def as_dict(self, **kwargs):
"""Return dictionary for MSONable protocol."""
# pylint: disable=E1101
return dict(
basename=self.basename,
type=self.type,
symbol=self.symbol,
Z=self.Z,
Z_val=self.Z_val,
l_max=self.l_max,
md5=self.md5,
filepath=self.filepath,
# xc=self.xc.as_dict(),
)
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
new = cls.from_file(d["filepath"])
# Consistency test based on md5
if "md5" in d and d["md5"] != new.md5:
raise ValueError(
"The md5 found in file does not agree with the one in dict\n"
"Received %s\nComputed %s" % (d["md5"], new.md5)
)
return new
def as_tmpfile(self, tmpdir=None):
"""
        Copy the pseudopotential to a temporary file and return a new pseudopotential object.
Useful for unit tests in which we have to change the content of the file.
Args:
tmpdir: If None, a new temporary directory is created and files are copied here
else tmpdir is used.
"""
# pylint: disable=E1101
import shutil
import tempfile
tmpdir = tempfile.mkdtemp() if tmpdir is None else tmpdir
new_path = os.path.join(tmpdir, self.basename)
shutil.copy(self.filepath, new_path)
# Copy dojoreport file if present.
root, ext = os.path.splitext(self.filepath)
djrepo = root + ".djrepo"
if os.path.exists(djrepo):
shutil.copy(djrepo, os.path.join(tmpdir, os.path.basename(djrepo)))
# Build new object and copy dojo_report if present.
new = self.__class__.from_file(new_path)
if self.has_dojo_report:
new.dojo_report = self.dojo_report.deepcopy()
return new
@property
def has_dojo_report(self):
"""True if the pseudo has an associated `DOJO_REPORT` section."""
# pylint: disable=E1101
return hasattr(self, "dojo_report") and bool(self.dojo_report)
@property
def djrepo_path(self):
"""The path of the djrepo file. None if file does not exist."""
# pylint: disable=E1101
root, ext = os.path.splitext(self.filepath)
path = root + ".djrepo"
return path
# if os.path.exists(path): return path
# return None
def hint_for_accuracy(self, accuracy="normal"):
"""
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and
pawecutdg [Ha] for the given accuracy.
ecut and pawecutdg are set to zero if no hint is available.
Args:
accuracy: ["low", "normal", "high"]
"""
# pylint: disable=E1101
if not self.has_dojo_report:
return Hint(ecut=0.0, pawecutdg=0.0)
# Get hints from dojoreport. Try first in hints then in ppgen_hints.
if "hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["hints"][accuracy])
if "ppgen_hints" in self.dojo_report:
return Hint.from_dict(self.dojo_report["ppgen_hints"][accuracy])
return Hint(ecut=0.0, pawecutdg=0.0)
@property
def has_hints(self):
"""
True if self provides hints on the cutoff energy.
"""
for acc in ["low", "normal", "high"]:
try:
if self.hint_for_accuracy(acc) is None:
return False
except KeyError:
return False
return True
def open_pspsfile(self, ecut=20, pawecutdg=None):
"""
Calls Abinit to compute the internal tables for the application of the
pseudopotential part. Returns :class:`PspsFile` object providing methods
to plot and analyze the data or None if file is not found or it's not readable.
Args:
ecut: Cutoff energy in Hartree.
pawecutdg: Cutoff energy for the PAW double grid.
"""
from abipy.abio.factories import gs_input
from abipy.core.structure import Structure
from abipy.electrons.psps import PspsFile
from abipy.flowtk import AbinitTask
# Build fake structure.
lattice = 10 * np.eye(3)
structure = Structure(lattice, [self.element], coords=[[0, 0, 0]])
if self.ispaw and pawecutdg is None:
pawecutdg = ecut * 4
inp = gs_input(
structure,
pseudos=[self],
ecut=ecut,
pawecutdg=pawecutdg,
spin_mode="unpolarized",
kppa=1,
)
# Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop.
inp["prtpsps"] = -1
# Build temporary task and run it (ignore retcode because we don't exit cleanly)
task = AbinitTask.temp_shell_task(inp)
task.start_and_wait()
filepath = task.outdir.has_abiext("_PSPS.nc")
if not filepath:
logger.critical("Cannot find PSPS.nc file in %s" % task.outdir)
return None
# Open the PSPS.nc file.
try:
return PspsFile(filepath)
except Exception as exc:
logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc)))
return None
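# Illustrative example (not part of the original module): typical client-code
# entry point for the abstract interface above. The path is hypothetical.
#
#     pseudo = Pseudo.from_file("14si.pspnc")
#     print(pseudo.symbol, pseudo.Z_val, pseudo.isnc, pseudo.l_max)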
class NcPseudo(metaclass=abc.ABCMeta):
"""
Abstract class defining the methods that must be implemented
by the concrete classes representing norm-conserving pseudopotentials.
"""
@property
@abc.abstractmethod
def nlcc_radius(self):
"""
        Radius at which the core charge vanishes (i.e. cut-off in a.u.).
Returns 0.0 if nlcc is not used.
"""
@property
def has_nlcc(self):
"""True if the pseudo is generated with non-linear core correction."""
return self.nlcc_radius > 0.0
@property
def rcore(self):
"""Radius of the pseudization sphere in a.u."""
try:
return self._core
except AttributeError:
return None
class PawPseudo(metaclass=abc.ABCMeta):
"""
Abstract class that defines the methods that must be implemented
by the concrete classes representing PAW pseudopotentials.
"""
# def nlcc_radius(self):
# """
# Radius at which the core charge vanish (i.e. cut-off in a.u.).
# Returns 0.0 if nlcc is not used.
# """
# return 0.0
#
# @property
# def has_nlcc(self):
# """True if the pseudo is generated with non-linear core correction."""
# return True
@property
@abc.abstractmethod
def paw_radius(self):
"""Radius of the PAW sphere in a.u."""
@property
def rcore(self):
"""Alias of paw_radius."""
return self.paw_radius
class AbinitPseudo(Pseudo):
"""
An AbinitPseudo is a pseudopotential whose file contains an abinit header.
"""
def __init__(self, path, header):
"""
Args:
path: Filename.
header: :class:`AbinitHeader` instance.
"""
self.path = path
self.header = header
self._summary = header.summary
# Build xc from header.
self.xc = XcFunc.from_abinit_ixc(header["pspxc"])
for attr_name, desc in header.items():
value = header.get(attr_name, None)
# Hide these attributes since one should always use the public interface.
setattr(self, "_" + attr_name, value)
@property
def summary(self):
"""Summary line reported in the ABINIT header."""
return self._summary.strip()
@property
def Z(self):
# pylint: disable=E1101
return self._zatom
@property
def Z_val(self):
# pylint: disable=E1101
return self._zion
@property
def l_max(self):
# pylint: disable=E1101
return self._lmax
@property
def l_local(self):
# pylint: disable=E1101
return self._lloc
@property
def supports_soc(self):
        # Treat ONCVPSP pseudos
# pylint: disable=E1101
if self._pspcod == 8:
switch = self.header["extension_switch"]
if switch in (0, 1):
return False
if switch in (2, 3):
return True
raise ValueError("Don't know how to handle extension_switch: %s" % switch)
# TODO Treat HGH HGHK pseudos
# As far as I know, other Abinit pseudos do not support SOC.
return False
class NcAbinitPseudo(NcPseudo, AbinitPseudo):
"""Norm-conserving pseudopotential in the Abinit format."""
@property
def summary(self):
return self._summary.strip()
@property
def Z(self):
# pylint: disable=E1101
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
# pylint: disable=E1101
return self._zion
@property
def l_max(self):
# pylint: disable=E1101
return self._lmax
@property
def l_local(self):
# pylint: disable=E1101
return self._lloc
@property
def nlcc_radius(self):
# pylint: disable=E1101
return self._rchrg
class PawAbinitPseudo(PawPseudo, AbinitPseudo):
"""Paw pseudopotential in the Abinit format."""
@property
def paw_radius(self):
# pylint: disable=E1101
return self._r_cut
# def orbitals(self):
@property
def supports_soc(self):
return True
class Hint:
"""
Suggested value for the cutoff energy [Hartree units]
and the cutoff energy for the dense grid (only for PAW pseudos).
"""
def __init__(self, ecut, pawecutdg=None):
self.ecut = ecut
self.pawecutdg = ecut if pawecutdg is None else pawecutdg
def __str__(self):
if self.pawecutdg is not None:
return "ecut: %s, pawecutdg: %s" % (self.ecut, self.pawecutdg)
return "ecut: %s" % (self.ecut)
@pmg_serialize
def as_dict(self):
"""Return dictionary for MSONable protocol."""
return dict(ecut=self.ecut, pawecutdg=self.pawecutdg)
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
return cls(**{k: v for k, v in d.items() if not k.startswith("@")})
def _dict_from_lines(lines, key_nums, sep=None):
"""
Helper function to parse formatted text structured like:
value1 value2 ... sep key1, key2 ...
key_nums is a list giving the number of keys for each line. 0 if line should be skipped.
sep is a string denoting the character that separates the keys from the value (None if
no separator is present).
Returns:
dict{key1 : value1, key2 : value2, ...}
Raises:
ValueError if parsing fails.
"""
if is_string(lines):
lines = [lines]
if not isinstance(key_nums, collections.abc.Iterable):
        key_nums = [key_nums]  # wrap a single value into a list
if len(lines) != len(key_nums):
err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums))
raise ValueError(err_msg)
kwargs = Namespace()
for (i, nk) in enumerate(key_nums):
if nk == 0:
continue
line = lines[i]
tokens = [t.strip() for t in line.split()]
values, keys = tokens[:nk], "".join(tokens[nk:])
        # Sanitize keys: In some cases we might get strings in the form: foo[,bar]
        keys = keys.replace("[", "").replace("]", "")
keys = keys.split(",")
if sep is not None:
check = keys[0][0]
if check != sep:
raise ValueError("Expecting separator %s, got %s" % (sep, check))
keys[0] = keys[0][1:]
if len(values) != len(keys):
msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (
line,
keys,
values,
)
raise ValueError(msg)
kwargs.update(zip(keys, values))
return kwargs
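# Illustrative sketch (not part of the original module): what _dict_from_lines
# returns for a typical abinit header record such as the FHI example quoted in
# NcAbinitHeader.fhi_header below. Values stay raw strings; the conversion to
# int/float happens later in NcAbinitHeader.__init__.
def _example_dict_from_lines():
    line = " 21.00000   3.00000    940714   zatom, zion, pspdat"
    return _dict_from_lines([line], [3])
    # keys 'zatom', 'zion', 'pspdat' mapped to '21.00000', '3.00000', '940714'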
class AbinitHeader(dict):
"""Dictionary whose keys can be also accessed as attributes."""
def __getattr__(self, name):
try:
# Default behaviour
return super().__getattribute__(name)
except AttributeError:
try:
# Try in the dictionary.
return self[name]
except KeyError as exc:
raise AttributeError(str(exc))
def _int_from_str(string):
"""
Convert string into integer
Raise:
TypeError if string is not a valid integer
"""
float_num = float(string)
int_num = int(float_num)
if float_num == int_num:
return int_num
# Needed to handle pseudos with fractional charge
int_num = np.rint(float_num)
logger.warning("Converting float %s to int %s" % (float_num, int_num))
return int_num
class NcAbinitHeader(AbinitHeader):
"""The abinit header found in the NC pseudopotential files."""
_attr_desc = namedtuple("_attr_desc", "default astype")
_VARS = {
# Mandatory
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"mmax": _attr_desc(None, float),
# Optional variables for non linear-core correction. HGH does not have it.
"rchrg": _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.)
"fchrg": _attr_desc(0.0, float),
"qchrg": _attr_desc(0.0, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super().__init__()
# pseudos generated by APE use llocal instead of lloc.
if "llocal" in kwargs:
kwargs["lloc"] = kwargs.pop("llocal")
self.summary = summary.strip()
for key, desc in NcAbinitHeader._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except Exception:
raise RuntimeError("Conversion Error for key %s, value %s" % (key, value))
self[key] = value
# Add remaining arguments, e.g. extension_switch
if kwargs:
self.update(kwargs)
@staticmethod
def fhi_header(filename, ppdesc):
"""
Parse the FHI abinit header. Example:
Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994
21.00000 3.00000 940714 zatom, zion, pspdat
1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, 4)
try:
header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
except ValueError:
# The last record with rchrg ... seems to be optional.
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def hgh_header(filename, ppdesc):
"""
Parse the HGH abinit header. Example:
Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998)
10 8 010605 zatom,zion,pspdat
3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well
"""
lines = _read_nlines(filename, 3)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def gth_header(filename, ppdesc):
"""
Parse the GTH abinit header. Example:
Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996
1 1 960508 zatom,zion,pspdat
2 1 0 0 2001 0. pspcod,pspxc,lmax,lloc,mmax,r2well
0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4
0 0 0 rs, h1s, h2s
0 0 rp, h1p
1.36 .2 0.6 rcutoff, rloc
"""
lines = _read_nlines(filename, 7)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
return NcAbinitHeader(summary, **header)
@staticmethod
def oncvpsp_header(filename, ppdesc):
"""
Parse the ONCVPSP abinit header. Example:
Li ONCVPSP r_core= 2.01 3.02
3.0000 3.0000 140504 zatom,zion,pspd
8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well
5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg
2 2 0 0 0 nproj
0 extension_switch
0 -2.5000025868368D+00 -1.2006906995331D+00
1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00
2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02
"""
lines = _read_nlines(filename, 6)
header = _dict_from_lines(lines[:3], [0, 3, 6])
summary = lines[0]
        # Rename pspd to pspdat for consistency with the other headers.
        header["pspdat"] = header.pop("pspd")
# Read extension switch
header["extension_switch"] = int(lines[5].split()[0])
return NcAbinitHeader(summary, **header)
@staticmethod
def tm_header(filename, ppdesc):
"""
Parse the TM abinit header. Example:
Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994
100.00000 14.00000 940714 zatom, zion, pspdat
1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well
0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp
.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg
"""
lines = _read_nlines(filename, -1)
header = []
for lineno, line in enumerate(lines):
header.append(line)
if lineno == 2:
# Read lmax.
tokens = line.split()
pspcod, pspxc, lmax, lloc = map(int, tokens[:4])
mmax, r2well = map(float, tokens[4:6])
# if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well":
# raise RuntimeError("%s: Invalid line\n %s" % (filename, line))
lines = lines[3:]
break
# TODO
# Parse the section with the projectors.
# 0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp
# .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm
projectors = OrderedDict()
for idx in range(2 * (lmax + 1)):
line = lines[idx]
if idx % 2 == 0:
proj_info = [
line,
]
if idx % 2 == 1:
proj_info.append(line)
d = _dict_from_lines(proj_info, [5, 4])
projectors[int(d["l"])] = d
# Add the last line with info on nlcc.
header.append(lines[idx + 1])
summary = header[0]
header = _dict_from_lines(header, [0, 3, 6, 3])
return NcAbinitHeader(summary, **header)
class PawAbinitHeader(AbinitHeader):
"""The abinit header found in the PAW pseudopotential files."""
_attr_desc = namedtuple("_attr_desc", "default astype")
_VARS = {
"zatom": _attr_desc(None, _int_from_str),
"zion": _attr_desc(None, float),
"pspdat": _attr_desc(None, float),
"pspcod": _attr_desc(None, int),
"pspxc": _attr_desc(None, int),
"lmax": _attr_desc(None, int),
"lloc": _attr_desc(None, int),
"mmax": _attr_desc(None, int),
"r2well": _attr_desc(None, float),
"pspfmt": _attr_desc(None, str),
"creatorID": _attr_desc(None, int),
"basis_size": _attr_desc(None, int),
"lmn_size": _attr_desc(None, int),
"orbitals": _attr_desc(None, list),
"number_of_meshes": _attr_desc(None, int),
"r_cut": _attr_desc(None, float), # r_cut(PAW) in the header
"shape_type": _attr_desc(None, int),
"rshape": _attr_desc(None, float),
}
del _attr_desc
def __init__(self, summary, **kwargs):
super().__init__()
self.summary = summary.strip()
for key, desc in self._VARS.items():
default, astype = desc.default, desc.astype
value = kwargs.pop(key, None)
if value is None:
value = default
if default is None:
raise RuntimeError("Attribute %s must be specified" % key)
else:
try:
value = astype(value)
except Exception:
raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value))
self[key] = value
if kwargs:
raise RuntimeError("kwargs should be empty but got %s" % str(kwargs))
@staticmethod
def paw_header(filename, ppdesc):
"""
Parse the PAW abinit header. Examples:
Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5
28.000 18.000 20061204 : zatom,zion,pspdat
7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw3 1305 : pspfmt,creatorID
5 13 : basis_size,lmn_size
0 0 1 1 2 : orbitals
3 : number_of_meshes
1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step]
2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step]
3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step]
2.3000000000 : r_cut(SPH)
2 0.
Another format:
C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0
6.000 4.000 20090106 : zatom,zion,pspdat
7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw4 2230 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step]
1.5550009124 : r_cut(PAW)
3 0. : shape_type,rshape
        Yet another one:
Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1
14.000 4.000 20120814 : zatom,zion,pspdat
7 11 1 0 663 0. : pspcod,pspxc,lmax,lloc,mmax,r2well
paw5 1331 : pspfmt,creatorID
4 8 : basis_size,lmn_size
0 0 1 1 : orbitals
5 : number_of_meshes
1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step]
2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step]
3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step]
4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step]
5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step]
1.5669671236 : r_cut(PAW)
2 0. : shape_type,rshape
"""
supported_formats = ["paw3", "paw4", "paw5"]
if ppdesc.format not in supported_formats:
raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats))
lines = _read_nlines(filename, -1)
summary = lines[0]
header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":")
lines = lines[5:]
# TODO
# Parse orbitals and number of meshes.
header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()]
header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0])
# print filename, header
        # Skip the mesh description lines.
        lines = lines[2 + num_meshes:]
# for midx in range(num_meshes):
# l = midx + 1
# print lines[0]
header["r_cut"] = float(lines[0].split(":")[0])
# print lines[1]
header.update(_dict_from_lines(lines[1], [2], sep=":"))
# print("PAW header\n", header)
return PawAbinitHeader(summary, **header)
class PseudoParserError(Exception):
"""Base Error class for the exceptions raised by :class:`PseudoParser`"""
class PseudoParser:
"""
Responsible for parsing pseudopotential files and returning pseudopotential objects.
Usage::
pseudo = PseudoParser().parse("filename")
"""
Error = PseudoParserError
# Supported values of pspcod
ppdesc = namedtuple("ppdesc", "pspcod name psp_type format")
# TODO Recheck
_PSPCODES = OrderedDict(
{
1: ppdesc(1, "TM", "NC", None),
2: ppdesc(2, "GTH", "NC", None),
3: ppdesc(3, "HGH", "NC", None),
4: ppdesc(4, "Teter", "NC", None),
# 5: ppdesc(5, "NC", , None),
6: ppdesc(6, "FHI", "NC", None),
            7: ppdesc(7, "PAW_abinit_text", "PAW", None),
8: ppdesc(8, "ONCVPSP", "NC", None),
10: ppdesc(10, "HGHK", "NC", None),
}
)
del ppdesc
    # Renumber functionals from oncvpsp. TODO: confirm that 3 is 2.
# _FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'},
# 2: {'n': 5, 'name': 'HL'},
# 3: {'n': 2, 'name': 'PWCA'},
# 4: {'n': 11, 'name': 'PBE'}}
def __init__(self):
        # List of files that have been parsed successfully.
self._parsed_paths = []
        # List of files that could not be parsed.
self._wrong_paths = []
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
        exclude_exts = ["." + e.strip() if not e.strip().startswith(".") else e.strip()
                        for e in exclude_exts]
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if ext in exclude_exts or fname in exclude_fnames or fname.startswith(".") or not os.path.isfile(path):
continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
except Exception:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
                self._parsed_paths.append(path)
else:
                self._wrong_paths.append(path)
return pseudos
def read_ppdesc(self, filename):
"""
Read the pseudopotential descriptor from file filename.
Returns:
Pseudopotential descriptor. None if filename is not a valid pseudopotential file.
Raises:
`PseudoParserError` if fileformat is not supported.
"""
if filename.endswith(".xml"):
raise self.Error("XML pseudo not supported yet")
# Assume file with the abinit header.
lines = _read_nlines(filename, 80)
for lineno, line in enumerate(lines):
if lineno == 2:
try:
tokens = line.split()
pspcod, pspxc = map(int, tokens[:2])
except Exception:
msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (
filename,
line,
)
logger.critical(msg)
return None
# if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well",
# "pspcod,pspxc,lmax,llocal,mmax,r2well"]:
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
if pspcod not in self._PSPCODES:
raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod))
ppdesc = self._PSPCODES[pspcod]
if pspcod == 7:
# PAW -> need to know the format pspfmt
tokens = lines[lineno + 1].split()
pspfmt, creatorID = tokens[:2]
# if tokens[-1].strip() != "pspfmt,creatorID":
# raise self.Error("%s: Invalid line\n %s" % (filename, line))
# return None
ppdesc = ppdesc._replace(format=pspfmt)
return ppdesc
return None
def parse(self, filename):
"""
Read and parse a pseudopotential file. Main entry point for client code.
Returns:
pseudopotential object or None if filename is not a valid pseudopotential file.
"""
path = os.path.abspath(filename)
# Only PAW supports XML at present.
if filename.endswith(".xml"):
return PawXmlSetup(path)
ppdesc = self.read_ppdesc(path)
if ppdesc is None:
logger.critical("Cannot find ppdesc in %s" % path)
return None
psp_type = ppdesc.psp_type
parsers = {
"FHI": NcAbinitHeader.fhi_header,
"GTH": NcAbinitHeader.gth_header,
"TM": NcAbinitHeader.tm_header,
"Teter": NcAbinitHeader.tm_header,
"HGH": NcAbinitHeader.hgh_header,
"HGHK": NcAbinitHeader.hgh_header,
"ONCVPSP": NcAbinitHeader.oncvpsp_header,
"PAW_abinit_text": PawAbinitHeader.paw_header,
}
try:
header = parsers[ppdesc.name](path, ppdesc)
except Exception:
raise self.Error(path + ":\n" + straceback())
if psp_type == "NC":
pseudo = NcAbinitPseudo(path, header)
elif psp_type == "PAW":
pseudo = PawAbinitPseudo(path, header)
else:
raise NotImplementedError("psp_type not in [NC, PAW]")
return pseudo
# TODO use RadialFunction from pseudo_dojo.
class RadialFunction(namedtuple("RadialFunction", "mesh values")):
"""
Radial Function class.
"""
pass
class PawXmlSetup(Pseudo, PawPseudo):
"""
Setup class for PawXml.
"""
def __init__(self, filepath):
"""
        :param filepath: path to the PAW XML file.
"""
# pylint: disable=E1101
self.path = os.path.abspath(filepath)
        # Get the XML root (this trick is used so that the object is pickleable).
root = self.root
# Get the version of the XML format
self.paw_setup_version = root.get("version")
# Info on the atom.
atom_attrib = root.find("atom").attrib
# self._symbol = atom_attrib["symbol"]
self._zatom = int(float(atom_attrib["Z"]))
self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]])
# Build xc from header.
xc_info = root.find("xc_functional").attrib
self.xc = XcFunc.from_type_name(xc_info["type"], xc_info["name"])
# Old XML files do not define this field!
# In this case we set the PAW radius to None.
# self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"])
# self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()}
pawr_element = root.find("PAW_radius")
self._paw_radius = None
if pawr_element is not None:
self._paw_radius = float(pawr_element.attrib["rpaw"])
# <valence_states>
# <state n="2" l="0" f="2" rc="1.10" e="-0.6766" id="N-2s"/>
# <state n="2" l="1" f="3" rc="1.10" e="-0.2660" id="N-2p"/>
# <state l="0" rc="1.10" e=" 0.3234" id="N-s1"/>
# <state l="1" rc="1.10" e=" 0.7340" id="N-p1"/>
# <state l="2" rc="1.10" e=" 0.0000" id="N-d1"/>
# </valence_states>
#
# The valence_states element contains several state elements.
# For this setup, the first two lines describe bound eigenstates
# with occupation numbers and principal quantum numbers.
        # Notice that the three additional unbound states should have no f and n attributes.
# In this way, we know that only the first two bound states (with f and n attributes)
# should be used for constructing an initial guess for the wave functions.
self.valence_states = OrderedDict()
for node in root.find("valence_states"):
attrib = AttrDict(node.attrib)
assert attrib.id not in self.valence_states
self.valence_states[attrib.id] = attrib
# print(self.valence_states)
# Parse the radial grids
self.rad_grids = {}
for node in root.findall("radial_grid"):
grid_params = node.attrib
gid = grid_params["id"]
assert gid not in self.rad_grids
self.rad_grids[gid] = self._eval_grid(grid_params)
def __getstate__(self):
"""
        Return the state that is pickled as the contents of the instance.
        In this case we just remove the XML root element since Element objects cannot be pickled.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_root"]}
@lazy_property
def root(self):
"""
        Root element of the XML tree.
        """
        from xml.etree import ElementTree as Et
tree = Et.parse(self.filepath)
return tree.getroot()
@property
def Z(self):
return self._zatom
@property
def Z_val(self):
"""Number of valence electrons."""
return self.valence
# FIXME
@property
def l_max(self):
"""Maximum angular momentum."""
return None
@property
def l_local(self):
"""Angular momentum used for the local part."""
return None
@property
def summary(self):
"""String summarizing the most important properties."""
return ""
@property
def paw_radius(self):
return self._paw_radius
@property
def supports_soc(self):
"""
Here I assume that the ab-initio code can treat the SOC within the on-site approximation
"""
return True
@staticmethod
def _eval_grid(grid_params):
"""
        This function receives a dictionary with the parameters defining the
        radial mesh and returns an `ndarray` with the mesh.
"""
eq = grid_params.get("eq").replace(" ", "")
istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend"))
indices = list(range(istart, iend + 1))
if eq == "r=a*exp(d*i)":
a, d = float(grid_params["a"]), float(grid_params["d"])
mesh = [a * np.exp(d * i) for i in indices]
elif eq == "r=a*i/(n-i)":
a, n = float(grid_params["a"]), float(grid_params["n"])
mesh = [a * i / (n - i) for i in indices]
elif eq == "r=a*(exp(d*i)-1)":
a, d = float(grid_params["a"]), float(grid_params["d"])
mesh = [a * (np.exp(d * i) - 1.0) for i in indices]
elif eq == "r=d*i":
d = float(grid_params["d"])
mesh = [d * i for i in indices]
elif eq == "r=(i/n+a)^5/a-a^4":
a, n = float(grid_params["a"]), float(grid_params["n"])
mesh = [(i / n + a) ** 5 / a - a ** 4 for i in indices]
else:
raise ValueError("Unknown grid type: %s" % eq)
return np.array(mesh)
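    # Illustrative sketch of what _eval_grid produces: a (hypothetical) linear grid element
    #   <radial_grid eq="r=d*i" d="0.01" istart="0" iend="3" id="lin"/>
    # yields np.array([0.0, 0.01, 0.02, 0.03]); the other eq strings are handled with the
    # corresponding exponential/rational formulas above.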
def _parse_radfunc(self, func_name):
"""Parse the first occurence of func_name in the XML file."""
# pylint: disable=E1101
node = self.root.find(func_name)
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
return self.rad_grids[grid], values, node.attrib
def _parse_all_radfuncs(self, func_name):
"""Parse all the nodes with tag func_name in the XML file."""
# pylint: disable=E1101
for node in self.root.findall(func_name):
grid = node.attrib["grid"]
values = np.array([float(s) for s in node.text.split()])
yield self.rad_grids[grid], values, node.attrib
@lazy_property
def ae_core_density(self):
"""The all-electron radial density."""
mesh, values, attrib = self._parse_radfunc("ae_core_density")
return RadialFunction(mesh, values)
@lazy_property
def pseudo_core_density(self):
"""The pseudized radial density."""
mesh, values, attrib = self._parse_radfunc("pseudo_core_density")
return RadialFunction(mesh, values)
@lazy_property
def ae_partial_waves(self):
"""Dictionary with the AE partial waves indexed by state."""
ae_partial_waves = OrderedDict()
for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
state = attrib["state"]
# val_state = self.valence_states[state]
ae_partial_waves[state] = RadialFunction(mesh, values)
return ae_partial_waves
@property
def pseudo_partial_waves(self):
"""Dictionary with the pseudo partial waves indexed by state."""
pseudo_partial_waves = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"):
state = attrib["state"]
# val_state = self.valence_states[state]
pseudo_partial_waves[state] = RadialFunction(mesh, values)
return pseudo_partial_waves
@lazy_property
def projector_functions(self):
"""Dictionary with the PAW projectors indexed by state."""
projector_functions = OrderedDict()
for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"):
state = attrib["state"]
# val_state = self.valence_states[state]
projector_functions[state] = RadialFunction(mesh, values)
return projector_functions
def yield_figs(self, **kwargs): # pragma: no cover
"""
This function *generates* a predefined list of matplotlib figures with minimal input from the user.
"""
yield self.plot_densities(title="PAW densities", show=False)
yield self.plot_waves(title="PAW waves", show=False)
yield self.plot_projectors(title="PAW projectors", show=False)
# yield self.plot_potentials(title="potentials", show=False)
@add_fig_kwargs
def plot_densities(self, ax=None, **kwargs):
"""
Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
# ax.set_ylabel('density')
for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
rden = getattr(self, den_name)
label = "$n_c$" if i == 1 else r"$\tilde{n}_c$"
ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2)
ax.legend(loc="best")
return fig
@add_fig_kwargs
def plot_waves(self, ax=None, fontsize=12, **kwargs):
"""
Plot the AE and the pseudo partial waves.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: fontsize for legends and titles
Returns: `matplotlib` figure
"""
# pylint: disable=E1101
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel(r"$r\phi,\, r\tilde\phi\, [Bohr]^{-\frac{1}{2}}$")
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.pseudo_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state)
for state, rfunc in self.ae_partial_waves.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
@add_fig_kwargs
def plot_projectors(self, ax=None, fontsize=12, **kwargs):
"""
Plot the PAW projectors.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns: `matplotlib` figure
"""
# pylint: disable=E1101
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel("r [Bohr]")
ax.set_ylabel(r"$r\tilde p\, [Bohr]^{-\frac{1}{2}}$")
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
for state, rfunc in self.projector_functions.items():
ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state)
ax.legend(loc="best", shadow=True, fontsize=fontsize)
return fig
# @add_fig_kwargs
# def plot_potentials(self, **kwargs):
# """
# ================ ==============================================================
# kwargs Meaning
# ================ ==============================================================
# title Title of the plot (Default: None).
# show True to show the figure (Default).
# savefig 'abc.png' or 'abc.eps' to save the figure to a file.
# ================ ==============================================================
# Returns:
# `matplotlib` figure
# """
# title = kwargs.pop("title", "Potentials")
# show = kwargs.pop("show", True)
# savefig = kwargs.pop("savefig", None)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.grid(True)
# ax.set_xlabel('r [Bohr]')
# ax.set_ylabel('density')
# ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--")
# ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1))
# for state, rfunc in self.potentials.items():
# ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state)
# ax.legend(loc="best")
# if title is not None: fig.suptitle(title)
# if show: plt.show()
# if savefig: fig.savefig(savefig)
# return fig
class PseudoTable(collections.abc.Sequence, MSONable, metaclass=abc.ABCMeta):
"""
Define the pseudopotentials from the element table.
    Individual elements are accessed by name, symbol or atomic number.
For example, the following all retrieve iron:
print elements[26]
Fe
print elements.Fe
Fe
print elements.symbol('Fe')
Fe
print elements.name('iron')
Fe
print elements.isotope('Fe')
Fe
"""
@classmethod
def as_table(cls, items):
"""
Return an instance of :class:`PseudoTable` from the iterable items.
"""
if isinstance(items, cls):
return items
return cls(items)
@classmethod
def from_dir(cls, top, exts=None, exclude_dirs="_*"):
"""
Find all pseudos in the directory tree starting from top.
Args:
top: Top of the directory tree
            exts: List of file extensions. If exts == "all_files",
                we try to open all the files in top.
            exclude_dirs: Wildcard used to exclude directories.
        Returns:
            :class:`PseudoTable` sorted by atomic number Z.
"""
pseudos = []
if exts == "all_files":
for f in [os.path.join(top, fn) for fn in os.listdir(top)]:
if os.path.isfile(f):
try:
p = Pseudo.from_file(f)
if p:
pseudos.append(p)
else:
logger.info("Skipping file %s" % f)
except Exception:
logger.info("Skipping file %s" % f)
if not pseudos:
logger.warning("No pseudopotentials parsed from folder %s" % top)
return None
logger.info("Creating PseudoTable with %i pseudopotentials" % len(pseudos))
else:
if exts is None:
exts = ("psp8",)
for p in find_exts(top, exts, exclude_dirs=exclude_dirs):
try:
pseudos.append(Pseudo.from_file(p))
except Exception as exc:
logger.critical("Error in %s:\n%s" % (p, exc))
return cls(pseudos).sort_by_z()
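    # Illustrative usage sketch for from_dir (the directory and extension below are
    # hypothetical):
    #
    #   >>> table = PseudoTable.from_dir("/path/to/pseudos", exts=("psp8",))
    #   >>> table.allnc    # expected True if the folder only contains NC pseudos
    #   True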
def __init__(self, pseudos):
"""
Args:
pseudos: List of pseudopotentials or filepaths
"""
# Store pseudos in a default dictionary with z as key.
        # Note that we can have more than one pseudo for a given Z,
        # hence the values are lists of pseudos.
if not isinstance(pseudos, collections.abc.Iterable):
pseudos = [pseudos]
if len(pseudos) and is_string(pseudos[0]):
pseudos = list_strings(pseudos)
self._pseudos_with_z = defaultdict(list)
for pseudo in pseudos:
if not isinstance(pseudo, Pseudo):
pseudo = Pseudo.from_file(pseudo)
if pseudo is not None:
self._pseudos_with_z[pseudo.Z].append(pseudo)
for z in self.zlist:
pseudo_list = self._pseudos_with_z[z]
symbols = [p.symbol for p in pseudo_list]
symbol = symbols[0]
if any(symb != symbol for symb in symbols):
raise ValueError("All symbols must be equal while they are: %s" % str(symbols))
setattr(self, symbol, pseudo_list)
def __getitem__(self, Z):
"""
Retrieve pseudos for the atomic number z. Accepts both int and slice objects.
"""
if isinstance(Z, slice):
assert Z.stop is not None
pseudos = []
for znum in iterator_from_slice(Z):
pseudos.extend(self._pseudos_with_z[znum])
return self.__class__(pseudos)
return self.__class__(self._pseudos_with_z[Z])
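    # Illustrative usage sketch for __getitem__ (assumes the table actually contains the
    # requested atomic numbers):
    #
    #   >>> si_pseudos = table[14]     # new PseudoTable with all Z == 14 pseudos
    #   >>> light = table[1:11]        # new PseudoTable with Z from 1 to 10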
def __len__(self):
return len(list(self.__iter__()))
def __iter__(self):
"""Process the elements in Z order."""
for z in self.zlist:
for pseudo in self._pseudos_with_z[z]:
yield pseudo
def __repr__(self):
return "<%s at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return self.to_table()
@property
def allnc(self):
"""True if all pseudos are norm-conserving."""
return all(p.isnc for p in self)
@property
def allpaw(self):
"""True if all pseudos are PAW."""
return all(p.ispaw for p in self)
@property
def zlist(self):
"""Ordered list with the atomic numbers available in the table."""
return sorted(list(self._pseudos_with_z.keys()))
# def max_ecut_pawecutdg(self, accuracy):
# """Return the maximum value of ecut and pawecutdg based on the hints available in the pseudos."""
# ecut = max(p.hint_for_accuracy(accuracy=accuracy).ecut for p in self)
# pawecutdg = max(p.hint_for_accuracy(accuracy=accuracy).pawecutdg for p in self)
# return ecut, pawecutdg
def as_dict(self, **kwargs):
"""Return dictionary for MSONable protocol."""
d = {}
for p in self:
k, count = p.element.name, 1
# k, count = p.element, 1
# Handle multiple-pseudos with the same name!
while k in d:
                k = k.split("#")[0] + "#" + str(count)
count += 1
d.update({k: p.as_dict()})
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
"""Build instance from dictionary (MSONable protocol)."""
pseudos = []
dec = MontyDecoder()
for k, v in d.items():
if not k.startswith("@"):
pseudos.append(dec.process_decoded(v))
return cls(pseudos)
def is_complete(self, zmax=118):
"""
        True if table is complete i.e. all elements with Z < zmax have at least one pseudopotential.
"""
for z in range(1, zmax):
if not self[z]:
return False
return True
def all_combinations_for_elements(self, element_symbols):
"""
        Return a list with all the possible combinations of pseudos
for the given list of element_symbols.
Each item is a list of pseudopotential objects.
Example::
table.all_combinations_for_elements(["Li", "F"])
"""
d = OrderedDict()
for symbol in element_symbols:
d[symbol] = self.select_symbols(symbol, ret_list=True)
from itertools import product
return list(product(*d.values()))
def pseudo_with_symbol(self, symbol, allow_multi=False):
"""
Return the pseudo with the given chemical symbol.
Args:
            symbol: String with the chemical symbol of the element
allow_multi: By default, the method raises ValueError
if multiple occurrences are found. Use allow_multi to prevent this.
Raises:
            ValueError if symbol is not found or multiple occurrences are present and not allow_multi
"""
pseudos = self.select_symbols(symbol, ret_list=True)
if not pseudos or (len(pseudos) > 1 and not allow_multi):
raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol))
if not allow_multi:
return pseudos[0]
return pseudos
def pseudos_with_symbols(self, symbols):
"""
Return the pseudos with the given chemical symbols.
Raises:
            ValueError if one of the symbols is not found or multiple occurrences are present.
"""
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1]
if duplicated_elements:
raise ValueError("Found multiple occurrences of symbol(s) %s" % ", ".join(duplicated_elements))
missing_symbols = [s for s in symbols if s not in found_symbols]
if missing_symbols:
raise ValueError("Missing data for symbol(s) %s" % ", ".join(missing_symbols))
return pseudos
def select_symbols(self, symbols, ret_list=False):
"""
Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable`
"""
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
symbols = [s[1:] for s in symbols]
symbols = set(symbols)
pseudos = []
for p in self:
if exclude:
if p.symbol in symbols:
continue
else:
if p.symbol not in symbols:
continue
pseudos.append(p)
if ret_list:
return pseudos
return self.__class__(pseudos)
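    # Illustrative usage sketch for select_symbols: the "-" prefix switches from inclusion
    # to exclusion.
    #
    #   >>> table.select_symbols(["Si", "O"])       # keep only Si and O pseudos
    #   >>> table.select_symbols(["-Si", "-O"])     # keep everything except Si and O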
def get_pseudos_for_structure(self, structure):
"""
Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`.
Args:
structure: pymatgen :class:`Structure`.
Raises:
`ValueError` if one of the chemical symbols is not found or
            multiple occurrences are present in the table.
"""
return self.pseudos_with_symbols(structure.symbol_set)
def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
                A filtering function that takes a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream)
def to_table(self, filter_function=None):
"""Return string with data in tabular form."""
table = []
for p in self:
            if filter_function is not None and not filter_function(p):
continue
table.append([p.basename, p.symbol, p.Z_val, p.l_max, p.l_local, p.xc, p.type])
return tabulate(
table,
headers=["basename", "symbol", "Z_val", "l_max", "l_local", "XC", "type"],
tablefmt="grid",
)
def sorted(self, attrname, reverse=False):
"""
Sort the table according to the value of attribute attrname.
Return:
            New :class:`PseudoTable` object
"""
attrs = []
        for i, pseudo in enumerate(self):
try:
a = getattr(pseudo, attrname)
except AttributeError:
a = np.inf
attrs.append((i, a))
# Sort attrs, and build new table with sorted pseudos.
return self.__class__([self[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)])
def sort_by_z(self):
"""Return a new :class:`PseudoTable` with pseudos sorted by Z"""
return self.__class__(sorted(self, key=lambda p: p.Z))
def select(self, condition):
"""
Select only those pseudopotentials for which condition is True.
        Return new :class:`PseudoTable` object.
Args:
condition:
Function that accepts a :class:`Pseudo` object and returns True or False.
"""
return self.__class__([p for p in self if condition(p)])
def with_dojo_report(self):
"""Select pseudos containing the DOJO_REPORT section. Return new class:`PseudoTable` object."""
return self.select(condition=lambda p: p.has_dojo_report)
def select_rows(self, rows):
"""
        Return new :class:`PseudoTable` object with pseudos in the given rows of the periodic table.
rows can be either a int or a list of integers.
"""
if not isinstance(rows, (list, tuple)):
rows = [rows]
return self.__class__([p for p in self if p.element.row in rows])
def select_family(self, family):
"""
        Return a PseudoTable with elements belonging to the specified family, e.g. family="alkaline".
"""
# e.g element.is_alkaline
return self.__class__([p for p in self if getattr(p.element, "is_" + family)])
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/newscalarformatter_demo.py | 13 | 3313 | #!/usr/bin/env python
# Demonstrating the improvements and options of the proposed new ScalarFormatter
from pylab import *
from matplotlib.ticker import OldScalarFormatter
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The old formatter',horizontalalignment='center',verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(OldScalarFormatter())
gca().yaxis.set_major_formatter(OldScalarFormatter())
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, default settings',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter())
gca().yaxis.set_major_formatter(ScalarFormatter())
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, no numerical offset',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
gca().yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
x=frange(0,1,.01)
f=figure(figsize=(6,6))
f.text(0.5,0.975,'The new formatter, with mathtext',horizontalalignment='center',
verticalalignment='top')
subplot(221)
plot(x*1e5+1e10,x*1e-10+1e-5)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(222)
plot(x*1e5,x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(223)
plot(-x*1e5-1e10,-x*1e-5-1e-10)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
subplot(224)
plot(-x*1e5,-x*1e-4)
gca().xaxis.set_major_formatter(ScalarFormatter(useMathText=True))
gca().yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
show()
| gpl-2.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_logistic_path.py | 37 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
# #############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| gpl-3.0 |
beepee14/scikit-learn | sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
jbhuang0604/WSL | lib/roi_data_layer/minibatch.py | 44 | 7337 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob,
'rois': rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample foreground regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
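# Illustrative sketch of the expansion performed above: with num_classes=3, a single RoI of
# class 2 whose compact row is [2, tx, ty, tw, th] becomes a 1 x 12 target row where only
# columns 8:12 (the slots of class 2) hold [tx, ty, tw, th], the matching loss weights are
# [1, 1, 1, 1], and every other column stays zero.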
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/sphinxext/plot_directive.py | 7 | 28322 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context:close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
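# Small illustrative helper (not part of the original module): shows how a
# doctest-style snippet is unescaped -- '>>> '/'... ' prefixes are stripped
# and any remaining non-blank text (expected output) becomes a comment.
def _example_unescape_doctest():
    text = ">>> x = 1\n... y = x + 1\nexpected output line\n"
    # -> "x = 1\ny = x + 1\n# expected output line\n\n"
    return unescape_doctest(text)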
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
# String Sphinx < 1.3, Split on , to mimic
# Sphinx 1.3 and later. Sphinx 1.3 always
# returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix,dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and len(images),
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| mit |
dnidever/lsst-tools | python/qascript.py | 1 | 14469 | #!/usr/bin/env python
# call this like: % python test_matcher.py
#repodir = "/data/lsst/decam/redux/cp/cosmos/"
#visitid = 177341
#ccdnum = 15
import lsst.afw.image as afwImage
import lsst.afw.table as afwTable
import lsst.afw.geom as afwGeom
import lsst.daf.persistence as dafPersist
import lsst.meas.astrom as measAstrom
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def mad(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variabililty of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
It needs to be scaled by 1.4826x to be on the same "scale" as std. dev.
"""
med = np.median(arr)
return 1.4826*np.median(np.abs(arr - med))
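# Quick illustrative check (not part of the original script): for roughly
# normally distributed data the scaled MAD should track the standard deviation.
def _mad_sanity_check():
    rng = np.random.RandomState(0)
    arr = rng.normal(loc=0.0, scale=2.0, size=100000)
    return mad(arr), np.std(arr)  # both values should be close to 2.0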
#def getqainfo(repodir,visitid,ccdnum):
def getqainfo(dataref):
#butler = dataRef.butlerSubset.butler
#butler = dafPersist.Butler(repodir)
#exp = butler.get('calexp',visit=visitid,ccdnum=ccdnum)
# Start the structure
dt = np.dtype([('visit',int),('ccdnum',int),('calexp_exists',bool),('background_exists',bool),('src_exists',bool),('metadata_exists',bool),
('icsrc_exists',bool),('icmatch_exists',bool),('psf_exists',bool),('wcs_exists',bool),
('ra',float),('dec',float),('filter',np.str_,1),('datetime',np.str_,30),('nx',int),('ny',int),('exptime',float),('ncrpix',int),
('fluxmag0',float),('fluxmag0sigma',float),
('calexp_bgmean',float),('calexp_bgvar',float),('calexp_bgmean2',float),('calexp_bgvar2',float),('calexp_magzero',float),
('calexp_magzero_rms',float),('calexp_magzero_nobj',int),('calexp_colorterm1',float),('calexp_colorterm2',float),('calexp_colorterm3',float),
('apcorr_exists',bool),('apcorr_med',float),('apcorr_std',float),
('ixx',float),('iyy',float),('ixy',float),('semimajor_pix',float),('semiminor_pix',float),('pa_pix',float),
('ellipticity_pix',float),('semimajor_arcsec',float),('semiminor_arcsec',float),
('pa_arcsec',float),('ellipticity_arcsec',float),('nsources',int),('bkgdmed',float),('bkgdsig',float),
('measurePsf_numAvailStars',int),('measurePsf_numGoodStars',int),('measurePsf_spatialFitChi2',float),
('detection_sigma',float),('detection_nGrow',int),('detection_doSmooth',bool),
('detection_smoothingKernelWidth',int),
('metadata_processccd_exists',bool),('metadata_processccd_calibrate_exists',bool),('metadata_processccd_calibrate_astrometry_exists',bool),
('metadata_processccd_calibrate_astrometry_matcher_exists',bool),('metadata_processccd_calibrate_astrometry_refobjloader_exists',bool),
('metadata_processccd_calibrate_astrometry_wcsfitter_exists',bool),('metadata_processccd_calibrate_detection_exists',bool),
('metadata_processccd_calibrate_measurepsf_exists',bool),('metadata_processccd_calibrate_photocal_exists',bool),
('metadata_processccd_calibrate_repair_exists',bool),('metadata_processccd_deblend_exists',bool),('metadata_processccd_detection_exists',bool)])
#data = np.empty(1,dtype=dt)
data = np.zeros(1,dtype=dt)
visit = dataref.dataId['visit']
ccdnum = dataref.dataId['ccdnum']
data['visit'] = visit
data['ccdnum'] = ccdnum
# Check if the files/products exist
data['calexp_exists'] = dataref.datasetExists('calexp')
data['background_exists'] = dataref.datasetExists('calexpBackground')
data['src_exists'] = dataref.datasetExists('src')
data['metadata_exists'] = dataref.datasetExists('processCcdDecam_metadata')
data['icsrc_exists'] = dataref.datasetExists('icSrc')
data['icmatch_exists'] = dataref.datasetExists('icMatch')
# Load the exposure
if data['calexp_exists']:
exp = dataref.get('calexp')
mask = exp.getMaskedImage().getMask()
data['wcs_exists'] = exp.hasWcs()
wcs = exp.getWcs()
calib = exp.getCalib()
data['psf_exists'] = exp.hasPsf()
psf = exp.getPsf()
shape = psf.computeShape()
# Chip/Exposure level information
filt = exp.getFilter().getName()
exptime = calib.getExptime()
fluxmag0 = calib.getFluxMag0()
datetime = calib.getMidTime()
data['filter'] = filt
data['exptime'] = exptime
data['fluxmag0'] = fluxmag0[0]
data['fluxmag0sigma'] = fluxmag0[1]
data['datetime'] = datetime.toString()[0:30] # can only be 30 char long
# Get calexp metadata
calexp_meta = exp.getMetadata()
calexp_meta_names = ['BGMEAN','BGVAR','BGMEAN2','BGVAR2','MAGZERO','MAGZERO_RMS','MAGZERO_NOBJ','COLORTERM1','COLORTERM2','COLORTERM3']
for name in calexp_meta_names:
if calexp_meta.exists(name):
data['calexp_'+name.lower()] = calexp_meta.get(name)
# Size of the image
nx = exp.getWidth()
ny = exp.getHeight()
data['nx'] = nx
data['ny'] = ny
# Central coordinates
cen = wcs.pixelToSky(nx/2,ny/2)
ra = cen.getLongitude().asDegrees()
dec = cen.getLatitude().asDegrees()
data['ra'] = ra
data['dec'] = dec
# Get aperture correction
info = exp.getInfo()
data['apcorr_exists'] = info.hasApCorrMap()
if data['apcorr_exists']:
apcorr = info.getApCorrMap()
apcorr_psfflux = apcorr.get('base_PsfFlux_flux')
# fill the entire image, THIS IS SLOW!! sample the data instead
apcorr_im = exp.getMaskedImage().getImage() # initialize with flux image
apcorr_psfflux.fillImage(apcorr_im)
# sample the area
apcorr_med = np.median(apcorr_im.getArray())
apcorr_std = np.std(apcorr_im.getArray())
data['apcorr_med'] = apcorr_med
data['apcorr_std'] = apcorr_std
# Get shape parameters
ixx = shape.getIxx()
iyy = shape.getIyy()
ixy = shape.getIxy()
data['ixx'] = ixx
data['iyy'] = iyy
data['ixy'] = ixy
# Get ellipticity and PA
axes = afwGeom.ellipses.Axes(shape)
pa_pix = axes.getTheta() * 180 / math.pi # CCW from x-axis on pixel-grid
semimajor_pix = axes.getA()
semiminor_pix = axes.getB()
ellipticity_pix = (semimajor_pix-semiminor_pix)/(semimajor_pix+semiminor_pix)
data['semimajor_pix'] = semimajor_pix
data['semiminor_pix'] = semiminor_pix
data['pa_pix'] = pa_pix
data['ellipticity_pix'] = ellipticity_pix
# Transform ellipse to on sky
point = afwGeom.Point2D(nx/2, ny/2)
local_transform = wcs.linearizePixelToSky(point, afwGeom.arcseconds) # or whatever angle unit you want for radii; PA is always radians
pixel_ellipse = afwGeom.ellipses.Axes(psf.computeShape(point))
sky_ellipse = pixel_ellipse.transform(local_transform.getLinear())
pa_arcsec = sky_ellipse.getTheta() * 180 / math.pi # east of north???
semimajor_arcsec = sky_ellipse.getA()
semiminor_arcsec = sky_ellipse.getB()
ellipticity_arcsec = (semimajor_arcsec-semiminor_arcsec)/(semimajor_arcsec+semiminor_arcsec)
data['semimajor_arcsec'] = semimajor_arcsec
data['semiminor_arcsec'] = semiminor_arcsec
data['pa_arcsec'] = pa_arcsec
data['ellipticity_arcsec'] = ellipticity_arcsec
# Number of CR pixels, I don't know what bit it is
#threshold = 16
# is there a better way of selecting "CR" pixels than this??
#cr = np.bitwise_and(np.int16(mask.getArray()),threshold) == threshold
crBit = mask.getPlaneBitMask('CR')
crmask = (mask.getArray() & crBit) == crBit
ncrpix = crmask.sum()
data['ncrpix'] = ncrpix
# No calexp, try to get basic info from raw or instcal
else:
# Try to load raw or instcal
try:
raw = dataref.get('raw')
except:
try:
instcal = dataref.get('instcal')
calib = instcal.getCalib()
# Chip/Exposure level information
filt = instcal.getFilter().getName()
exptime = calib.getExptime()
datetime = calib.getMidTime()
data['filter'] = filt
data['exptime'] = exptime
data['datetime'] = datetime.toString()[0:30] # can only be 30 char long
# Size of the image
nx = instcal.getWidth()
ny = instcal.getHeight()
data['nx'] = nx
data['ny'] = ny
# Central coordinates
wcs = instcal.getWcs()
cen = wcs.pixelToSky(nx/2,ny/2)
ra = cen.getLongitude().asDegrees()
dec = cen.getLatitude().asDegrees()
data['ra'] = ra
data['dec'] = dec
except:
pass
# Load the source catalog
if data['src_exists']:
src = dataref.get('src')
nsources = len(src)
data['nsources'] = nsources
# Background level
if data['background_exists']:
backgrounds = dataref.get('calexpBackground')
bkgdimage = backgrounds.getImage()
bkgdmed = np.median(bkgdimage.getArray())
bkgdsig = mad(bkgdimage.getArray())
data['bkgdmed'] = bkgdmed
data['bkgdsig'] = bkgdsig
# Getting metadata
if dataref.datasetExists('processCcdDecam_metadata'):
try:
meta = dataref.get('processCcdDecam_metadata')
# check for the existence of various entries in the metadata
data['metadata_processccd_exists'] = meta.exists('processCcdDecam')
data['metadata_processccd_calibrate_exists'] = meta.exists('processCcdDecam:calibrate')
data['metadata_processccd_calibrate_astrometry_exists'] = meta.exists('processCcdDecam:calibrate:astrometry')
data['metadata_processccd_calibrate_astrometry_matcher_exists'] = meta.exists('processCcdDecam:calibrate:astrometry:matcher')
data['metadata_processccd_calibrate_astrometry_refobjloader_exists'] = meta.exists('processCcdDecam:calibrate:astrometry:refObjLoader')
data['metadata_processccd_calibrate_astrometry_wcsfitter_exists'] = meta.exists('processCcdDecam:calibrate:astrometry:wcsFitter')
data['metadata_processccd_calibrate_detection_exists'] = meta.exists('processCcdDecam:calibrate:detection')
data['metadata_processccd_calibrate_measurepsf_exists'] = meta.exists('processCcdDecam:calibrate:measurePsf')
data['metadata_processccd_calibrate_photocal_exists'] = meta.exists('processCcdDecam:calibrate:photocal')
data['metadata_processccd_calibrate_repair_exists'] = meta.exists('processCcdDecam:calibrate:repair')
data['metadata_processccd_deblend_exists'] = meta.exists('processCcdDecam:deblend')
data['metadata_processccd_detection_exists'] = meta.exists('processCcdDecam:detection')
# PSF values
if data['metadata_processccd_calibrate_measurepsf_exists']:
meta_measurePsf = meta.get('processCcdDecam:calibrate:measurePsf')
numAvailStars = meta_measurePsf.get('numAvailStars')
numGoodStars = meta_measurePsf.get('numGoodStars')
spatialFitChi2 = meta_measurePsf.get('spatialFitChi2')
data['measurePsf_numAvailStars'] = numAvailStars
data['measurePsf_numGoodStars'] = numGoodStars
data['measurePsf_spatialFitChi2'] = spatialFitChi2
# Detection information
if data['metadata_processccd_detection_exists']:
meta_det = meta.get('processCcdDecam:detection')
sigma = meta_det.get('sigma')
doSmooth = meta_det.get('doSmooth')
nGrow = meta_det.get('nGrow')
smoothingKernelWidth = meta_det.get('smoothingKernelWidth')
data['detection_sigma'] = sigma
data['detection_doSmooth'] = doSmooth
data['detection_nGrow'] = nGrow
data['detection_smoothingKernelWidth'] = smoothingKernelWidth
except:
print "Error loading metadata for ",visit,ccdnum
return data
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Get QA metric data for a calexp data.")
parser.add_argument('--datarepo', '-d', action="store", help="The data repository directory", default="/data/lsst/decam/redux/cp/cosmos/")
parser.add_argument('--outfile', '-o', action="store", help="The output filename for the metrics.", default="qametrics.csv")
parser.add_argument('--verbose', '-v', action="store_true", help="Print out the data as it is gathered.", default=False)
args = parser.parse_args()
print "Getting QA metric data for ", args.datarepo
# Get all the data IDs
butler = dafPersist.Butler(args.datarepo)
# Get the total number of dataIds that EXIST
#ndata=0
#for dataref in butler.subset(datasetType='calexp'):
# if dataref.datasetExists(): # processCcd did not fail
# ndata = ndata+1
ndata = len(butler.subset(datasetType='calexp'))
print ndata, "calexps"
count = 0
for dataref in butler.subset(datasetType='calexp'):
#if dataref.datasetExists(): # processCcd did not fail
data1 = getqainfo(dataref)
# Create the structured array for all calexps
if count == 0:
dt = data1.dtype
data = np.empty(ndata,dtype=dt)
# Stuff in the data for THIS calexp
data[count] = data1
# Print out the information
if args.verbose:
print count, data1[0]
count = count+1
# output to csv file
print "Writing outputs to", args.outfile
data.tofile(args.outfile,sep='\n')
# Add header line
f = open(args.outfile,'r') # read it all back in first
temp = f.read()
f.close()
# now write out with header line
f = open(args.outfile, 'w')
f.write(str(dt.names)+'\n')
f.write(temp)
f.close()
| mit |
YinongLong/scikit-learn | sklearn/learning_curve.py | 8 | 14757 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
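# Hedged usage sketch (illustrative only; the estimator and dataset are
# arbitrary choices, not part of this module): the returned score arrays are
# typically averaged over the cv folds to get one train and one test curve.
def _learning_curve_example():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    X, y = load_iris(return_X_y=True)
    sizes, train_scores, test_scores = learning_curve(
        DecisionTreeClassifier(), X, y,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)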
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
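# Hedged usage sketch (illustrative only; the estimator, parameter name and
# range are arbitrary choices): one row of scores per parameter value, one
# column per cv fold.
def _validation_curve_example():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    X, y = load_iris(return_X_y=True)
    param_range = np.logspace(-3, 2, 6)
    train_scores, test_scores = validation_curve(
        SVC(), X, y, param_name="gamma", param_range=param_range, cv=3)
    return param_range, train_scores.mean(axis=1), test_scores.mean(axis=1)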
| bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/plot_geopotential_diff_8km.py | 1 | 13240 | """
Load mean geopotential heights and plot in colour
"""
import os, sys
from matplotlib import rc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from mpl_toolkits.basemap import Basemap
import iris
import iris.analysis.cartography
import numpy as np
import imp
import h5py
import cartopy.crs as ccrs
#import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
try:
import cStringIO as StringIO
except:
import StringIO
import PIL
import Image
#from matplotlib.font_manager import FontProperties
#from matplotlib import rcParams
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 14}
matplotlib.rc('font', **font)
from textwrap import wrap
import math
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
def main():
# Plot diagnostics, model and pressure levels etc. to plot on for looping through
plot_type='mean_masked'
plot_type_h5py_var = 'mean'
plot_diags=['temp', 'sp_hum']
plot_levels = [925, 850, 700, 500]
#plot_levels = [925]
experiment_ids = ['dklyu']
difference_id = 'dkmgw'
diffidmin1 = difference_id[:-1]
divisor=10 # for lat/lon rounding
p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10]
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3
###############################################################################
#################### Load heights, winds and temp/sp_hum for difference id #####################
f_glob_h = '/nfs/a90/eepdw/Data/EMBRACE/Pressure_level_means/408_pressure_levels_interp_pressure_%s_%s' % (difference_id, plot_type)
with h5py.File(f_glob_h, 'r') as i:
mh = i['%s' % plot_type_h5py_var]
        mean_heights_global = mh[...]
######################################################################################
fu_g = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/30201_mean.pp' % (diffidmin1, difference_id)
u_wind_g,v_wind_g = iris.load(fu_g)
for pl in plot_diags:
plot_diag=pl
f_glob_d = '/nfs/a90/eepdw/Data/EMBRACE/Pressure_level_means/%s_pressure_levels_interp_%s_%s' % (plot_diag, difference_id, plot_type)
with h5py.File(f_glob_d, 'r') as i:
mg = i['%s' % plot_type_h5py_var]
            mean_var_global = mg[...]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
###############################################################################
#################### Load heights and temp/sp_hum #####################
fname_h = '/nfs/a90/eepdw/Data/EMBRACE/Pressure_level_means/408_pressure_levels_interp_pressure_%s_%s' % (experiment_id, plot_type)
fname_d = '/nfs/a90/eepdw/Data/EMBRACE/Pressure_level_means/%s_pressure_levels_interp_%s_%s' % (plot_diag, experiment_id, plot_type)
# print fname_h
# print fname_d
# Height data file
with h5py.File(fname_h, 'r') as i:
mh = i['%s' % plot_type_h5py_var]
                mean_heights = mh[...]
# print mean_heights.shape
with h5py.File(fname_d, 'r') as i:
mh = i['%s' % plot_type_h5py_var]
                mean_var = mh[...]
# print mean_var.shape
f_oro = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/33.pp' % (expmin1, experiment_id)
oro = iris.load_cube(f_oro)
fu = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/30201_mean.pp' % (expmin1, experiment_id)
u_wind,v_wind = iris.load(fu)
lat_w = u_wind.coord('grid_latitude').points
lon_w = u_wind.coord('grid_longitude').points
p_levs = u_wind.coord('pressure').points
lat = oro.coord('grid_latitude').points
lon = oro.coord('grid_longitude').points
cs = oro.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' 33.pp - %s - Unrotate pole %s' % (experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_corner_u,lat_corner_u = iris.analysis.cartography.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
#lon_highu,lat_highu = iris.analysis.cartography.unrotate_pole(lon_high, lat_high, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
lon_min=np.min(lon)
lon_max=np.max(lon)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lat)
lat_max=np.max(lat)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
cs_w = u_wind.coord_system('CoordSystem')
if isinstance(cs_w, iris.coord_systems.RotatedGeogCS):
print ' Wind - %s - Unrotate pole %s' % (experiment_id, cs_w)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
lon_high = 102
lon_low = 64
lat_high= 30.
lat_low=-10
csur_w=cs_w.ellipsoid
for p in plot_levels:
### Search for pressure level match
s = np.searchsorted(p_levels[::-1], p)
# Difference heights
plt_h = np.where(np.isnan(mean_heights[:,:,-(s+1)]), np.nan, mean_heights[:,:,-(s+1)] - mean_heights_global[:,:,-(s+1)])
#Difference temperature/specific humidity
plt_v = np.where(np.isnan(mean_var[:,:,-(s+1)]), np.nan, mean_var[:,:,-(s+1)] - mean_var_global[:,:,-(s+1)])
# Set u,v for winds, linear interpolate to approx. 2 degree grid
sc = np.searchsorted(p_levs, p)
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
### Regrid winds to 2 degree spacing
lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
fl_la_lo = (lats_w.flatten(),lons_w.flatten())
u_wind_diff = u_wind[sc,:,:] - u_wind_g[sc,:,:]
v_wind_diff = v_wind[sc,:,:] - v_wind_g[sc,:,:]
u = scipy.interpolate.griddata(fl_la_lo, u_wind_diff.data.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, v_wind_diff.data.flatten(), (lats_wi, lons_wi), method='linear')
#######################################################################################
### Plotting #########################################################################
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
#m_title = 'Height of %s-hPa level (m)' % (p)
# Set pressure height contour min/max
if p == 925:
clev_min = -24.
clev_max = 24.
elif p == 850:
clev_min = -24.
clev_max = 24.
elif p == 700:
clev_min = -24.
clev_max = 24.
elif p == 500:
clev_min = -24.
clev_max = 24.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
clevs_lin = np.linspace(clev_min, clev_max, num=24)
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
x,y = m(lons,lats)
x_w,y_w = m(lons_wi, lats_wi)
fig=plt.figure(figsize=(8,10))
ax = fig.add_axes([0.05,0.05,0.9,0.85], axisbg='#262626')
m.drawcoastlines(color='#262626')
m.drawcountries(color='#262626')
m.drawcoastlines(linewidth=0.5)
#m.fillcontinents(color='#CCFF99')
m.drawparallels(np.arange(int(lat_low_tick),int(lat_high_tick)+divisor,divisor),labels=[1,0,0,0], color='#262626')
m.drawmeridians(np.arange(int(lon_low_tick),int(lon_high_tick)+divisor,divisor),labels=[0,0,0,1], color='#262626' )
cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='#262626',linewidths=0.5)
cmap=plt.cm.RdBu_r
if plot_diag=='temp':
plt_v = np.ma.masked_outside(plt_v, clevpt_max+20, clevpt_min-20)
cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max, 256), cmap=cmap, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%d')
#cbar = plt.colorbar(cs_col, orientation='horizontal', pad=0.05, extend='both', format = '%d')
tick_gap=1.
cbar.set_ticks(np.arange(clevpt_min,clevpt_max+tick_gap,tick_gap))
cbar.set_ticklabels(np.arange(clevpt_min,clevpt_max+tick_gap,tick_gap))
cbar.set_label('K')
pn='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours), and wind (vectors)'
elif plot_diag=='sp_hum':
plt_v = np.ma.masked_outside(plt_v, clevsh_max+20, clevsh_min-20)
cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max, 256), cmap=cmap, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
#cbar = plt.colorbar(cs_col, orientation='horizontal', pad=0.05, extend='both', format = '%d')
cbar.set_label('kg/kg')
pn='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours), and wind (vectors)'
wind = m.quiver(x_w,y_w, u, v,scale=75, color='#262626' )
qk = plt.quiverkey(wind, 0.1, 0.1, 1, '5 m/s', labelpos='W')
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#pn='%s' % (model_name_convert_title.main(experiment_id))
# pn in sphum and temp loops
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, plot_diag)): os.makedirs('%s%s/%s' % (save_path, experiment_id, plot_diag))
# Save no title # #
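# render the figure to an in-memory PNG buffer, then re-open it with PIL and quantise to an adaptive palette before saving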
ram = StringIO.StringIO()
plt.savefig(ram, format='png', bbox_inches='tight', rasterized=True)
#plt.savefig('%s%s/%s/geop_height_difference_8km_%shPa_%s_%s_notitle.png' % (save_path, experiment_id, plot_diag, p, experiment_id, plot_diag), format='png', bbox_inches='tight', rasterized=True)
ram.seek(0)
im = Image.open(ram)
im2 = im.convert('RGB').convert('P', palette=Image.ADAPTIVE)
im2.save('%s%s/%s/geop_height_difference_%shPa_%s_minus_%s_%s_notitle_large_font.png' \
% (save_path, experiment_id, plot_diag, p, experiment_id, diff_id, plot_diag) , format='PNG', optimize=True)
plt.title('\n'.join(wrap('%s-hPa\n%s' % (p, pn) , 75, replace_whitespace=False)), fontsize=16, color='#262626')
#plt.show()
# Save with title # #
ram = StringIO.StringIO()
plt.savefig(ram, format='png', bbox_inches='tight', rasterized=True)
#plt.savefig('%s%s/%s/geop_height_difference_8km_%shPa_%s_%s.png' % (save_path, experiment_id, plot_diag, p, experiment_id, plot_diag), format='png', bbox_inches='tight', rasterized=True)
ram.seek(0)
im = Image.open(ram)
im2 = im.convert('RGB').convert('P', palette=Image.ADAPTIVE)
im2.save('%s%s/%s/geop_height_difference_8km_%shPa_%s_minus_%s_%s_large_font.png' \
% (save_path, experiment_id, plot_diag, p, experiment_id, diff_id, plot_diag) , format='PNG', optimize=True)
plt.cla()
plt.clf()
# Save fig - update dpi if need for printing
if __name__ == '__main__':
main()
| mit |
lamastex/scalable-data-science | dbcArchives/2021/000_0-sds-3-x-projects/student-project-08_group-DistributedEnsemble/00_project.py | 1 | 34478 | # Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# COMMAND ----------
# MAGIC %md
# MAGIC # Distributed ensembles
# MAGIC
# MAGIC _Amanda Olmin, Amirhossein Ahmadian and Jakob Lindqvist_
# MAGIC
# MAGIC [Video presentation](https://www.youtube.com/watch?v=zbYewn3nDtk)
# COMMAND ----------
# MAGIC %md
# MAGIC Python version: python 3.7
# MAGIC
# MAGIC **Library dependencies**
# MAGIC - PySpark
# MAGIC - PyTorch
# MAGIC - toolz
# MAGIC - matplotlib
# COMMAND ----------
# MAGIC %md
# MAGIC ## Introduction
# MAGIC
# MAGIC In this project, we create a distributed ensemble of neural networks that we can train and make predictions with in a distributed fashion, and we also apply this model to the out-of-distribution detection problem [2] (detecting inputs that are highly dissimilar from the training data).
# MAGIC
# MAGIC #### Why ensembles?
# MAGIC Ensembles of neural networks
# MAGIC - often have better predictive performance than single ensemble members [1]
# MAGIC - have shown to provide reliable uncertainty estimates
# MAGIC
# MAGIC The latter quality is beneficial in itself but is especially useful when it comes to tasks such as out-of-distribution detection, where a model’s uncertainty estimates can be used to determine if a sample is in-distribution or not. We demonstrate this in the experiments below.
# MAGIC
# MAGIC
# MAGIC #### Distributed ensembles
# MAGIC In Spark, it is common to distribute *data* over several worker nodes. In this way, the same function is performed on several nodes on different parts of the data. The result from each node is then communicated and aggregated to a final function output. Similarly, we can train a neural network (a single ensemble member) in a distributed way by distributing the data that we use to train it. This can, for example, be done using the built-in MLP and MLPC classes in Pyspark [3]. However, this approach requires continuous communication between nodes to update model weights (possibly at every iteration) since every node keeps its own version of the model weights. The approach therefore scales badly as
# MAGIC - the number of model parameters grow (more information to communicate between nodes)
# MAGIC - when the complexity of the training algorithm increases, e.g. we wish to use a stochastic training algorithm
# MAGIC
# MAGIC In this regard, the communication becomes a bottleneck. Asynchronous updating can reduce the amount of communication, but might also hurt model performance [4].
# MAGIC
# MAGIC Considering that the ensemble members are independent models, they never need to communicate during the training phase. Hence, training ensemble members in a way that requires the otherwise independent training processes to integrate or synchronize would cause unnecessary costs, for example since the training processes all need to communicate through the driver node. The same holds for prediction; no communication is needed between ensemble members except at the very end when the predictions are aggregated.
# MAGIC
# MAGIC To avoid unnecessary communication, we distribute the *ensemble members* and train them on separate worker nodes such that we
# MAGIC - are able to train several ensemble members in parallell (limited by the number of nodes in our cluster) and independently
# MAGIC - avoid communication between worker nodes
# MAGIC
# MAGIC To achieve this, we implement our own training processes below. In addition, we implement our own MLP class with the help of PyTorch. MLP objects and their training data are then distributed on worker nodes using Spark. This is not only to avoid distributing the training data over several nodes during training but also to package the ensemble members in a way that makes it possible for us to send them between the driver and the worker nodes prior to and at the end of training.
# MAGIC
# MAGIC <img src="files/shared_uploads/[email protected]/distributed_fig_small.png"/>
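# MAGIC
# MAGIC As a rough conceptual sketch of this pattern (simplified pseudocode with hypothetical names, not the exact API implemented below):
# MAGIC
# MAGIC ```python
# MAGIC # one (initial_params, data_shard) pair per ensemble member
# MAGIC member_rdd = sc.parallelize(list(zip(initial_params_list, data_shards)))
# MAGIC trained_members = member_rdd.map(lambda pair: train_one_member(*pair)).collect()
# MAGIC ```
# MAGIC
# MAGIC Here `train_one_member` stands for an ordinary local training loop run on a single worker; the concrete implementation (`Train` and `train_ensemble`) is given further down.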
# COMMAND ----------
# MAGIC %md
# MAGIC ##Imports
# COMMAND ----------
from random import randrange
import random
from pathlib import Path
# External libs added to cluster
from pyspark.mllib.random import RandomRDDs
from pyspark.ml.feature import StringIndexer, OneHotEncoder, StandardScaler, VectorAssembler
from pyspark.ml import Pipeline
from pyspark.rdd import PipelinedRDD
from toolz.itertoolz import partition_all
from toolz.itertoolz import cons
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt
# COMMAND ----------
# MAGIC %md
# MAGIC ## Data
# MAGIC
# MAGIC We introduce the functions that we use to load the data for the experiments that we conduct. We split the available training data between ensemble members using sampling with or without replacement. The number of training data points that we can distribute to each ensemble member is only limited by the memory available to each worker node.
# COMMAND ----------
# MAGIC %md
# MAGIC **TOY DATA**
# MAGIC
# MAGIC We create a function for generating data consisting of Gaussian clusters. The function takes as input, user defined means and variances for each cluster in the data as well as the total number of observations and a vector of intended class proportions. It also comes with an option to split the final RDD into train and test sets.
# MAGIC
# MAGIC We will use this data later on to demonstrate our distributed ensembles framework as well as to generate out-of-distribution data for OOD detection.
# COMMAND ----------
def create_gaussian_RDD(means, variances, num_observations, class_proportions, train_test_split=False):
"""Create toy Gaussian classification data
Let C := number of clusters/classes and P := number of data features
Args:
means (np.array[float]): mean vector of shape (C, P)
variances (np.array[float]): vector of variances, shape (C, P)
num_observations (scalar[int]): the total number of observations in the final data set
class_proportions (np.array[float]): vector of class proportions, length C
train_test_split: whether to split the data into train/test sets or not
Returns:
Gaussian data, RDD of tuples (list(features), int(label))
"""
assert means.shape[0] == variances.shape[0]
assert means.shape[1] == variances.shape[1]
assert class_proportions.sum() == 1
num_classes = means.shape[0]
num_features = means.shape[1]
data_rdd = sc.emptyRDD()
for k in range(num_classes):
# Generate standard normal data
class_size = int(num_observations * class_proportions[k])
class_rdd = RandomRDDs.normalVectorRDD(sc, numRows=class_size, numCols=num_features, numPartitions=1) #, seed=123)
# Map to true distribution
class_rdd_transformed = class_rdd.map(lambda v: means[k, :] + (variances[k, :]**0.5) * v)
# Add labels
class_rdd_w_label = class_rdd_transformed.map(lambda v: (v, k))
data_rdd = data_rdd.union(class_rdd_w_label)
# We will shuffle and repartition the data
num_partitions = 10
shuffled_rdd = data_rdd.sortBy(lambda v: randrange(num_observations)).repartition(num_partitions)
final_rdd = shuffled_rdd.map(tuple).map(lambda v: (list(v[0]), int(v[1])))
if train_test_split:
train_rdd, test_rdd = final_rdd.randomSplit(weights=[0.8, 0.2], seed=12)
final_rdd = (train_rdd, test_rdd)
return final_rdd
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **FIRE WALL DATA**
# MAGIC
# MAGIC We will also consider some real data. The dataset that we will use consists of traffic from a firewall tracking record. We have accessed it through the UCI Machine Learning repository [5]: https://archive.ics.uci.edu/ml/datasets/Internet+Firewall+Data.
# MAGIC
# MAGIC - Number of data points: 65,532.
# MAGIC
# MAGIC - Number of features: 11 (all numerical).
# MAGIC
# MAGIC - Number of classes: 4 (allow/deny/drop/reset both).
# COMMAND ----------
def load_firewall_data(train_test_split=False,file_location="/FileStore/shared_uploads/[email protected]/fire_wall_data.csv"):
"""Load and preprocess firewall data
Args:
file_location: file location from which to load the data
train_test_split: whether to split the data into train/test sets or not
Returns:
Firewall data, RDD of tuples (list(features), int(label))
"""
# File location and type
# file_location = "/FileStore/shared_uploads/[email protected]/fire_wall_data.csv"
file_type = "csv"
# CSV options
infer_schema = "true"
first_row_is_header = "true"
delimiter = ","
# Load the data from file
df = spark.read.format(file_type) \
.option("inferSchema", infer_schema) \
.option("header", first_row_is_header) \
.option("sep", delimiter) \
.load(file_location)
# Preprocess data
col_num = ["Source Port", "Destination Port", "NAT Source Port", "NAT Destination Port", "Bytes", "Bytes Sent", "Bytes Received", "Packets", "Elapsed Time (sec)", "pkts_sent", "pkts_received"]
# Index qualitative variable
indexer = StringIndexer(inputCol = "Action", outputCol = "label")
# Scale numerical features
va = VectorAssembler(inputCols = col_num, outputCol = "numerical_features")
scaler = StandardScaler(inputCol = "numerical_features", outputCol = "features")
# Apply pipeline
pipeline = Pipeline(stages=[indexer, va, scaler])
final_df = pipeline.fit(df).transform(df).select("features", "label")
# Convert to RDD
final_rdd = final_df.rdd.map(tuple).map(lambda v: (list(v[0]), int(v[1])))
if train_test_split:
train_rdd, test_rdd = final_rdd.randomSplit(weights=[0.8, 0.2], seed=12)
final_rdd = (train_rdd, test_rdd)
return final_rdd
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ** RDD partition **
# MAGIC
# MAGIC Below, we provide a function that partitions an RDD. We will use it to distribute data between ensemble members.
# COMMAND ----------
def get_partitioned_rdd(input_rdd, partition_size=1000):
"""Partition RDD
Args:
input_rdd: RDD to be partitioned
Returns:
Partitioned RDD
"""
return input_rdd.mapPartitions(lambda partition: partition_all(partition_size, partition))
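# Example usage (this mirrors how the function is used further below; `train_rdd` stands for any
# dataset RDD of (features, label) tuples):
# data_iterator = get_partitioned_rdd(train_rdd, partition_size=1000).map(Totensor).toLocalIterator()
# Each call to next(data_iterator) then yields one partition, i.e. one ensemble member's training data.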
# COMMAND ----------
# MAGIC %md
# MAGIC ## Distributed ensemble of neural networks
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **PyTorch Model**
# COMMAND ----------
# MAGIC %md
# MAGIC To implement the ensemble members, we first write an ordinary feedforward (MLP) neural network class using PyTorch, which has a Softmax output and Tanh activation functions. The number of layers and neurons in each layer is passed as an argument to the constructor of this class. Moreover, any instance of this network class (parameters and structure) can be easily stored in and loaded from a state dictionary (state_dict) object.
# COMMAND ----------
#Feedforward network for classification
class MLP(nn.Module):
def __init__(self,shape):
#shape: number of neurons in each layer (including the input and output layers)
super(MLP,self).__init__()
self.units=nn.ModuleList()
for i in range(len(shape)-1):
self.units.append(nn.Linear(shape[i],shape[i+1]))
self._shape=shape
self._nlayers=len(shape)
def forward(self,x):
y=x
for i,layer in enumerate(self.units):
if i<self._nlayers-2:
y=nn.functional.tanh(layer(y))
else:
y=nn.functional.softmax(layer(y),dim=1)
return y
#constructing an instance of this class based on a state dictionary (network parameters)
@staticmethod
def from_state_dict(state_dict):
net_shape = MLP.shape_from_state_dict(state_dict)
net=MLP(net_shape)
net.load_state_dict(state_dict)
return net
@staticmethod
def shape_from_state_dict(state_dict):
"""Infer MLP layer shapes from state_dict"""
iter_ = iter(state_dict.items())
_, input_size = next(iter_)
bias_tensors = filter(lambda key_val: key_val[0].find("bias") != -1, iter_)
shapes = map(lambda key_val: key_val[1].size(0), bias_tensors)
return list(cons(input_size.size(1), shapes))
# COMMAND ----------
# MAGIC %md
# MAGIC **Functions for training and testing networks**
# COMMAND ----------
# MAGIC %md
# MAGIC Here we have some functions that are used to train/test each individual network in the ensemble. The *Train* function takes the initial weights of a network, trains it on a set of input-target data based on stochastic gradient optimization and cross-entropy loss, and returns the state dictionary of the trained network. PyTorch's backpropagation and optimization tools are used to implement this function as usual. The *Predict* function simply takes the state dictionary corresponding to a network as well as a data point (or batch of data), and returns the output (probabilities) of the network at that point.
# MAGIC
# MAGIC We note that Spark can automatically distribute these functions to the nodes, and thus writing them for a distributed ensemble is not fundamentally different from writing them for a local setup.
# COMMAND ----------
#utility class for pytorch data loader
class DataSet(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return self.x.shape[0]
def __getitem__(self, ind):
x = self.x[ind]
y = self.y[ind]
return x, y
#The main training function (is run on worker nodes)
def Train(net_params,x,y):
#net_params: initial parameters of the feedforward network (state dictionary)
#x,y: training data (pytorch tensors)
n_epochs=100
batchsize=10
net=MLP.from_state_dict(net_params)
train_data = DataSet(x, y)
dataloader = torch.utils.data.DataLoader(train_data, batch_size=batchsize)
opt=optim.Adam(net.parameters())
loss=nn.CrossEntropyLoss()
for i in range(n_epochs):
for batch in dataloader:
opt.zero_grad()
xb,yb=batch
yhat=net(xb)
err=loss(yhat,yb)
err.backward()
opt.step()
err=loss(net(x),y)
lossval=float(err.detach().numpy())
#returns parameters of the trained network and loss
return (net.state_dict(),lossval)
#Get the output of a feedforward network given an input tensor
def Predict(net_params, x):
#net_params: parameters (state dictionary) of the network
#x: input (pytorch tensor)
net = MLP.from_state_dict(net_params)
net.eval()
return net(x)
#Reshaping and converting the tuples stored in a dataset RDD into input and target tensors
def Totensor(d):
#d: the dataset (list of tuples)
x=[v[0] for v in d]
y=[v[1] for v in d]
x=torch.tensor(x,dtype=torch.float)
y=torch.tensor(y,dtype=torch.long)
return (x,y)
def make_prediction(state_dict, x):
print(state_dict)
return Predict(state_dict, x)
# COMMAND ----------
# MAGIC %md
# MAGIC **Creating an ensemble of networks, and training them in parallel**
# COMMAND ----------
# MAGIC %md
# MAGIC We now use the class and functions defined above to create an ensemble of feedforward neural networks, and train it in a distributed fashion, where each network is trained on a single worker independently from the other ones. Firstly, several networks are initialized using the MLP class with a random number of hidden layers and neurons, and random initial weights. Using randomness helps to increase the diversity in the ensemble (without which, the outputs of ensemble members could get correlated with each other).
# MAGIC
# MAGIC As mentioned before, the training data is partitioned into equal-size parts, and each of the networks in the ensemble is assigned one part. Since the dataset is assumed to be an RDD (to let it be huge), an iterator object is needed which collects one part of the data RDD (transfers it from the cloud to the driver node) in each call. Note that we here implicitly assume that each part of the data (but not the whole dataset) fits into the memory of a single machine.
# MAGIC
# MAGIC After constructing the network object and loading data for each member of the ensemble, the state dictionary of the network and its corresponding training data are packed into a tuple, and appended to a list. The list of state_dict/data tuples is then parallelized to obtain a Spark RDD. We found out that it is difficult to directly put the PyTorch neural network objects in an RDD, apparently because Spark does not know by default how to encode these objects and transfer them between nodes. Therefore, we use the state dictionary instead, which contains all the necessary information about a network.
# MAGIC
# MAGIC Finally, the network training function (*Train* defined above) is applied to each element of the model/data RDD, in the form of a *map* operation. This tells Spark to run the function on each element in parallel (on worker machines) independently.
# COMMAND ----------
def train_ensemble(n_models, inputdims, nclasses, max_layers, min_neurons, max_neurons, data_iterator):
"""Constructing and training a distributed ensemble of feedforward networks
Args:
    n_models: number of ensemble members
    inputdims: number of feature dimensions
nclasses: number of the classes
max_layers: maximum allowed number of hidden layers for the networks
min_neurons,max_neurons: the valid range for the number of neurons in each hidden layer
data_iterator: a Python iterator over the parts of the training data (one part per each member of the ensemble)
Returns: a list of state dictionaries of the trained networks
"""
# initialization
model_data=[] # pairs of model parameters and their training data
for i in range(n_models):
# pick random number of hidden layers and neurons for each network
nhidden=random.randint(1, max_layers)
shape=[inputdims]
for k in range(nhidden):
shape.append(random.randint(min_neurons, max_neurons))
shape.append(nclasses)
net=MLP(shape)
#fetch the next part of data
d=next(data_iterator)
x=d[0]
y=d[1]
model_data.append((net.state_dict(),x,y))
# distribute the array
model_data_par= sc.parallelize(model_data)
# execute the train function on the worker nodes
models_trained = model_data_par.map(lambda t: Train(*t))
#transfer the trained models and loss values to the driver
models_trained=models_trained.collect()
#print the training loss values
print("training losses:")
print([v[1] for v in models_trained])
# return the state dicts
return [v[0] for v in models_trained]
# COMMAND ----------
# MAGIC %md
# MAGIC ** Utility functions for saving and loading the ensemble model from the disk **
# COMMAND ----------
def save_models_distr(models, dir_, model_names=None):
dir_ = Path(dir_)
dir_.mkdir(exist_ok=True, parents=True)
if model_names is None:
model_names = [f"m{idx}.pt" for idx in range(0, models.count())]
assert len(model_names) == models.count()
model_paths = [dir_ / model_name for model_name in model_names]
model_paths = sc.parallelize(model_paths)
models.zip(model_paths).foreach(lambda dict_and_path: torch.save(*dict_and_path))
def save_models(models, dir_, model_names=None):
dir_ = Path(dir_)
dir_.mkdir(exist_ok=True, parents=True)
if model_names is None:
model_names = [f"m{idx}.pt" for idx in range(0, len(models))]
assert len(model_names) == len(models)
model_paths = [dir_ / model_name for model_name in model_names]
for state_dict, path in zip(models, model_paths):
torch.save(state_dict, path)
def load_models(model_names, dir_):
dir_ = Path(dir_)
model_paths = [dir_ / model_name for model_name in model_names]
state_dicts = [torch.load(path) for path in model_paths]
return sc.parallelize(state_dicts)
# COMMAND ----------
# MAGIC %md
# MAGIC # Distributed ensembles prediction API
# MAGIC
# MAGIC From the training process we get a distributed iterator `models` over the trained models.
# MAGIC (NB. the `train_ensemble` function actually collects the trained models for convenience.)
# MAGIC Internally this is an iterator over `torch.state_dicts` holding the param's of each model respectively.
# MAGIC
# MAGIC There are different ways in which we can do predictions:
# MAGIC
# MAGIC - Distributed predictions with `ens_preds(models, test_x)`, which maps the combined model and test data to predictions for each data point.
# MAGIC This iterator can be collected to a list of the predictions for each ensemble member, or further processed in a distributed and functional manner.
# MAGIC This is the most flexible variant since it preserves the prediction of every member on every datapoint.
# MAGIC It is also the most expensive (if we do collect all the data); see the short snippet at the end of this list.
# MAGIC
# MAGIC - Reduced/aggregated predictions with `ens_preds_reduced(models, test_x, red_fn)`. Working with an ensemble, we are often concerned with some aggregate of the members' predictions, eg., the average prediction.
# MAGIC For this we provide a reducing version of `ens_preds` where the user need only supply the reduce function `red_fn`, describing how to combine the predictions of two ensemble members.
# MAGIC For instance, if you would like to get the average probability vector of a classifier ensemble for every data point you would use:
# MAGIC ```python
# MAGIC avg_prob_vecs = ens_preds_reduced(models, x, lambda x, y: (x+y)/2)
# MAGIC ```
# MAGIC Internally, this simply calls `.reduce(red_fn)` on the iterator returned from `ens_preds`. This is merely a convenience function.
# MAGIC
# MAGIC - Metrics per ensemble member. If the number of test samples is large, we will collect a lot of predictions over the cluster. If we know that we only want an aggregate metric for each member across the whole test data,
# MAGIC we use the `ens_metrics` method for aggregation on the worker nodes.
# MAGIC ```python
# MAGIC avg_acc_per_member = ens_metrics(models, test_input, test_true_labels, <list of metric functions>)
# MAGIC ```
# MAGIC Note that each metric function must be on the form: f: R^(N x D_x) x R^(N) --> T
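# MAGIC
# MAGIC For the first variant, the raw distributed predictions can likewise be collected explicitly (this keeps every member's prediction and can be expensive for large test sets):
# MAGIC ```python
# MAGIC all_member_preds = ens_preds(models, x).collect()  # list with one prediction tensor per ensemble member
# MAGIC ```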
# COMMAND ----------
def ens_preds(models, test_x):
"""Distributed ensemble predictions
Takes a set of models and test data and makes distributed predictions
Let N := number of data points and D_x := the dimension of a single datapoint x
Args:
models (list[state_dict]): set of models represented as a list (state_dict, shape)
test_x (torch.Tensor): Tensor of size (N, D_x)
Returns:
Distributed iterator over the predictions. E.g. an iterator over probability vectors in the case of a classifier ens.
"""
pred_iter = _pred_models_iter(models, test_x)
return pred_iter.map(lambda t: Predict(*t))
def ens_preds_reduced(models, test_x, red_fn):
"""Reduced/aggregated ensemble predictions
Takes a set of models and test data and makes distributed predictions and reduces them with a provided `red_fn`
Let N := number of data points and D_x := the dimension of a single datapoint x
Args:
models (list[state_dict]): set of models represented as a list (state_dict, shape)
test_x (torch.Tensor): Tensor of size (N, D_x)
red_fn function: f: R^D_x x R^D_x --> R^D_x
Returns:
Single reduced/aggregated prediction of the whole ensemble
"""
return ens_preds(models, test_x).reduce(red_fn)
def ens_metrics(models, test_x, test_y, metrics):
"""Distributed ensemble metrics
Takes a set of models and test data, predicts probability vectors and calculates the provided metrics
given true labels `test_y`
Let N := number of data points and D_x := the dimension of a single datapoint x
Args:
models (list[state_dict]): set of models represented as a list (state_dict, shape)
test_x (torch.Tensor): Tensor of size (N, D_x)
test_y (torch.Tensor): Tensor of size (N). NB: hard labels
metrics (list[functions]): List of functions where each funcion f: R^(N x D_x) x R^(N) --> T, where T is a generic output type.
"""
return ens_preds(models, test_x).map(lambda prob_vecs: [metric(prob_vecs, test_y) for metric in metrics])
def _pred_models_iter(models, test_x):
"""Helper function to generate a distributed iterator over models and test data
NB: the same `test_x` is given to all elements in the iterator
Args:
models (list[state_dict]): set of models represented as a list (state_dict, shape)
test_x (torch.Tensor): Tensor of size (N, D_x)
"""
if isinstance(models, PipelinedRDD):
return models.map(lambda model: (model, test_x))
elif isinstance(models, list):
models_and_data = [(params, test_x) for params in models]
return sc.parallelize(models_and_data)
else:
raise TypeError("'models' must be an RDD or a list")
def avg_accuracy(prob_vecs, labels):
"""Example metrics function: average accuracy
Let N := number of data points and C := the number of classes
Args:
prob_vecs (torch.Tensor): Tensor of size (N, C)
labels (torch.Tensor): Tensor of size (N), hard labels, with classes corresponding to indices 0, ..., C-1
Returns:
        torch.Tensor: scalar tensor, the average accuracy over all datapoints.
"""
hard_preds = torch.argmax(prob_vecs, 1)
return (hard_preds == labels).float().mean()
def entropy(prob_vecs):
return - (prob_vecs * torch.log(prob_vecs)).sum(1)
def avg_entropy(prob_vec_1, prob_vec_2):
e_1 = entropy(prob_vec_1)
e_2 = entropy(prob_vec_2)
    return (e_1 + e_2) / 2
# COMMAND ----------
# MAGIC %md
# MAGIC # Application example: Distributed predictions
# MAGIC
# MAGIC Let's first demonstrate our distributed ensembles with a simple toy example.
# MAGIC We'll create gaussian toy data with three slightly overlapping clusters:
# COMMAND ----------
means = np.array([(0, 0), (1,0), (1, 1)])
variances = 0.1 * np.ones((3, 2))
num_observations = 5000
class_proportions = np.array([1/3, 1/3, 1/3])
data_train, data_test = create_gaussian_RDD(means, variances, num_observations, class_proportions, train_test_split=True)
# COMMAND ----------
# MAGIC %md
# MAGIC Now we'll create and distributedly train a classifier ensemble and save it to file.
# MAGIC This is not strictly necessary; we can in fact make predictions with the trained ensemble without ever collecting it from the worker nodes. In most use cases, however, it is convenient to save the ensemble on disk.
# COMMAND ----------
data_iterator=get_partitioned_rdd(data_train).map(Totensor).toLocalIterator()
n_models=5 # ensemble size
inputdims=2 # features dimensions
nclasses=3 # number of classes
max_layers=2
min_neurons=2
max_neurons=5
models_trained = train_ensemble(n_models, inputdims, nclasses, max_layers, min_neurons, max_neurons, data_iterator)
saved_models_dir = Path("saved_models/gaussian")
save_models(models_trained, saved_models_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Making distributed predictions
# MAGIC
# MAGIC With the trained ensemble we can make predictions and calculate metrics, all in a distributed manner.
# COMMAND ----------
test_xx, test_yy = Totensor(data_test.collect())
model_names = [f"m{idx}.pt" for idx in range(n_models)]
models = load_models(model_names, saved_models_dir).collect()
avg_prob_vecs = ens_preds_reduced(models, test_xx, lambda x, y: (x+y)/2) # (A single) Average prob. vec for all data points.
avg_acc = ens_metrics(models, test_xx, test_yy, [avg_accuracy]).collect() # Average acc. for each ens. over all data points
print(f"Average accuracy for each ensemble member: {[acc[0].item() for acc in avg_acc]}")
print(f"Average accuracy for the whole ensemble: {avg_accuracy(avg_prob_vecs, test_yy).item()}")
# COMMAND ----------
# MAGIC %md
# MAGIC We can also make use of the uncertainty description provided by the ensemble.
# MAGIC We'll plot the test data with each point coloured by the predicted distribution, which illustrates certain predictions with distinct colours and uncertain ones with muddied colours.
# COMMAND ----------
preds = avg_prob_vecs.detach().numpy()
hard_preds = avg_prob_vecs.argmax(1).detach().numpy()
every_nth = 5
train_xx, train_yy = Totensor(data_train.collect())
(fig, (ax_1, ax_2)) = plt.subplots(1, 2)
# For the train data we use the true labels to simulate a completely certain prediction.
color_map = {0: [1, 0 ,0], 1: [0, 1, 0], 2: [0, 0, 1]}
ax_1.scatter(train_xx[:, 0], train_xx[:, 1], c=[color_map[class_.item()] for class_ in train_yy], label="Train")
ax_2.scatter(test_xx[::every_nth, 0], test_xx[::every_nth, 1], c=preds[::every_nth], label="Test")
ax_1.set_title("Train")
ax_2.set_title("Test")
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC # Application example: Out of distribution detection
# MAGIC
# MAGIC Our distributed ensemble can be used for out of distribution (OOD) detection.
# MAGIC A simple way is to measure the entropy of the combined ensemble prediction; high entropy signals inputs unlike those seen in the training distribution.
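# MAGIC (Here, entropy refers to the Shannon entropy of the predictive distribution, `H(p) = -sum_c p_c * log(p_c)`, which is exactly what the `entropy` helper defined above computes.)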
# MAGIC
# MAGIC "Real world" out of distribution data can be hard to come by, but a typical example would be images in different contexts. E.g. scenic vistas or pathology scans may share the same feature space but have very different distribution. For the data we have collected, no such OOD set exists, so we will showcase it with an OOD set of gaussian noise.
# MAGIC Of course, noise that is very far from the in distribution (ID) data will saturate the classifiers softmax for one element, actually yielding very confident, low entropy, nonsense predictions.
# MAGIC
# MAGIC Regardless, let's see how to do this with the distributed ensemble.
# MAGIC First, we train the ensemble and, as before, save the trained parameters to file.
# COMMAND ----------
data_train, data_test = load_firewall_data(True)
data_iterator=get_partitioned_rdd(data_train).map(Totensor).toLocalIterator()
n_models=10
models_trained=train_ensemble(n_models,
inputdims=11,
nclasses=4,
max_layers=4,
min_neurons=5,
max_neurons=15,
data_iterator=data_iterator)
saved_models_dir = Path("saved_models/firewall")
save_models(models_trained, saved_models_dir)
# COMMAND ----------
def gen_ood_data(test_x, num_samples):
num_test_samples, dim_x = test_x.size()
random_mean = np.random.rand(dim_x).reshape(1, dim_x)
random_cov = np.random.rand(dim_x).reshape(1, dim_x) * 10
ood_x, _ = Totensor(create_gaussian_RDD(random_mean, random_cov, num_test_samples, np.array([1.0]), train_test_split=False).collect())
return ood_x
data = data_test.collect()
batch_size = -1
batch = data[0:batch_size]
test_xx, test_yy = Totensor(batch)
ood_x = gen_ood_data(test_xx, batch_size)
model_names = [f"m{idx}.pt" for idx in range(n_models)]
models_p = load_models(model_names, saved_models_dir).collect()
# We can either calculate the average entropy of the ensemble members
avg_entropy_id = ens_preds(models_p, test_xx).map(entropy).reduce(lambda x, y: (x+y)/2).detach().numpy()
avg_entropy_ood = ens_preds(models_p, ood_x).map(entropy).reduce(lambda x, y: (x+y)/2).detach().numpy()
# ... or the entropy of the average ensemble prediction.
entropy_avg_id = entropy(ens_preds_reduced(models_p, test_xx, lambda x, y: (x+y)/2)).detach().numpy()
entropy_avg_ood = entropy(ens_preds_reduced(models_p, ood_x, lambda x, y: (x+y)/2)).detach().numpy()
# Set entropy measure
entropy_id = avg_entropy_id
entropy_ood = avg_entropy_ood
# COMMAND ----------
# MAGIC %md
# MAGIC **Comparison of the entropy of the ensemble classifier on in-distribution and OOD data**
# COMMAND ----------
def entropy_hist(id_, ood, n_bins, upper_x_bound):
(fig, (ax_1, ax_2)) = plt.subplots(2, 1)
_plot_hist(ax_1, id_, n_bins, "ID", "b", upper_x_bound)
_plot_hist(ax_2, ood, n_bins, "OOD", "r", upper_x_bound)
fig.suptitle("Entropy histogram")
ax_2.set_xlabel("entropy")
plt.show()
def _plot_hist(ax, counts, n_bins, label, color, upper_x_bound):
ax.hist(counts, bins=n_bins, label=label, color=color, density=True)
ax.set_xbound(lower = 0.0, upper = upper_x_bound)
ax.set_ylabel("rel freq")
ax.legend()
n_bins = 100
entropy_bound = 0.15
entropy_hist(entropy_id, entropy_ood, n_bins, entropy_bound)
# COMMAND ----------
# MAGIC %md
# MAGIC **Evaluation of the OOD detection in terms of ROC curve and area under this curve (AUROC)**
# COMMAND ----------
def is_ood(entropies, cut_off_entropy):
return entropies > cut_off_entropy
def fpr_and_tpr(id_, ood, res):
max_entropy = max(id_.max(), ood.max())
# max_entropy = id_.max()
thresholds = np.arange(0.0, max_entropy, max_entropy / res)
roc = np.array([(fpr(id_, th), tpr(ood, th)) for th in thresholds])
roc = roc[roc[:,0].argsort()]
fprs, tprs = (roc[:, 0], roc[:, 1])
return fprs, tprs
def fpr(id_, th):
id_pred = is_ood(id_, th)
fp = id_pred.sum()
tn = id_pred.shape[0] - fp
return fp / (tn + fp)
def tpr(ood, th):
ood_pred = is_ood(ood, th)
tp = ood_pred.sum()
fn = ood_pred.shape[0] - tp
return tp / (tp + fn)
fpr, tpr = fpr_and_tpr(avg_entropy_id, avg_entropy_ood, res = 100)
(fig, ax) = plt.subplots()
ax.plot(fpr, tpr)
ax.set_xlabel("FPR")
ax.set_ylabel("TPR")
ax.set_title("ROC")
# COMMAND ----------
print(f"AUROC: {np.trapz(tpr, fpr)}")
# COMMAND ----------
# MAGIC %md
# MAGIC ## References
# MAGIC [1] Lakshminarayanan, B., Pritzel, A., & Blundell, C. (2017). Simple and scalable predictive uncertainty estimation using deep ensembles. In Advances in neural information processing systems (pp. 6402-6413).
# MAGIC
# MAGIC [2] Ovadia, Y., Fertig, E., Ren, J., Nado, Z., Sculley, D., Nowozin, S., ... & Snoek, J. (2019). Can you trust your model's uncertainty? Evaluating predictive uncertainty under dataset shift. In Advances in Neural Information Processing Systems (pp. 13991-14002).
# MAGIC
# MAGIC [3] Apache Spark. (2021, 01, 11). Classification and Regression [https://spark.apache.org/docs/latest/ml-classification-regression.html].
# MAGIC
# MAGIC [4] Chen, J., Pan, X., Monga, R., Bengio, S., & Jozefowicz, R. (2016). Revisiting distributed synchronous SGD. arXiv preprint arXiv:1604.00981.
# MAGIC
# MAGIC [5] Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. | unlicense |
pierre-chaville/automlk | automlk/worker.py | 1 | 24631 | import eli5
import threading
import _thread
import sys
import os
from copy import deepcopy
from .config import *
from .dataset import get_dataset, get_dataset_status
from .graphs import graph_histogram_regression, graph_histogram_classification, graph_predict_regression, \
graph_predict_classification
from .prepare import get_eval_sets
from .solutions import *
from .monitor import *
from .metrics import evaluate_metric
from .specific import apply_specific_metrics, return_specific_metrics
from .solutions_pp import pp_solutions_map
from .results import get_pred_eval_test
from .xyset import XySet
from sklearn.pipeline import make_pipeline
log = logging.getLogger(__name__)
def get_search_rounds(dataset_id):
"""
get all the results of the search with preprocessing and models
:param dataset_id: id of the dataset
:return: results of the search as a dataframe
"""
results = list_key_store('dataset:%s:rounds' % dataset_id)
return pd.DataFrame(results)
def create_model_json(dataset_id, round_id):
"""
creates a json file to be downloaded
:param dataset_id: id of the dataset
:param round_id: id of the round
:return: file name
"""
search = get_search_rounds(dataset_id)
round = search[search.round_id == int(round_id)].to_dict(orient='records')[0]
d = {}
for key in ['solution', 'pipeline', 'model_params', 'mode', 'model_class', 'level']:
d[key] = round[key]
# update n_estimators when early stopping
solution = model_solutions_map[round['solution']]
if solution.early_stopping != '':
model = pickle.load(open(get_dataset_folder(dataset_id) + '/models/%s_model.pkl' % round_id, 'rb'))
if hasattr(model, 'n_estimators'):
d['model_params']['n_estimators'] = model.n_estimators
filename = get_dataset_folder(dataset_id) + '/models/model_%s.json' % round_id
with open(filename, 'w') as f:
f.write(json.dumps(d))
return filename
def __timer_control(f_stop):
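    # watchdog: re-schedules itself every 10 seconds and aborts the current search (by interrupting
    # the main thread) when the time limit is exceeded or the dataset leaves 'searching' status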
global __worker_timer_start
global __worker_timer_limit
global __worker_dataset
t = time.time()
# check if duration > max
if (__worker_timer_limit > 0) and (t - __worker_timer_start > __worker_timer_limit):
f_stop.set()
log.info('max delay %d seconds reached...' % __worker_timer_limit)
_thread.interrupt_main()
# check dataset is in searching model
if __worker_dataset != '':
if get_dataset_status(__worker_dataset) != 'searching':
f_stop.set()
log.info('dataset %s is no more in searching mode, aborting...' % __worker_dataset)
_thread.interrupt_main()
if not f_stop.is_set():
# call again in 10 seconds
threading.Timer(10, __timer_control, [f_stop]).start()
def worker_loop(worker_id, gpu=False):
"""
periodically pool the receiver queue for a search job
:param worker_id: index of the worker on this machine
:param gpu: can use gpu on this machine
:return:
"""
global __worker_timer_start
global __worker_timer_limit
global __worker_dataset
__worker_dataset = ''
__worker_timer_start = 0
__worker_timer_limit = 0
f_stop = threading.Event()
# start calling f now and every 60 sec thereafter
__timer_control(f_stop)
while True:
try:
# poll queue
msg_search = brpop_key_store('controller:search_queue')
heart_beep('worker', msg_search, worker_id, gpu)
__worker_timer_start = time.time()
__worker_timer_limit = 0
__worker_dataset = ''
if msg_search is not None:
__worker_dataset = msg_search['dataset_id']
__worker_timer_limit = msg_search['time_limit']
log.info('received %s' % msg_search)
msg_search = {**msg_search, **{'start_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'host_name': socket.gethostname()}}
job_search(msg_search)
except KeyboardInterrupt:
log.info('Keyboard interrupt: exiting')
# stop the timer thread
f_stop.set()
exit()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)))
with open(get_data_folder() + '/errors.txt', 'a') as f:
f.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + str(msg_search) + '\n')
f.write('%s in %s line:%s error: %s' % (exc_type.__name__, fname, str(exc_tb.tb_lineno), str(e)) + '\n')
f.write('-'*80 + '\n')
def job_search(msg_search):
"""
    execute the search on the scope defined in the message msg_search
    :param msg_search: message with parameters of the search
:return:
"""
# load dataset
dataset = get_dataset(msg_search['dataset_id'])
# define function for specific metric if any
if dataset.metric == 'specific':
apply_specific_metrics(dataset.dataset_id)
# load train/eval/test data
ds_ini = get_eval_sets(msg_search['dataset_id'])
if msg_search['level'] == 2:
ds_ini = __create_stacking(dataset, __get_pool_models(dataset, msg_search['ensemble_depth']), ds_ini)
# pre-processing
t_start = time.time()
feature_names, ds, pipe = __pre_processing(dataset, msg_search['pipeline'], deepcopy(ds_ini))
t_end = time.time()
msg_search['duration_process'] = int(t_end - t_start)
# generate model from solution
solution = model_solutions_map[msg_search['solution']]
if solution.is_wrapper:
model = solution.model(**{**{'problem_type': dataset.problem_type,
'y_n_classes': dataset.y_n_classes},
**msg_search['model_params']})
else:
model = solution.model(**msg_search['model_params'])
msg_search['model_class'] = model.__class__.__name__
pipe_transform, pipe_model = make_pipeline(*pipe), make_pipeline(*(pipe + [model]))
# then proceed to the search
__search(dataset, feature_names, solution, pipe_transform, pipe_model, model, msg_search, ds)
def make_model(dataset, msg_search, ds_ini):
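    # rebuild the full (pre-processing + model) pipeline described by msg_search; the pre-processing
    # steps are fitted on the training data here, but the model itself is not trained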
# pre-processing
feature_names, ds, pipe = __pre_processing(dataset, msg_search['pipeline'], deepcopy(ds_ini))
# generate model from solution
solution = model_solutions_map[msg_search['solution']]
if solution.is_wrapper:
model = solution.model(**{**{'problem_type': dataset.problem_type,
'y_n_classes': dataset.y_n_classes},
**msg_search['model_params']})
else:
model = solution.model(**msg_search['model_params'])
msg_search['model_class'] = model.__class__.__name__
pipe_transform, pipe_model = make_pipeline(*pipe), make_pipeline(*(pipe + [model]))
return pipe_model
def __pre_processing(dataset, pipeline, ds):
# performs the different pre-processing steps
pipe = []
feature_names = None
p_context = [{'name': f.name, 'col_type': f.col_type, 'raw_type': f.raw_type, 'n_missing': int(f.n_missing),
'n_unique_values': int(f.n_unique_values), 'text_ref': f.text_ref}
for f in dataset.features if f.name in dataset.x_cols]
for ref, category, name, params in pipeline:
if category != 'sampling':
solution = pp_solutions_map[ref]
p_class = solution.process
process = p_class(**{**params, 'context': p_context})
log.info('executing process %s %s %s' % (category, name, process.transformer_params))
ds.X_train = process.fit_transform(ds.X_train, ds.y_train)
pipe.append(process)
if len(ds.X_test) > 0:
ds.X_test = process.transform(ds.X_test)
ds.X = process.transform(ds.X)
if len(ds.X_submit) > 0:
ds.X_submit = process.transform(ds.X_submit)
log.info('-> %d features (%s)' % (len(process.get_feature_names()), type(ds.X_train)))
feature_names = process.get_feature_names()
return feature_names, ds, pipe
def __search(dataset, feature_names, solution, pipe_transform, pipe_model, model, msg_search, ds):
log.info('optimizing with %s, params: %s' % (solution.name, msg_search['model_params']))
# fit, test & score
t_start = time.time()
round_id = msg_search['round_id']
level = msg_search['level']
threshold = msg_search['threshold']
pct = msg_search['pct']
cv = msg_search['cv']
outlier, y_pred_eval_list, y_pred_test, y_pred_submit, ds = __cross_validation(solution, model, dataset, ds, threshold, pct, cv)
if hasattr(model, 'num_rounds'):
msg_search['num_rounds'] = model.num_rounds
elif hasattr(model, 'n_estimators'):
msg_search['num_rounds'] = model.n_estimators
else:
msg_search['num_rounds'] = None
# check outlier
if outlier:
log.info('outlier, skipping this round')
return
# save model importance
if level == 2 and solution.is_wrapper:
__save_importance(model.model, dataset, feature_names, round_id)
else:
__save_importance(model, dataset, feature_names, round_id)
# create eval and submit results
y_pred_eval = __create_eval_submit(dataset, ds, round_id, y_pred_eval_list, y_pred_submit, cv)
# save predictions (eval and test set)
pickle.dump([y_pred_eval, y_pred_test, y_pred_submit],
open(get_dataset_folder(dataset.dataset_id) + '/predict/%s.pkl' % round_id, 'wb'))
# then feature names
pickle.dump(ds.X_train.columns, open(
get_dataset_folder(dataset.dataset_id) + '/models/' + '%s_feature_names.pkl' % msg_search['round_id'], 'wb'))
# generate graphs
__create_graphs(dataset, round_id, ds, y_pred_eval, y_pred_test)
t_end = time.time()
msg_search['duration_model'] = int(t_end - t_start)
# calculate metrics
__evaluate_round(dataset, msg_search, ds.y_train, y_pred_eval, ds.y_test, y_pred_test, ds.y_eval_list,
y_pred_eval_list)
# then save model, pipe
__save_model(dataset, round_id, pipe_transform, pipe_model, model)
# explain model
__explain_model(dataset, msg_search['round_id'], pipe_model, model, feature_names)
def __cross_validation(solution, model, dataset, ds, threshold, pct, cv):
# performs a cross validation on cv_folds, and predict also on X_test
y_pred_eval, y_pred_test, y_pred_submit = [], [], []
for i, (train_index, eval_index) in enumerate(ds.cv_folds):
# use only a percentage of data (default is 100% )
train_index1 = train_index[:int(len(train_index)*pct)]
X1, y1 = ds.X_train.iloc[train_index1], ds.y_train[train_index1]
X2, y2 = ds.X_train.iloc[eval_index], ds.y_train[eval_index]
if i == 0 and solution.use_early_stopping:
log.info('early stopping round')
if __fit_early_stopping(solution, model, dataset, threshold, X1, y1, X2, y2):
return True, 0, 0, 0, ds
# then train on train set and predict on eval set
model.fit(X1, y1)
y_pred = __predict(solution, model, X2)
if threshold != 0:
# test outlier:
score = __evaluate_metric(dataset, y2, y_pred)
if score > threshold:
log.info('%dth round found outlier: %.5f with threshold %.5f' % (i, score, threshold))
return True, 0, 0, 0, ds
y_pred_eval.append(y_pred)
# we also predict on test & submit set (to be averaged later)
y_pred_test.append(__predict(solution, model, ds.X_test))
if not cv:
# we stop at the first fold
y_pred_test = y_pred_test[0]
if dataset.mode == 'competition':
y_pred_submit = __predict(solution, model, ds.X_submit)
# update y_train on fold, in order to compute metrics and graphs
ds.y_train = y2
return False, y_pred_eval, y_pred_test, y_pred_submit, ds
if dataset.mode == 'standard':
# train on complete train set
model.fit(ds.X_train, ds.y_train)
y_pred_test = __predict(solution, model, ds.X_test)
else:
# train on complete X y set
model.fit(ds.X, ds.y)
if dataset.mode == 'competition':
y_pred_submit = __predict(solution, model, ds.X_submit)
# test = mean of y_pred_test on multiple folds
y_pred_test = np.mean(y_pred_test, axis=0)
else:
y_pred_test = __predict(solution, model, ds.X_test)
return False, y_pred_eval, y_pred_test, y_pred_submit, ds
def __create_stacking(dataset, pool, ds):
# create X by stacking predictions
for j, (u, m, p_eval, p_test, p_submit) in enumerate(
zip(pool.pool_model_round_ids, pool.pool_model_names, pool.pool_eval_preds,
pool.pool_test_preds, pool.pool_submit_preds)):
# check if array has 2 dimensions
shape = len(np.shape(p_eval))
if shape == 1:
p_eval = np.reshape(p_eval, (len(p_eval), 1))
p_test = np.reshape(p_test, (len(p_test), 1))
if dataset.mode == 'competition':
p_submit = np.reshape(p_submit, (len(p_submit), 1))
if j == 0:
ds.X_train = p_eval
ds.X_test = p_test
if dataset.mode == 'competition':
ds.X_submit = p_submit
else:
            # stack the predictions side by side (concatenate as additional columns)
ds.X_train = np.concatenate((ds.X_train, p_eval), axis=1)
ds.X_test = np.concatenate((ds.X_test, p_test), axis=1)
if dataset.mode == 'competition':
ds.X_submit = np.concatenate((ds.X_submit, p_submit), axis=1)
# then convert to dataframes
ds.X_train, ds.X_test, ds.X_submit = pd.DataFrame(ds.X_train), pd.DataFrame(ds.X_test), pd.DataFrame(ds.X_submit)
# update feature names
feature_names = __get_pool_features(dataset, pool)
ds.X_train.columns = feature_names
if len(ds.X_test) > 0:
ds.X_test.columns = feature_names
if len(ds.X_submit) > 0:
ds.X_submit.columns = feature_names
# X and y for fit
ds.X = pd.concat([ds.X_train, ds.X_test])
ds.y = np.concatenate((ds.y_train, ds.y_test))
return ds
def __create_eval_submit(dataset, ds, round_id, y_pred_eval_list, y_pred_submit, cv):
# create eval and submit results from lists
if cv:
# y_pred_eval as concat of folds
y_pred_eval = np.concatenate(y_pred_eval_list)
# reindex eval to be aligned with y
y_pred_eval[ds.i_eval] = y_pred_eval.copy()
else:
y_pred_eval = y_pred_eval_list[0]
# generate submit file
if dataset.filename_submit != '':
ls = len(ds.id_submit)
# if dataset.problem_type == 'regression':
if np.shape(y_pred_submit)[1] == 1:
submit = np.concatenate((np.reshape(ds.id_submit, (ls, 1)), np.reshape(y_pred_submit, (ls, 1))), axis=1)
else:
submit = np.concatenate((np.reshape(ds.id_submit, (ls, 1)), np.reshape(y_pred_submit[:, 1], (ls, 1))),
axis=1)
# create submission file
df_submit = pd.DataFrame(submit)
df_submit.columns = [dataset.col_submit, dataset.y_col]
# allocate id column to avoid type conversion (to float)
df_submit[dataset.col_submit] = np.reshape(ds.id_submit, (ls, 1))
df_submit.to_csv(get_dataset_folder(dataset.dataset_id) + '/submit/submit_%s.csv' % round_id, index=False)
return y_pred_eval
def __create_graphs(dataset, round_id, ds, y_pred_eval, y_pred_test):
# generate graphs
if dataset.problem_type == 'regression':
graph_predict_regression(dataset, round_id, ds.y_train, y_pred_eval, 'eval')
graph_predict_regression(dataset, round_id, ds.y_test, y_pred_test, 'test')
graph_histogram_regression(dataset, round_id, y_pred_eval, 'eval')
graph_histogram_regression(dataset, round_id, y_pred_test, 'test')
else:
graph_predict_classification(dataset, round_id, ds.y_train, y_pred_eval, 'eval')
graph_predict_classification(dataset, round_id, ds.y_test, y_pred_test, 'test')
graph_histogram_classification(dataset, round_id, y_pred_eval, 'eval')
graph_histogram_classification(dataset, round_id, y_pred_test, 'test')
def __fit_early_stopping(solution, model, dataset, threshold, X1, y1, X2, y2):
# fit with early stopping the model
if solution.is_wrapper:
# with early stopping, we perform an initial round to get number of rounds
model.fit_early_stopping(X1, y1, X2, y2)
else:
model.fit(X1, y1, eval_set=[(X2, y2)], early_stopping_rounds=PATIENCE, verbose=False)
if solution.early_stopping == 'LGBM':
num_rounds = model.best_iteration_ if model.best_iteration_ != 0 else MAX_ROUNDS
elif solution.early_stopping == 'XGB':
num_rounds = model.best_iteration if model.best_iteration != 0 else MAX_ROUNDS
params = model.get_params()
params['n_estimators'] = num_rounds
model.set_params(**params)
log.info('early stopping best iteration = %d' % num_rounds)
if threshold != 0:
# test outlier (i.e. exceeds threshold)
y_pred = __predict(solution, model, X2)
score = __evaluate_metric(dataset, y2, y_pred)
if score > threshold:
log.info('early stopping found outlier: %.5f with threshold %.5f' % (score, threshold))
time.sleep(10)
return True
return False
def __predict(solution, model, X):
if solution.problem_type == 'regression':
return model.predict(X)
else:
return model.predict_proba(X)
def __resample(pipeline, X, y):
# apply resampling steps in pipeline
for ref, category, name, params in pipeline:
if category == 'sampling':
solution = pp_solutions_map[ref]
p_class = solution.process
process = p_class(params)
return process.fit_sample(X, y)
return X, y
def __save_importance(model, dataset, feature_names, round_id):
# saves feature importance (as a dataframe)
if hasattr(model, 'feature_importances_'):
importance = pd.DataFrame(feature_names)
importance['importance'] = model.feature_importances_
importance.columns = ['feature', 'importance']
pickle.dump(importance, open(get_dataset_folder(dataset.dataset_id) + '/features/%s.pkl' % round_id, 'wb'))
elif hasattr(model, 'dict_importance_'):
# xgboost type: feature importance is a dictionary
imp = model.dict_importance_
importance = pd.DataFrame([{'feature': key, 'importance': imp[key]} for key in imp.keys()])
pickle.dump(importance, open(get_dataset_folder(dataset.dataset_id) + '/features/%s.pkl' % round_id, 'wb'))
def __evaluate_round(dataset, msg_search, y_train, y_pred_eval, y_test, y_pred_test, y_eval_list, y_pred_eval_list):
# score on full eval set, test set and cv
msg_search['score_eval'] = __evaluate_metric(dataset, y_train, y_pred_eval)
msg_search['score_test'] = __evaluate_metric(dataset, y_test, y_pred_test)
msg_search['scores_cv'] = [__evaluate_metric(dataset, y_act, y_pred) for y_act, y_pred in
zip(y_eval_list, y_pred_eval_list)]
msg_search['cv_mean'] = np.mean(msg_search['scores_cv'])
msg_search['cv_std'] = np.std(msg_search['scores_cv'])
msg_search['cv_max'] = np.max(msg_search['scores_cv'])
# score with secondary metrics
msg_search['eval_other_metrics'] = {m: __evaluate_other_metrics(dataset, m, y_train, y_pred_eval) for m in
dataset.other_metrics}
msg_search['test_other_metrics'] = {m: __evaluate_other_metrics(dataset, m, y_test, y_pred_test) for m in
dataset.other_metrics}
rpush_key_store(RESULTS_QUEUE, msg_search)
log.info('completed search')
def __get_pool_features(dataset, pool):
# return the lst of features in an ensemble model
if dataset.problem_type == 'regression':
feature_names = [name + '_' + str(round_id) for round_id, name in
zip(pool.pool_model_round_ids, pool.pool_model_names)]
else:
feature_names = []
for round_id, name in zip(pool.pool_model_round_ids, pool.pool_model_names):
for k in range(dataset.y_n_classes):
feature_names.append(name + '_' + str(k) + '_' + str(round_id))
return feature_names
def __get_pool_models(dataset, depth):
    # retrieves all results in order to build an ensemble
df = get_search_rounds(dataset.dataset_id)
# keep only the first (depth) models of level 0
df = df[((df.level == 1) & (df.score_eval != METRIC_NULL)) & df.cv].sort_values(by=['model_name', 'score_eval'])
round_ids = []
model_names = []
k_model = ''
for index, row in df.iterrows():
if k_model != row['model_name']:
count_model = 0
k_model = row['model_name']
if count_model > depth:
continue
model_names.append(row['model_name'])
round_ids.append(row['round_id'])
count_model += 1
log.info('length of pool: %d for ensemble of depth %d' % (len(round_ids), depth))
# retrieves predictions
preds = [get_pred_eval_test(dataset.dataset_id, round_id) for round_id in round_ids]
# exclude predictions with nan
excluded = [i for i, x in enumerate(preds) if not (np.max(x[0]) == np.max(x[0]))]
preds = [x for i, x in enumerate(preds) if i not in excluded]
round_ids = [x for i, x in enumerate(round_ids) if i not in excluded]
model_names = [x for i, x in enumerate(model_names) if i not in excluded]
preds_eval = [x[0] for x in preds]
preds_test = [x[1] for x in preds]
preds_submit = [x[2] for x in preds]
return EnsemblePool(round_ids, model_names, preds_eval, preds_test, preds_submit)
def __store_search_error(dataset, t, e, model):
log.info('Error: %s' % e)
# track error
def __evaluate_metric(dataset, y_act, y_pred):
"""
evaluates primary metrics for the dataset
:param dataset: dataset object
:param y_act: actual values
:param y_pred: predicted values
:return: metrics
"""
if dataset.metric == 'specific':
if dataset.best_is_min:
return return_specific_metrics(y_act, y_pred)
else:
return -return_specific_metrics(y_act, y_pred)
else:
return evaluate_metric(y_act, y_pred, dataset.metric, dataset.y_n_classes)
def __evaluate_other_metrics(dataset, m, y_act, y_pred):
"""
evaluates other metrics for the dataset
:param dataset: dataset object
:param m: name of the other metric
:param y_act: actual values
:param y_pred: predicted values
:return: metrics
"""
return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes)
def __save_model(dataset, round_id, pipe_transform, pipe_model, model):
"""
save model, pipe
:param dataset: dataset object
:param round_id: round id
:param pipe_transform: sklearn pipeline of pre-processing steps only
:param pipe_model: sklearn pipeline of pre-processing steps + model
:param model: estimator model
:return:
"""
folder = get_dataset_folder(dataset.dataset_id) + '/models/'
pickle.dump(model, open(folder + '%s_model.pkl' % round_id, 'wb'))
pickle.dump(pipe_model, open(folder + '%s_pipe_model.pkl' % round_id, 'wb'))
pickle.dump(pipe_transform, open(folder + '%s_pipe_transform.pkl' % round_id, 'wb'))
def __explain_model(dataset, round_id, pipe_model, model, feature_names):
"""
explain the weights and the prediction of the model
:param dataset: dataset
    :param round_id: round id
:param pipe_model: the pipeline including the model
:param model: the model only
:param feature_names: feature names
:return:
"""
try:
exp = eli5.explain_weights(model, feature_names=list(feature_names))
with open(get_dataset_folder(dataset.dataset_id) + '/predict/eli5_model_%s.html' % round_id, 'w') as f:
f.write(eli5.format_as_html(exp))
except:
return | mit |
sagark/tsdb-perf-test | logparse/logparser_insertquery.py | 1 | 2749 | import sys
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import numpy as np
#filename = sys.argv[1]
DB_NAMES = ["mysql-myisam", "mysql-innodb", "opentsdb", "postgres",
"readingdb", "scidb"]
itervals = ['r-', 'g-', 'b-', 'k-', 'r:', 'g:', 'b:', 'k:']
graph_1_iter = iter(itervals)
graph_2_iter = iter(itervals)
graph_3_iter = iter(itervals)
def parsedata(filearg):
log = file(filearg)
lines = log.readlines()
log.close()
while "Started Logging" not in lines[0]:
lines.pop(0)
#get title
title = lines.pop(0).split("Started Logging: ")[1].split(' at')[0]
while "[ run-0 ]" not in lines[0]:
lines.pop(0)
#print(lines[0])
points = []
counter = 0
while "finished" not in lines[0]:
insert = lines.pop(0).split(": ")[2]
query = lines.pop(0).split(": ")[2]
size = lines.pop(0).split("now ")[1].replace(" bytes.", "")
points.append([counter, eval(insert), eval(query), eval(size)])
counter += 10000 #number of records before each round
graphthis = []
for x in points:
addtog = []
addtog.append(x[0]) #number of points in db already
addtog.append(x[1][2]) #time to insert
addtog.append(x[2][2]) #time to query all
addtog.append(x[3]/1000000) #db size after completion, convert to MB
graphthis.append(addtog)
a = np.array(graphthis)
    for name in DB_NAMES:
        if name in filearg:
            a = (name, a)
            break
    return a
db_arrays = []
for x in sys.argv[1:]:
db_arrays.append(parsedata(x))
fig = plt.figure(figsize=(20, 30), dpi=300)
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax1.set_title('Insert: Add 1 Record to 10000 Streams, 100 times')
ax1.set_xlabel('# of Records in DB')
ax1.set_ylabel('Time for operation completion (s)')
ax2.set_title('Query: Add 1 Record to 10000 Streams, 100 times')
ax2.set_xlabel('# of Records in DB')
ax2.set_ylabel('Time for operation completion (s)')
ax3.set_title('DB Size: Add 1 Record to 10000 Streams, 100 times')
ax3.set_xlabel('# of Records in DB')
ax3.set_ylabel('DB size (MB)')
legend1 = ()
legend2 = ()
legend3 = ()
for a in db_arrays:
name = a[0]
a = a[1]
x = a[:,0]
y1 = a[:,1]
y2 = a[:,2]
y3 = a[:,3]
    ax1.plot(x, y1, next(graph_1_iter))
    ax2.plot(x, y2, next(graph_2_iter))
    ax3.plot(x, y3, next(graph_3_iter))
legend1 += (name,)
legend2 += (name,)
legend3 += (name,)
leg1 = ax1.legend(legend1, loc='upper left', shadow=True)
leg2 = ax2.legend(legend2, loc='upper left', shadow=True)
leg3 = ax3.legend(legend3, loc='upper left', shadow=True)
plt.savefig('test.png')
| bsd-2-clause |
ericmckean/syzygy | third_party/numpy/files/numpy/fft/fftpack.py | 22 | 39261 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
    except KeyError:
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
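        # crop or zero-pad the input along `axis` so that it has exactly n points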
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
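    The Fourier-interpolation resampling mentioned in the Notes, sketched with
    an assumed 4-point input upsampled to 8 points (only the shape is shown):
    >>> np.fft.irfft(np.fft.rfft(np.array([0., 1., 0., -1.])), 8).shape
    (8,)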
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
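    Examples
    --------
    A shape-only sketch (illustrative): a real signal of length ``n`` yields a
    half spectrum of ``n//2 + 1`` points.
    >>> np.fft.ihfft(np.zeros(10)).shape
    (6,)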
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
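    # normalize the (s, axes) pair: default s to the input shape along axes,
    # default axes to the last len(s) dimensions, and for the inverse real
    # transform (invreal) recover the full last-axis length 2*(m-1)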
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
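    # apply the 1-D transform `function` over each requested axis in turn,
    # starting with the last axis listed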
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
    for ii in reversed(range(len(axes))):
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
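    Examples
    --------
    A minimal sketch, added for illustration: an all-ones block has only a
    zero-frequency term, and the last axis is shortened to ``n//2 + 1`` points.
    >>> np.fft.rfft2(np.ones((2, 2)))
    array([[ 4.+0.j, 0.+0.j],
           [ 0.+0.j, 0.+0.j]])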
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
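    Examples
    --------
    A shape-only round-trip sketch (illustrative, using the default even
    output length):
    >>> np.fft.irfft2(np.fft.rfft2(np.ones((4, 4)))).shape
    (4, 4)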
"""
return irfftn(a, s, axes)
| apache-2.0 |
cactusbin/nyt | matplotlib/lib/matplotlib/tight_layout.py | 16 | 13115 | """
This module provides routines to adjust subplot params so that subplots are
nicely fit in the figure. In doing so, only axis labels, tick labels and axes
titles are currently considered.
Internally, it assumes that the margins (left_margin, etc.), which are the
differences between ax.get_tightbbox and ax.bbox, are independent of the axes
position. This may fail if Axes.adjustable is datalim. It will also fail in
some cases, for example when the left or right margin is affected by xlabel.
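A typical entry point, shown only for orientation (this sketch uses the public
pyplot/Figure API, which eventually calls into the helpers below):
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(2, 2)
    fig.tight_layout(pad=1.08)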
"""
import warnings
import matplotlib
from matplotlib.transforms import TransformedBbox, Bbox
from matplotlib.font_manager import FontProperties
rcParams = matplotlib.rcParams
def _get_left(tight_bbox, axes_bbox):
return axes_bbox.xmin - tight_bbox.xmin
def _get_right(tight_bbox, axes_bbox):
return tight_bbox.xmax - axes_bbox.xmax
def _get_bottom(tight_bbox, axes_bbox):
return axes_bbox.ymin - tight_bbox.ymin
def _get_top(tight_bbox, axes_bbox):
return tight_bbox.ymax - axes_bbox.ymax
def auto_adjust_subplotpars(fig, renderer,
nrows_ncols,
num1num2_list,
subplot_list,
ax_bbox_list=None,
pad=1.08, h_pad=None, w_pad=None,
rect=None):
"""
    Return a dictionary of subplot parameters so that the spacing between
    subplots is adjusted. Note that this function ignores the geometry
    information of the subplots themselves, and only uses what is given by
    the *nrows_ncols* and *num1num2_list* parameters. Also, the results could
    be incorrect if some subplots have ``adjustable=datalim``.
Parameters:
nrows_ncols
number of rows and number of columns of the grid.
num1num2_list
list of numbers specifying the area occupied by the subplot
subplot_list
        list of subplots that will be used to calculate optimal subplot_params.
pad : float
padding between the figure edge and the edges of subplots, as a fraction
of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect
[left, bottom, right, top] in normalized (0, 1) figure coordinates.
"""
rows, cols = nrows_ncols
pad_inches = pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
if h_pad is not None:
vpad_inches = h_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
vpad_inches = pad_inches
if w_pad is not None:
hpad_inches = w_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
hpad_inches = pad_inches
if len(subplot_list) == 0:
raise RuntimeError("")
if len(num1num2_list) != len(subplot_list):
raise RuntimeError("")
if rect is None:
margin_left = None
margin_bottom = None
margin_right = None
margin_top = None
else:
margin_left, margin_bottom, _right, _top = rect
if _right:
margin_right = 1. - _right
else:
margin_right = None
if _top:
margin_top = 1. - _top
else:
margin_top = None
vspaces = [[] for i in range((rows + 1) * cols)]
hspaces = [[] for i in range(rows * (cols + 1))]
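    # one list slot per gap: hspaces holds rows * (cols + 1) horizontal gaps
    # (left edge, the cols - 1 inner gaps, right edge for each row), and
    # vspaces holds (rows + 1) * cols vertical gaps laid out the same way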
union = Bbox.union
if ax_bbox_list is None:
ax_bbox_list = []
for subplots in subplot_list:
ax_bbox = union([ax.get_position(original=True)
for ax in subplots])
ax_bbox_list.append(ax_bbox)
for subplots, ax_bbox, (num1, num2) in zip(subplot_list,
ax_bbox_list,
num1num2_list):
#ax_bbox = union([ax.get_position(original=True) for ax in subplots])
tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
tight_bbox = TransformedBbox(tight_bbox_raw,
fig.transFigure.inverted())
row1, col1 = divmod(num1, cols)
if num2 is None:
# left
hspaces[row1 * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row1 * (cols + 1) + (col1 + 1)].append(
_get_right(tight_bbox, ax_bbox))
# top
vspaces[row1 * cols + col1].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row1 + 1) * cols + col1].append(
_get_bottom(tight_bbox, ax_bbox))
else:
row2, col2 = divmod(num2, cols)
for row_i in range(row1, row2 + 1):
# left
hspaces[row_i * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row_i * (cols + 1) + (col2 + 1)].append(
_get_right(tight_bbox, ax_bbox))
for col_i in range(col1, col2 + 1):
# top
vspaces[row1 * cols + col_i].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row2 + 1) * cols + col_i].append(
_get_bottom(tight_bbox, ax_bbox))
fig_width_inch, fig_height_inch = fig.get_size_inches()
# margins can be negative for axes with aspect applied. And we
# append + [0] to make minimum margins 0
if not margin_left:
margin_left = max([sum(s) for s in hspaces[::cols + 1]] + [0])
margin_left += pad_inches / fig_width_inch
if not margin_right:
margin_right = max([sum(s) for s in hspaces[cols::cols + 1]] + [0])
margin_right += pad_inches / fig_width_inch
if not margin_top:
margin_top = max([sum(s) for s in vspaces[:cols]] + [0])
margin_top += pad_inches / fig_height_inch
if not margin_bottom:
margin_bottom = max([sum(s) for s in vspaces[-cols:]] + [0])
margin_bottom += pad_inches / fig_height_inch
kwargs = dict(left=margin_left,
right=1 - margin_right,
bottom=margin_bottom,
top=1 - margin_top)
if cols > 1:
hspace = max([sum(s)
for i in range(rows)
for s
in hspaces[i * (cols + 1) + 1:(i + 1) * (cols + 1) - 1]])
hspace += hpad_inches / fig_width_inch
h_axes = ((1 - margin_right - margin_left) -
hspace * (cols - 1)) / cols
kwargs["wspace"] = hspace / h_axes
if rows > 1:
vspace = max([sum(s) for s in vspaces[cols:-cols]])
vspace += vpad_inches / fig_height_inch
v_axes = ((1 - margin_top - margin_bottom) -
vspace * (rows - 1)) / rows
kwargs["hspace"] = vspace / v_axes
return kwargs
def get_renderer(fig):
if fig._cachedRenderer:
renderer = fig._cachedRenderer
else:
canvas = fig.canvas
if canvas and hasattr(canvas, "get_renderer"):
renderer = canvas.get_renderer()
else:
# not sure if this can happen
warnings.warn("tight_layout : falling back to Agg renderer")
from matplotlib.backends.backend_agg import FigureCanvasAgg
canvas = FigureCanvasAgg(fig)
renderer = canvas.get_renderer()
return renderer
def get_subplotspec_list(axes_list, grid_spec=None):
"""
Return a list of subplotspec from the given list of axes. For an
instance of axes that does not support subplotspec, None is
inserted in the list.
If grid_spec is given, None is inserted for those not from
the given grid_spec.
"""
subplotspec_list = []
for ax in axes_list:
axes_or_locator = ax.get_axes_locator()
if axes_or_locator is None:
axes_or_locator = ax
if hasattr(axes_or_locator, "get_subplotspec"):
subplotspec = axes_or_locator.get_subplotspec()
subplotspec = subplotspec.get_topmost_subplotspec()
gs = subplotspec.get_gridspec()
if grid_spec is not None:
if gs != grid_spec:
subplotspec = None
elif gs.locally_modified_subplot_params():
subplotspec = None
else:
subplotspec = None
subplotspec_list.append(subplotspec)
return subplotspec_list
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Return subplot parameters for tight-layouted-figure with specified
padding.
Parameters:
*fig* : figure instance
*axes_list* : a list of axes
*subplotspec_list* : a list of subplotspec associated with each
axes in axes_list
*renderer* : renderer instance
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
subplot_list = []
nrows_list = []
ncols_list = []
ax_bbox_list = []
subplot_dict = {} # multiple axes can share
# same subplot_interface (e.g., axes_grid1). Thus
# we need to join them together.
subplotspec_list2 = []
for ax, subplotspec in zip(axes_list,
subplotspec_list):
if subplotspec is None:
continue
subplots = subplot_dict.setdefault(subplotspec, [])
if not subplots:
myrows, mycols, _, _ = subplotspec.get_geometry()
nrows_list.append(myrows)
ncols_list.append(mycols)
subplotspec_list2.append(subplotspec)
subplot_list.append(subplots)
ax_bbox_list.append(subplotspec.get_position(fig))
subplots.append(ax)
max_nrows = max(nrows_list)
max_ncols = max(ncols_list)
num1num2_list = []
for subplotspec in subplotspec_list2:
rows, cols, num1, num2 = subplotspec.get_geometry()
div_row, mod_row = divmod(max_nrows, rows)
div_col, mod_col = divmod(max_ncols, cols)
if (mod_row != 0) or (mod_col != 0):
raise RuntimeError("")
rowNum1, colNum1 = divmod(num1, cols)
if num2 is None:
rowNum2, colNum2 = rowNum1, colNum1
else:
rowNum2, colNum2 = divmod(num2, cols)
num1num2_list.append((rowNum1 * div_row * max_ncols +
colNum1 * div_col,
((rowNum2 + 1) * div_row - 1) * max_ncols +
(colNum2 + 1) * div_col - 1))
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad)
if rect is not None:
# if rect is given, the whole subplots area (including
# labels) will fit into the rect instead of the
# figure. Note that the rect argument of
# *auto_adjust_subplotpars* specify the area that will be
# covered by the total area of axes.bbox. Thus we call
# auto_adjust_subplotpars twice, where the second run
# with adjusted rect parameters.
left, bottom, right, top = rect
if left is not None:
left += kwargs["left"]
if bottom is not None:
bottom += kwargs["bottom"]
if right is not None:
right -= (1 - kwargs["right"])
if top is not None:
top -= (1 - kwargs["top"])
#if h_pad is None: h_pad = pad
#if w_pad is None: w_pad = pad
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=(left, bottom, right, top))
return kwargs
| unlicense |
jfinkels/networkx | examples/drawing/knuth_miles.py | 34 | 2952 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance betwen the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import re
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
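            # a distance line lists miles from the current city to each
            # previously read city, most recently read first (hence new cities
            # are inserted at the front of `cities` below)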
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
    # make new graph of cities, with an edge if the distance is less than 300 miles
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
    except:
        # drawing is optional for this example; silently skip if matplotlib
        # is unavailable or the plot cannot be produced
        pass
| bsd-3-clause |